|
@@ -0,0 +1,521 @@
|
|
|
+package backable_ringbuffer
|
|
|
+
|
|
|
+import (
|
|
|
+ "errors"
|
|
|
+ "unsafe"
|
|
|
+)
|
|
|
+
|
|
|
// Sentinel errors returned by BackableRingBuffer operations.
// Compare with errors.Is rather than by message text.
var (
	// ErrTooManyDataToWrite reports that a write was truncated because the
	// buffer had less free space than len(p); the fitting prefix is written.
	ErrTooManyDataToWrite = errors.New("too many data to write")
	// ErrIsFull reports that the buffer has no free space at all.
	ErrIsFull = errors.New("ringbuffer is full")
	// ErrIsEmpty reports that the buffer holds no unread data.
	ErrIsEmpty = errors.New("ringbuffer is empty")
	// ErrOutOfRange reports a positional access outside the readable region.
	ErrOutOfRange = errors.New("index out of range")
	// ErrAccuqireLock reports that a Try* method could not take the lock.
	// NOTE: the identifier keeps its historical misspelling for backward
	// compatibility; the message spelling ("accquire") has been corrected.
	ErrAccuqireLock = errors.New("no lock to acquire")
)
|
|
|
+
|
|
|
// BackableRingBuffer is a fixed-capacity byte ring buffer that, in addition
// to the usual FIFO Read/Write, supports "writing back": pushing bytes in
// front of the read position so they are consumed first (see WriteBack and
// WriteByteBack). All exported methods synchronize on mu.
//
// The r == w position is ambiguous (empty or full); isFull disambiguates.
type BackableRingBuffer struct {
	buf    []byte // backing storage; len(buf) == size
	size   int    // capacity in bytes, fixed at construction
	r      int    // next position to read
	w      int    // next position to write
	isFull bool   // true when r == w because the buffer is full, not empty
	mu     Mutex  // NOTE(review): package-local lock type with TryLock — declared elsewhere; confirm instances are never copied
}
|
|
|
+
|
|
|
+func NewBackableRingBuffer(size int) *BackableRingBuffer {
|
|
|
+ b := &BackableRingBuffer{
|
|
|
+ size: size,
|
|
|
+ buf: make([]byte, size),
|
|
|
+ }
|
|
|
+ return b
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) Read(p []byte) (n int, err error) {
|
|
|
+ if len(p) == 0 {
|
|
|
+ return 0, nil
|
|
|
+ }
|
|
|
+
|
|
|
+ r.mu.Lock()
|
|
|
+ n, err = r.read(p)
|
|
|
+ r.mu.Unlock()
|
|
|
+ return n, err
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) Peep(p []byte) (n int, err error) {
|
|
|
+ if len(p) == 0 {
|
|
|
+ return 0, nil
|
|
|
+ }
|
|
|
+
|
|
|
+ r.mu.Lock()
|
|
|
+ n, err = r.peep(p)
|
|
|
+ r.mu.Unlock()
|
|
|
+ return n, err
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) TryRead(p []byte) (n int, err error) {
|
|
|
+ if len(p) == 0 {
|
|
|
+ return 0, nil
|
|
|
+ }
|
|
|
+
|
|
|
+ ok := r.mu.TryLock()
|
|
|
+ if !ok {
|
|
|
+ return 0, ErrAccuqireLock
|
|
|
+ }
|
|
|
+
|
|
|
+ n, err = r.read(p)
|
|
|
+ r.mu.Unlock()
|
|
|
+ return n, err
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) TryPeep(p []byte) (n int, err error) {
|
|
|
+ if len(p) == 0 {
|
|
|
+ return 0, nil
|
|
|
+ }
|
|
|
+
|
|
|
+ ok := r.mu.TryLock()
|
|
|
+ if !ok {
|
|
|
+ return 0, ErrAccuqireLock
|
|
|
+ }
|
|
|
+
|
|
|
+ n, err = r.peep(p)
|
|
|
+ r.mu.Unlock()
|
|
|
+ return n, err
|
|
|
+}
|
|
|
+
|
|
|
// read copies up to len(p) readable bytes into p and advances r.r.
// Caller must hold r.mu. Returns ErrIsEmpty when nothing is buffered.
func (r *BackableRingBuffer) read(p []byte) (n int, err error) {
	// r == w means either empty or full; isFull disambiguates.
	if r.w == r.r && !r.isFull {
		return 0, ErrIsEmpty
	}

	if r.w > r.r {
		// Readable region is contiguous: buf[r.r:r.w].
		n = r.w - r.r
		if n > len(p) {
			n = len(p)
		}
		copy(p, r.buf[r.r:r.r+n])
		r.r = (r.r + n) % r.size
		return
	}

	// w <= r: readable region wraps — buf[r.r:size] followed by buf[0:r.w].
	n = r.size - r.r + r.w
	if n > len(p) {
		n = len(p)
	}

	if r.r+n <= r.size {
		// Requested bytes all lie in the tail segment.
		copy(p, r.buf[r.r:r.r+n])
	} else {
		// Two-part copy: tail of buf, then head of buf.
		c1 := r.size - r.r
		copy(p, r.buf[r.r:r.size])
		c2 := n - c1
		copy(p[c1:], r.buf[0:c2])
	}
	r.r = (r.r + n) % r.size

	// At least one byte was consumed, so the buffer cannot be full anymore.
	r.isFull = false

	return n, err
}
|
|
|
+
|
|
|
// peep copies up to len(p) readable bytes into p WITHOUT advancing r.r or
// clearing isFull. Caller must hold r.mu. Returns ErrIsEmpty when nothing
// is buffered.
func (r *BackableRingBuffer) peep(p []byte) (n int, err error) {
	// r == w means either empty or full; isFull disambiguates.
	if r.w == r.r && !r.isFull {
		return 0, ErrIsEmpty
	}

	if r.w > r.r {
		// Readable region is contiguous: buf[r.r:r.w].
		n = r.w - r.r
		if n > len(p) {
			n = len(p)
		}
		copy(p, r.buf[r.r:r.r+n])
		return
	}

	// w <= r: readable region wraps — buf[r.r:size] followed by buf[0:r.w].
	n = r.size - r.r + r.w
	if n > len(p) {
		n = len(p)
	}

	if r.r+n <= r.size {
		// Requested bytes all lie in the tail segment.
		copy(p, r.buf[r.r:r.r+n])
	} else {
		// Two-part copy: tail of buf, then head of buf.
		c1 := r.size - r.r
		copy(p, r.buf[r.r:r.size])
		c2 := n - c1
		copy(p[c1:], r.buf[0:c2])
	}
	return n, err
}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) ReadByte() (b byte, err error) {
|
|
|
+ r.mu.Lock()
|
|
|
+ if r.w == r.r && !r.isFull {
|
|
|
+ r.mu.Unlock()
|
|
|
+ return 0, ErrIsEmpty
|
|
|
+ }
|
|
|
+ b = r.buf[r.r]
|
|
|
+ r.r++
|
|
|
+ if r.r == r.size {
|
|
|
+ r.r = 0
|
|
|
+ }
|
|
|
+
|
|
|
+ r.isFull = false
|
|
|
+ r.mu.Unlock()
|
|
|
+ return b, err
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) PeepByte() (b byte, err error) {
|
|
|
+ r.mu.Lock()
|
|
|
+ if r.w == r.r && !r.isFull {
|
|
|
+ r.mu.Unlock()
|
|
|
+ return 0, ErrIsEmpty
|
|
|
+ }
|
|
|
+ b = r.buf[r.r]
|
|
|
+ r.mu.Unlock()
|
|
|
+ return b, err
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) Write(p []byte) (n int, err error) {
|
|
|
+ if len(p) == 0 {
|
|
|
+ return 0, nil
|
|
|
+ }
|
|
|
+ r.mu.Lock()
|
|
|
+ n, err = r.write(p)
|
|
|
+ r.mu.Unlock()
|
|
|
+
|
|
|
+ return n, err
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) WriteBack(p []byte) (n int, err error) {
|
|
|
+ if len(p) == 0 {
|
|
|
+ return 0, nil
|
|
|
+ }
|
|
|
+ r.mu.Lock()
|
|
|
+ n, err = r.writeBack(p)
|
|
|
+ r.mu.Unlock()
|
|
|
+
|
|
|
+ return n, err
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) TryWrite(p []byte) (n int, err error) {
|
|
|
+ if len(p) == 0 {
|
|
|
+ return 0, nil
|
|
|
+ }
|
|
|
+ ok := r.mu.TryLock()
|
|
|
+ if !ok {
|
|
|
+ return 0, ErrAccuqireLock
|
|
|
+ }
|
|
|
+
|
|
|
+ n, err = r.write(p)
|
|
|
+ r.mu.Unlock()
|
|
|
+
|
|
|
+ return n, err
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) TryWriteBack(p []byte) (n int, err error) {
|
|
|
+ if len(p) == 0 {
|
|
|
+ return 0, nil
|
|
|
+ }
|
|
|
+ ok := r.mu.TryLock()
|
|
|
+ if !ok {
|
|
|
+ return 0, ErrAccuqireLock
|
|
|
+ }
|
|
|
+
|
|
|
+ n, err = r.writeBack(p)
|
|
|
+ r.mu.Unlock()
|
|
|
+
|
|
|
+ return n, err
|
|
|
+}
|
|
|
+
|
|
|
// write appends p after the current write position; caller must hold r.mu.
// If p is larger than the free space, only the fitting prefix is written
// and ErrTooManyDataToWrite is returned alongside the byte count.
func (r *BackableRingBuffer) write(p []byte) (n int, err error) {
	if r.isFull {
		return 0, ErrIsFull
	}

	// Free space: when w >= r the used region is [r, w) (possibly empty),
	// so the free region wraps; otherwise the free region is [w, r).
	var avail int
	if r.w >= r.r {
		avail = r.size - r.w + r.r
	} else {
		avail = r.r - r.w
	}

	if len(p) > avail {
		// Truncate to what fits; the partial write is reported via err.
		err = ErrTooManyDataToWrite
		p = p[:avail]
	}
	n = len(p)

	if r.w >= r.r {
		// Free space may wrap past the end of buf.
		c1 := r.size - r.w
		if c1 >= n {
			// Fits in the tail segment.
			copy(r.buf[r.w:], p)
			r.w += n
		} else {
			// Two-part copy: fill the tail, then spill into the head.
			copy(r.buf[r.w:], p[:c1])
			c2 := n - c1
			copy(r.buf[0:], p[c1:])
			r.w = c2
		}
	} else {
		// Free space is contiguous between w and r; avail guarantees fit.
		copy(r.buf[r.w:], p)
		r.w += n
	}

	// Normalize w == size to 0 so w always stays in [0, size).
	if r.w == r.size {
		r.w = 0
	}
	// Landing exactly on r means every byte is now occupied.
	if r.w == r.r {
		r.isFull = true
	}

	return n, err
}
|
|
|
+
|
|
|
// writeBack prepends p in front of the read position so that a subsequent
// read returns p first, then the previously buffered data. Caller must
// hold r.mu. If p exceeds the free space, only the fitting prefix of p is
// written and ErrTooManyDataToWrite is returned.
func (r *BackableRingBuffer) writeBack(p []byte) (n int, err error) {
	if r.isFull {
		return 0, ErrIsFull
	}

	// Free space — same computation as write().
	var avail int
	if r.w >= r.r {
		avail = r.size - r.w + r.r
	} else {
		avail = r.r - r.w
	}

	if len(p) > avail {
		// Truncate to what fits; the partial write is reported via err.
		err = ErrTooManyDataToWrite
		p = p[:avail]
	}
	n = len(p)

	if r.w >= r.r {
		if n <= r.r {
			// p fits entirely just before r: buf[r.r-n : r.r].
			stp := r.r - n
			copy(r.buf[stp:], p)
			r.r = r.r - n
		} else {
			// p wraps backwards: its first lan bytes land at the end of
			// buf, the remaining r.r bytes fill buf[0:r.r]. avail
			// guarantees stp1 >= w, so unread data is not overwritten.
			lan := n - r.r
			stp1 := r.size - lan
			copy(r.buf[stp1:], p[:lan])
			copy(r.buf, p[lan:])
			r.r = stp1
		}
	} else {
		// Readable region already wraps; free space [w, r) is contiguous,
		// so p fits directly before r.r.
		stp2 := r.r - n
		copy(r.buf[stp2:], p)
		r.r = stp2
	}

	// Backing into w means every byte is now occupied.
	if r.w == r.r {
		r.isFull = true
	}

	return n, err
}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) WriteByte(c byte) error {
|
|
|
+ r.mu.Lock()
|
|
|
+ err := r.writeByte(c)
|
|
|
+ r.mu.Unlock()
|
|
|
+ return err
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) WriteByteBack(c byte) error {
|
|
|
+ r.mu.Lock()
|
|
|
+ err := r.writeByteBack(c)
|
|
|
+ r.mu.Unlock()
|
|
|
+ return err
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) TryWriteByte(c byte) error {
|
|
|
+ ok := r.mu.TryLock()
|
|
|
+ if !ok {
|
|
|
+ return ErrAccuqireLock
|
|
|
+ }
|
|
|
+
|
|
|
+ err := r.writeByte(c)
|
|
|
+ r.mu.Unlock()
|
|
|
+ return err
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) TryWriteByteBack(c byte) error {
|
|
|
+ ok := r.mu.TryLock()
|
|
|
+ if !ok {
|
|
|
+ return ErrAccuqireLock
|
|
|
+ }
|
|
|
+
|
|
|
+ err := r.writeByteBack(c)
|
|
|
+ r.mu.Unlock()
|
|
|
+ return err
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) writeByte(c byte) error {
|
|
|
+ if r.w == r.r && r.isFull {
|
|
|
+ return ErrIsFull
|
|
|
+ }
|
|
|
+ r.buf[r.w] = c
|
|
|
+ r.w++
|
|
|
+
|
|
|
+ if r.w == r.size {
|
|
|
+ r.w = 0
|
|
|
+ }
|
|
|
+ if r.w == r.r {
|
|
|
+ r.isFull = true
|
|
|
+ }
|
|
|
+
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) writeByteBack(c byte) error {
|
|
|
+ if r.w == r.r && r.isFull {
|
|
|
+ return ErrIsFull
|
|
|
+ }
|
|
|
+
|
|
|
+ pos := r.r - 1
|
|
|
+ if pos < 0 {
|
|
|
+ pos = r.size - 1
|
|
|
+ }
|
|
|
+ r.buf[pos] = c
|
|
|
+ r.r = pos
|
|
|
+
|
|
|
+ if r.w == r.r {
|
|
|
+ r.isFull = true
|
|
|
+ }
|
|
|
+
|
|
|
+ return nil
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) Length() int {
|
|
|
+ r.mu.Lock()
|
|
|
+ defer r.mu.Unlock()
|
|
|
+
|
|
|
+ if r.w == r.r {
|
|
|
+ if r.isFull {
|
|
|
+ return r.size
|
|
|
+ }
|
|
|
+ return 0
|
|
|
+ }
|
|
|
+
|
|
|
+ if r.w > r.r {
|
|
|
+ return r.w - r.r
|
|
|
+ }
|
|
|
+
|
|
|
+ return r.size - r.r + r.w
|
|
|
+}
|
|
|
+
|
|
|
// Capacity returns the fixed total size of the buffer in bytes. The value
// never changes after construction, so no locking is required here.
func (r *BackableRingBuffer) Capacity() int {
	return r.size
}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) Free() int {
|
|
|
+ r.mu.Lock()
|
|
|
+ defer r.mu.Unlock()
|
|
|
+
|
|
|
+ if r.w == r.r {
|
|
|
+ if r.isFull {
|
|
|
+ return 0
|
|
|
+ }
|
|
|
+ return r.size
|
|
|
+ }
|
|
|
+
|
|
|
+ if r.w < r.r {
|
|
|
+ return r.r - r.w
|
|
|
+ }
|
|
|
+
|
|
|
+ return r.size - r.w + r.r
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) WriteString(s string) (n int, err error) {
|
|
|
+ x := (*[2]uintptr)(unsafe.Pointer(&s))
|
|
|
+ h := [3]uintptr{x[0], x[1], x[1]}
|
|
|
+ buf := *(*[]byte)(unsafe.Pointer(&h))
|
|
|
+ return r.Write(buf)
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) Bytes() []byte {
|
|
|
+ r.mu.Lock()
|
|
|
+ defer r.mu.Unlock()
|
|
|
+
|
|
|
+ if r.w == r.r {
|
|
|
+ if r.isFull {
|
|
|
+ buf := make([]byte, r.size)
|
|
|
+ copy(buf, r.buf[r.r:])
|
|
|
+ copy(buf[r.size-r.r:], r.buf[:r.w])
|
|
|
+ return buf
|
|
|
+ }
|
|
|
+ return nil
|
|
|
+ }
|
|
|
+
|
|
|
+ if r.w > r.r {
|
|
|
+ buf := make([]byte, r.w-r.r)
|
|
|
+ copy(buf, r.buf[r.r:r.w])
|
|
|
+ return buf
|
|
|
+ }
|
|
|
+
|
|
|
+ n := r.size - r.r + r.w
|
|
|
+ buf := make([]byte, n)
|
|
|
+
|
|
|
+ if r.r+n < r.size {
|
|
|
+ copy(buf, r.buf[r.r:r.r+n])
|
|
|
+ } else {
|
|
|
+ c1 := r.size - r.r
|
|
|
+ copy(buf, r.buf[r.r:r.size])
|
|
|
+ c2 := n - c1
|
|
|
+ copy(buf[c1:], r.buf[0:c2])
|
|
|
+ }
|
|
|
+
|
|
|
+ return buf
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) PeepByteAt(pos int) (b byte, err error) {
|
|
|
+ defer r.mu.Unlock()
|
|
|
+ r.mu.Lock()
|
|
|
+ if r.w == r.r && !r.isFull {
|
|
|
+ return 0, ErrIsEmpty
|
|
|
+ }
|
|
|
+ if r.r == r.w && pos > 0 {
|
|
|
+ return 0, ErrOutOfRange
|
|
|
+ }
|
|
|
+ if r.r < r.w {
|
|
|
+ rpos := r.r + pos
|
|
|
+ b = r.buf[rpos]
|
|
|
+ return
|
|
|
+ } else {
|
|
|
+ if r.r+pos >= r.size {
|
|
|
+ rpos := r.r + pos - r.size
|
|
|
+ if rpos >= r.w {
|
|
|
+ return 0, ErrOutOfRange
|
|
|
+ }
|
|
|
+ b = r.buf[rpos]
|
|
|
+ return
|
|
|
+ } else {
|
|
|
+ rpos := r.r + pos
|
|
|
+ b = r.buf[rpos]
|
|
|
+ return
|
|
|
+ }
|
|
|
+ }
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) IsFull() bool {
|
|
|
+ r.mu.Lock()
|
|
|
+ defer r.mu.Unlock()
|
|
|
+
|
|
|
+ return r.isFull
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) IsEmpty() bool {
|
|
|
+ r.mu.Lock()
|
|
|
+ defer r.mu.Unlock()
|
|
|
+
|
|
|
+ return !r.isFull && r.w == r.r
|
|
|
+}
|
|
|
+
|
|
|
+func (r *BackableRingBuffer) Reset() {
|
|
|
+ r.mu.Lock()
|
|
|
+ defer r.mu.Unlock()
|
|
|
+
|
|
|
+ r.r = 0
|
|
|
+ r.w = 0
|
|
|
+ r.isFull = false
|
|
|
+}
|