Changed send to be synchronous. Wrote session inner goroutines.

This commit is contained in:
Jay
2026-04-18 21:44:25 -04:00
parent 8d79a002f8
commit e49c7cc021
13 changed files with 589 additions and 407 deletions

View File

@@ -4,18 +4,27 @@ import "errors"
import "fmt"
var (
	// Configuration validation errors.
	// NOTE(review): these two lack the conventional Err prefix
	// (ErrInvalidKeepaliveTimeout / ErrInvalidMaxQueueSize); renaming
	// would break callers, so flagging only.
	InvalidKeepaliveTimeout = errors.New("keepalive timeout cannot be negative")
	InvalidMaxQueueSize     = errors.New("maximum queue size cannot be negative")

	// Pool errors — sentinel values intended for errors.Is matching
	// once wrapped by NewPoolError.
	ErrPoolClosed   = errors.New("pool is closed")
	ErrPeerNotFound = errors.New("peer not found")
	ErrPeerExists   = errors.New("peer already exists")

	// Worker errors — returned (wrapped) by Worker.Send when no
	// connection is currently stored.
	ErrConnectionUnavailable = errors.New("connection unavailable")
)
// NewConfigError reports an invalid configuration value, prefixing the
// supplied description with "configuration error".
func NewConfigError(text string) error {
	const prefix = "configuration error"
	return fmt.Errorf("%s: %s", prefix, text)
}
func NewPoolError(text string) error {
return fmt.Errorf("pool error: %s", text)
func NewPoolError(err error) error {
return fmt.Errorf("pool error: %w", err)
}
func NewWorkerError(id string, text string) error {
return fmt.Errorf("worker %q error: %s", id, text)
func NewWorkerError(id string, err error) error {
return fmt.Errorf("worker %q error: %w", id, err)
}

View File

@@ -0,0 +1,62 @@
package initiatorpool
import (
"fmt"
"git.wisehodl.dev/jay/go-honeybee/honeybeetest"
"git.wisehodl.dev/jay/go-honeybee/transport"
"github.com/stretchr/testify/assert"
"io"
"testing"
)
// setupWorkerTestConnection builds a transport.Connection backed by a
// honeybeetest.MockSocket whose reads and writes are driven through the
// returned incomingData / outgoingData channels (each buffered to 100).
func setupWorkerTestConnection(t *testing.T) (
	conn *transport.Connection,
	mockSocket *honeybeetest.MockSocket,
	incomingData chan honeybeetest.MockIncomingData,
	outgoingData chan honeybeetest.MockOutgoingData,
) {
	t.Helper()

	incoming := make(chan honeybeetest.MockIncomingData, 100)
	outgoing := make(chan honeybeetest.MockOutgoingData, 100)
	sock := honeybeetest.NewMockSocket()

	// Close is idempotent: the Closed channel is shut exactly once.
	sock.CloseFunc = func() error {
		sock.Once.Do(func() { close(sock.Closed) })
		return nil
	}

	// Reads block until test data arrives or the socket is closed.
	sock.ReadMessageFunc = func() (int, []byte, error) {
		select {
		case d := <-incoming:
			return d.MsgType, d.Data, d.Err
		case <-sock.Closed:
			return 0, nil, io.EOF
		}
	}

	// Writes never block: they land in outgoing, report EOF on a closed
	// socket, or fail immediately when the buffer is full.
	sock.WriteMessageFunc = func(msgType int, data []byte) error {
		select {
		case outgoing <- honeybeetest.MockOutgoingData{MsgType: msgType, Data: data}:
			return nil
		case <-sock.Closed:
			return io.EOF
		default:
			return fmt.Errorf("mock outgoing channel unavailable")
		}
	}

	c, err := transport.NewConnectionFromSocket(sock, nil, nil)
	assert.NoError(t, err)

	return c, sock, incoming, outgoing
}
// connClosed reports whether conn's Errors channel has been closed,
// which this test suite treats as the connection having shut down.
// A pending (still-open) error value or an empty channel both yield false.
func connClosed(conn *transport.Connection) bool {
	select {
	case _, open := <-conn.Errors():
		return !open
	default:
		return false
	}
}

View File

@@ -155,12 +155,12 @@ func (p *Pool) Connect(id string) error {
defer p.mu.Unlock()
if p.closed {
return NewPoolError("pool is closed")
return NewPoolError(ErrPoolClosed)
}
_, exists := p.peers[id]
if exists {
return NewPoolError("connection already exists")
return NewPoolError(ErrPeerExists)
}
// The worker factory must be non-blocking to avoid deadlocks
@@ -199,13 +199,13 @@ func (p *Pool) Remove(id string) error {
p.mu.Lock()
if p.closed {
p.mu.Unlock()
return NewPoolError("pool is closed")
return NewPoolError(ErrPoolClosed)
}
peer, exists := p.peers[id]
if !exists {
p.mu.Unlock()
return NewPoolError("connection not found")
return NewPoolError(ErrPeerNotFound)
}
delete(p.peers, id)
p.mu.Unlock()
@@ -225,12 +225,12 @@ func (p *Pool) Send(id string, data []byte) error {
defer p.mu.RUnlock()
if p.closed {
return NewPoolError("pool is closed")
return NewPoolError(ErrPoolClosed)
}
peer, exists := p.peers[id]
if !exists {
return NewPoolError("connection not found")
return NewPoolError(ErrPeerNotFound)
}
return peer.worker.Send(data)

View File

@@ -64,7 +64,7 @@ func _TestPoolConnect(t *testing.T) {
// trailing slash normalizes to same key
err = pool.Connect("wss://test/")
assert.Error(t, err)
assert.ErrorContains(t, err, "already exists")
assert.ErrorIs(t, err, ErrPeerExists)
pool.mu.RLock()
assert.Len(t, pool.peers, 1)
@@ -152,7 +152,7 @@ func _TestPoolRemove(t *testing.T) {
// remove unknown connection
err = pool.Remove("wss://unknown")
assert.ErrorContains(t, err, "connection not found")
assert.ErrorIs(t, err, ErrPeerNotFound)
})
t.Run("closed pool returns error", func(t *testing.T) {
@@ -172,7 +172,7 @@ func _TestPoolRemove(t *testing.T) {
// attempt to remove connection
err = pool.Remove("wss://test")
assert.ErrorContains(t, err, "pool is closed")
assert.ErrorIs(t, err, ErrPoolClosed)
})
}

View File

@@ -5,6 +5,7 @@ import (
"context"
"git.wisehodl.dev/jay/go-honeybee/transport"
"sync"
"sync/atomic"
"time"
)
@@ -16,11 +17,14 @@ type receivedMessage struct {
}
type Worker struct {
ctx context.Context
cancel context.CancelFunc
id string
config *WorkerConfig
outbound chan []byte
ctx context.Context
cancel context.CancelFunc
id string
config *WorkerConfig
conn atomic.Pointer[transport.Connection]
heartbeat chan struct{}
}
func NewWorker(
@@ -40,25 +44,34 @@ func NewWorker(
wctx, cancel := context.WithCancel(ctx)
w := &Worker{
ctx: wctx,
cancel: cancel,
id: id,
outbound: make(chan []byte, 64),
config: config,
ctx: wctx,
cancel: cancel,
id: id,
config: config,
heartbeat: make(chan struct{}),
}
return w, nil
}
func (w *Worker) Send(data []byte) error {
select {
case w.outbound <- data:
return nil
case <-w.ctx.Done():
return NewWorkerError(w.id, "worker is stopped")
default:
return NewWorkerError(w.id, "outbound queue full")
conn := w.conn.Load()
if conn == nil {
return NewWorkerError(w.id, ErrConnectionUnavailable)
}
err := conn.Send(data)
if err != nil {
return NewWorkerError(w.id, err)
}
select {
case w.heartbeat <- struct{}{}:
case <-w.ctx.Done():
}
return nil
}
func (w *Worker) Start(
@@ -76,7 +89,6 @@ func (w *Worker) runSession(
wctx WorkerContext,
messages chan<- receivedMessage,
heartbeat chan<- struct{},
dial chan<- struct{},
keepalive <-chan struct{},
@@ -88,19 +100,38 @@ func (w *Worker) runSession(
func (w *Worker) runReader(
conn *transport.Connection,
messages chan<- receivedMessage,
heartbeat chan<- struct{},
sessionDone <-chan struct{},
onStop func(),
) {
}
defer func() {
conn.Close()
onStop()
}()
func (w *Worker) runWriter(
conn *transport.Connection,
outbound <-chan []byte,
heartbeat chan<- struct{},
sessionDone <-chan struct{},
onStop func(),
) {
for {
select {
case <-sessionDone:
return
case data, ok := <-conn.Incoming():
if !ok {
// connection has closed
return
}
// send message forward
messages <- receivedMessage{
data: data,
receivedAt: time.Now(),
}
// send heartbeat
select {
case w.heartbeat <- struct{}{}:
case <-sessionDone:
return
}
}
}
}
func (w *Worker) runStopMonitor(
@@ -110,6 +141,16 @@ func (w *Worker) runStopMonitor(
sessionDone <-chan struct{},
onStop func(),
) {
defer func() {
conn.Close()
onStop()
}()
select {
case <-ctx.Done():
case <-keepalive:
case <-sessionDone:
}
}
func (w *Worker) runForwarder(
@@ -157,7 +198,6 @@ func (w *Worker) runForwarder(
func (w *Worker) runKeepalive(
ctx context.Context,
heartbeat <-chan struct{},
keepalive chan<- struct{},
) {
// disable keepalive timeout if not configured
@@ -176,7 +216,7 @@ func (w *Worker) runKeepalive(
select {
case <-ctx.Done():
return
case <-heartbeat:
case <-w.heartbeat:
// drain the timer channel and reset
if !timer.Stop() {
select {

View File

@@ -8,6 +8,7 @@ import (
"git.wisehodl.dev/jay/go-honeybee/types"
"github.com/stretchr/testify/assert"
"net/http"
"sync"
"sync/atomic"
"testing"
"time"
@@ -109,18 +110,21 @@ func TestRunForwarder(t *testing.T) {
func TestRunKeepalive(t *testing.T) {
t.Run("heartbeat resets timer, no keepalive signal fired", func(t *testing.T) {
heartbeat := make(chan struct{}, 3)
heartbeat := make(chan struct{})
keepalive := make(chan struct{}, 1)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
w := &Worker{config: &WorkerConfig{KeepaliveTimeout: 100 * time.Millisecond}}
go w.runKeepalive(ctx, heartbeat, keepalive)
w := &Worker{
config: &WorkerConfig{KeepaliveTimeout: 100 * time.Millisecond},
heartbeat: heartbeat,
}
go w.runKeepalive(ctx, keepalive)
// send heartbeats faster than the timeout
for i := 0; i < 5; i++ {
time.Sleep(30 * time.Millisecond)
heartbeat <- struct{}{}
w.heartbeat <- struct{}{}
}
// because the timer is being reset, keepalive signal should not be sent
@@ -135,13 +139,12 @@ func TestRunKeepalive(t *testing.T) {
})
t.Run("keepalive timeout fires signal", func(t *testing.T) {
heartbeat := make(chan struct{})
keepalive := make(chan struct{}, 1)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
w := &Worker{config: &WorkerConfig{KeepaliveTimeout: 20 * time.Millisecond}}
go w.runKeepalive(ctx, heartbeat, keepalive)
go w.runKeepalive(ctx, keepalive)
// send no heartbeats, wait for timeout and keepalive signal
assert.Eventually(t, func() bool {
@@ -155,14 +158,13 @@ func TestRunKeepalive(t *testing.T) {
})
t.Run("exits on context cancellation", func(t *testing.T) {
heartbeat := make(chan struct{})
keepalive := make(chan struct{}, 1)
ctx, cancel := context.WithCancel(context.Background())
w := &Worker{config: &WorkerConfig{KeepaliveTimeout: 20 * time.Second}}
done := make(chan struct{})
go func() {
w.runKeepalive(ctx, heartbeat, keepalive)
w.runKeepalive(ctx, keepalive)
close(done)
}()
@@ -178,6 +180,73 @@ func TestRunKeepalive(t *testing.T) {
})
}
// TestRunStopMonitor verifies that runStopMonitor closes the connection
// and invokes onStop for each of its three triggers: a keepalive signal,
// context cancellation, and session completion.
//
// Fix: onStop() runs after conn.Close() inside the monitor goroutine, so
// reading onStopCalled immediately after observing the closed connection
// raced with the callback. The flag is now polled with assert.Eventually.
func TestRunStopMonitor(t *testing.T) {
	t.Run("keepalive signal calls conn.Close and onStop", func(t *testing.T) {
		conn, _, _, _ := setupWorkerTestConnection(t)
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		keepalive := make(chan struct{}, 1)
		sessionDone := make(chan struct{})
		onStopCalled := atomic.Bool{}
		onStop := func() { onStopCalled.Store(true) }

		w := &Worker{id: "wss://test"}
		go w.runStopMonitor(ctx, conn, keepalive, sessionDone, onStop)

		keepalive <- struct{}{}

		assert.Eventually(t, func() bool {
			return connClosed(conn)
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
		// Poll the flag: onStop fires after Close in the same goroutine.
		assert.Eventually(t, func() bool {
			return onStopCalled.Load()
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
	})

	t.Run("ctx.Done calls conn.Close and onStop", func(t *testing.T) {
		conn, _, _, _ := setupWorkerTestConnection(t)
		ctx, cancel := context.WithCancel(context.Background())

		keepalive := make(chan struct{})
		sessionDone := make(chan struct{})
		onStopCalled := atomic.Bool{}
		onStop := func() { onStopCalled.Store(true) }

		w := &Worker{id: "wss://test"}
		go w.runStopMonitor(ctx, conn, keepalive, sessionDone, onStop)

		cancel()

		assert.Eventually(t, func() bool {
			return connClosed(conn)
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
		// Poll the flag: onStop fires after Close in the same goroutine.
		assert.Eventually(t, func() bool {
			return onStopCalled.Load()
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
	})

	t.Run("sessionDone close calls conn.Close and onStop", func(t *testing.T) {
		conn, _, _, _ := setupWorkerTestConnection(t)
		ctx, cancel := context.WithCancel(context.Background())
		defer cancel()

		keepalive := make(chan struct{})
		sessionDone := make(chan struct{})
		onStopCalled := atomic.Bool{}
		onStop := func() { onStopCalled.Store(true) }

		w := &Worker{id: "wss://test"}
		go w.runStopMonitor(ctx, conn, keepalive, sessionDone, onStop)

		close(sessionDone)

		assert.Eventually(t, func() bool {
			return connClosed(conn)
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
		// Poll the flag: onStop fires after Close in the same goroutine.
		assert.Eventually(t, func() bool {
			return onStopCalled.Load()
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
	})
}
func TestRunDialer(t *testing.T) {
t.Run("successful dial delivers connection to newConn", func(t *testing.T) {
w := &Worker{id: "wss://test"}
@@ -395,3 +464,104 @@ func TestRunDialer(t *testing.T) {
assert.Empty(t, newConn)
})
}
// TestWorkerSend exercises Worker.Send: data is written through the
// stored connection, one heartbeat is emitted per successful send, and
// a missing connection yields ErrConnectionUnavailable.
//
// Fix: the heartbeat-counting goroutine increments its counter AFTER
// Send's unbuffered channel handoff completes, so asserting the count
// immediately after Send returned was racy. Counts are now polled with
// assert.Eventually.
func TestWorkerSend(t *testing.T) {
	t.Run("data sent to mock socket", func(t *testing.T) {
		conn, _, _, outgoingData := setupWorkerTestConnection(t)
		defer conn.Close()

		ctx, cancel := context.WithCancel(context.Background())
		heartbeat := make(chan struct{})
		heartbeatCount := atomic.Int32{}

		w := &Worker{
			ctx:       ctx,
			cancel:    cancel,
			id:        "wss://test",
			heartbeat: heartbeat,
		}
		w.conn.Store(conn)
		defer w.cancel()

		// Drain heartbeats so Send's unbuffered signal never blocks.
		go func() {
			for range heartbeat {
				heartbeatCount.Add(1)
			}
		}()

		testData := []byte("hello")
		err := w.Send(testData)
		assert.NoError(t, err)

		// exactly one heartbeat was sent (poll: increment is async)
		assert.Eventually(t, func() bool {
			return heartbeatCount.Load() == 1
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)

		// message was sent by the socket
		assert.Eventually(t, func() bool {
			select {
			case msg := <-outgoingData:
				return string(msg.Data) == "hello"
			default:
				return false
			}
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
	})

	t.Run("sends one heartbeat per successful send", func(t *testing.T) {
		conn, _, _, _ := setupWorkerTestConnection(t)
		defer conn.Close()

		ctx, cancel := context.WithCancel(context.Background())
		heartbeat := make(chan struct{})
		heartbeatCount := atomic.Int32{}

		w := &Worker{
			ctx:       ctx,
			cancel:    cancel,
			id:        "wss://test",
			heartbeat: heartbeat,
		}
		w.conn.Store(conn)
		defer w.cancel()

		// Drain heartbeats so Send's unbuffered signal never blocks.
		go func() {
			for range heartbeat {
				heartbeatCount.Add(1)
			}
		}()

		const count = 3
		for i := 0; i < count; i++ {
			err := w.Send([]byte(fmt.Sprintf("msg-%d", i)))
			assert.NoError(t, err)
		}

		// poll: the final increment may lag the final Send return
		assert.Eventually(t, func() bool {
			return int(heartbeatCount.Load()) == count
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
	})

	t.Run("returns error if connection is unavailable", func(t *testing.T) {
		// no connection stored in the worker
		ctx, cancel := context.WithCancel(context.Background())
		heartbeat := make(chan struct{})

		w := &Worker{
			ctx:       ctx,
			cancel:    cancel,
			id:        "wss://test",
			heartbeat: heartbeat,
		}
		defer w.cancel()

		// drain any heartbeat (none expected on the error path)
		go func() {
			for range heartbeat {
			}
		}()

		err := w.Send([]byte("hello"))
		assert.ErrorIs(t, err, ErrConnectionUnavailable)
	})
}