Renamed package, set up for worker session pattern.

This commit is contained in:
Jay
2026-04-18 12:44:49 -04:00
parent 2d5e55ccaa
commit 4c17b9539a
7 changed files with 117 additions and 58 deletions

185
initiatorpool/config.go Normal file
View File

@@ -0,0 +1,185 @@
package initiatorpool
import (
"git.wisehodl.dev/jay/go-honeybee/transport"
"time"
)
// Types

// WorkerFactory constructs a Worker for the given peer id. The stop channel
// is closed by the pool to tell the worker to shut down.
type WorkerFactory func(id string, stop <-chan struct{}) (*Worker, error)

// Pool Config

// PoolConfig holds the configuration for a Pool.
type PoolConfig struct {
	ConnectionConfig *transport.ConnectionConfig // per-connection transport settings; nil defers to transport defaults
	WorkerFactory    WorkerFactory               // optional custom worker constructor; nil means NewPool installs one using NewWorker
	WorkerConfig     *WorkerConfig               // settings for the default worker factory; unused with a custom factory (see NewPool)
}

// PoolOption mutates a PoolConfig, returning an error on invalid input.
type PoolOption func(*PoolConfig) error
// NewPoolConfig builds a PoolConfig by applying the given options to the
// defaults and validating the result. Returns the first option or
// validation error encountered.
func NewPoolConfig(options ...PoolOption) (*PoolConfig, error) {
	conf := GetDefaultPoolConfig()
	err := applyPoolOptions(conf, options...)
	if err == nil {
		err = ValidatePoolConfig(conf)
	}
	if err != nil {
		return nil, err
	}
	return conf, nil
}
// GetDefaultPoolConfig returns a PoolConfig with every field unset; all
// decisions are deferred to NewPool and the transport defaults.
func GetDefaultPoolConfig() *PoolConfig {
	// The zero value (all nil fields) is the default configuration.
	return new(PoolConfig)
}
// applyPoolOptions runs each option against config in order, stopping at
// the first error.
func applyPoolOptions(config *PoolConfig, options ...PoolOption) error {
	for _, apply := range options {
		err := apply(config)
		if err != nil {
			return err
		}
	}
	return nil
}
// ValidatePoolConfig checks the nested connection and worker configs when
// present; nil sub-configs are treated as valid.
func ValidatePoolConfig(config *PoolConfig) error {
	if cc := config.ConnectionConfig; cc != nil {
		if err := transport.ValidateConnectionConfig(cc); err != nil {
			return err
		}
	}
	if wc := config.WorkerConfig; wc != nil {
		if err := ValidateWorkerConfig(wc); err != nil {
			return err
		}
	}
	return nil
}
// WithConnectionConfig sets the transport connection settings, rejecting an
// invalid config up front.
func WithConnectionConfig(cc *transport.ConnectionConfig) PoolOption {
	return func(c *PoolConfig) error {
		if err := transport.ValidateConnectionConfig(cc); err != nil {
			return err
		}
		c.ConnectionConfig = cc
		return nil
	}
}
// WithWorkerConfig sets the worker settings used by the default worker
// factory, rejecting an invalid config up front.
func WithWorkerConfig(wc *WorkerConfig) PoolOption {
	return func(c *PoolConfig) error {
		if err := ValidateWorkerConfig(wc); err != nil {
			return err
		}
		c.WorkerConfig = wc
		return nil
	}
}
// WithWorkerFactory installs a custom worker constructor. No validation is
// performed here; a nil factory makes NewPool fall back to NewWorker.
func WithWorkerFactory(wf WorkerFactory) PoolOption {
	return func(c *PoolConfig) error {
		c.WorkerFactory = wf
		return nil
	}
}
// Worker Config

// WorkerConfig holds per-worker tuning knobs.
type WorkerConfig struct {
	IdleTimeout  time.Duration // zero disables idle timeouts (see WithIdleTimeout)
	MaxQueueSize int           // zero disables the forwarding queue limit (see WithMaxQueueSize)
}

// WorkerOption mutates a WorkerConfig, returning an error on invalid input.
type WorkerOption func(*WorkerConfig) error
// NewWorkerConfig builds a WorkerConfig by applying the given options to
// the defaults and validating the result. Returns the first option or
// validation error encountered.
func NewWorkerConfig(options ...WorkerOption) (*WorkerConfig, error) {
	conf := GetDefaultWorkerConfig()
	err := applyWorkerOptions(conf, options...)
	if err == nil {
		err = ValidateWorkerConfig(conf)
	}
	if err != nil {
		return nil, err
	}
	return conf, nil
}
// GetDefaultWorkerConfig returns the default worker settings: a 20 second
// idle timeout and no queue size limit.
func GetDefaultWorkerConfig() *WorkerConfig {
	return &WorkerConfig{
		IdleTimeout: 20 * time.Second,
		// MaxQueueSize left at its zero value: queue limits disabled.
	}
}
// applyWorkerOptions runs each option against config in order, stopping at
// the first error.
func applyWorkerOptions(config *WorkerConfig, options ...WorkerOption) error {
	for _, apply := range options {
		err := apply(config)
		if err != nil {
			return err
		}
	}
	return nil
}
// ValidateWorkerConfig checks that both worker settings are non-negative.
func ValidateWorkerConfig(config *WorkerConfig) error {
	if err := validateIdleTimeout(config.IdleTimeout); err != nil {
		return err
	}
	return validateMaxQueueSize(config.MaxQueueSize)
}
// validateMaxQueueSize rejects negative queue sizes; zero (disabled) is
// valid.
func validateMaxQueueSize(value int) error {
	if value >= 0 {
		return nil
	}
	return InvalidMaxQueueSize
}
// validateIdleTimeout rejects negative timeouts; zero (disabled) is valid.
func validateIdleTimeout(value time.Duration) error {
	if value >= 0 {
		return nil
	}
	return InvalidIdleTimeout
}
// WithIdleTimeout sets the worker idle timeout. When IdleTimeout is set to
// zero, idle timeouts are disabled; negative values are rejected.
func WithIdleTimeout(value time.Duration) WorkerOption {
	return func(c *WorkerConfig) error {
		if err := validateIdleTimeout(value); err != nil {
			return err
		}
		c.IdleTimeout = value
		return nil
	}
}
// WithMaxQueueSize sets the worker queue bound. When MaxQueueSize is set to
// zero, queue limits are disabled; negative values are rejected.
func WithMaxQueueSize(value int) WorkerOption {
	return func(c *WorkerConfig) error {
		if err := validateMaxQueueSize(value); err != nil {
			return err
		}
		c.MaxQueueSize = value
		return nil
	}
}

View File

@@ -0,0 +1,110 @@
package initiatorpool
import (
"git.wisehodl.dev/jay/go-honeybee/transport"
"github.com/stretchr/testify/assert"
"testing"
"time"
)
// TestNewPoolConfig verifies that NewPoolConfig with no options yields the
// all-nil default configuration.
func TestNewPoolConfig(t *testing.T) {
	conf, err := NewPoolConfig()
	assert.NoError(t, err)
	// Fix: assert.Equal takes (t, expected, actual); the original passed
	// them swapped, which produces misleading failure diffs.
	assert.Equal(t, &PoolConfig{
		ConnectionConfig: nil,
		WorkerConfig:     nil,
		WorkerFactory:    nil,
	}, conf)
}
// TestDefaultPoolConfig verifies that the default pool configuration leaves
// every field nil.
func TestDefaultPoolConfig(t *testing.T) {
	conf := GetDefaultPoolConfig()
	// Fix: assert.Equal takes (t, expected, actual); the original passed
	// them swapped, which produces misleading failure diffs.
	assert.Equal(t, &PoolConfig{
		ConnectionConfig: nil,
		WorkerConfig:     nil,
		WorkerFactory:    nil,
	}, conf)
}
// TestApplyPoolOptions verifies that a pool option is applied to the
// target config.
func TestApplyPoolOptions(t *testing.T) {
	conf := &PoolConfig{}
	opt := WithConnectionConfig(&transport.ConnectionConfig{})
	err := applyPoolOptions(conf, opt)
	assert.NoError(t, err)
	// The empty connection config was installed with its zero fields.
	assert.Equal(t, 0*time.Second, conf.ConnectionConfig.WriteTimeout)
}
// TestWithConnectionConfig covers both acceptance of a valid connection
// config and rejection of an invalid one.
func TestWithConnectionConfig(t *testing.T) {
	// valid config is applied
	conf := &PoolConfig{}
	valid := WithConnectionConfig(&transport.ConnectionConfig{WriteTimeout: 1 * time.Second})
	assert.NoError(t, applyPoolOptions(conf, valid))
	assert.NotNil(t, conf.ConnectionConfig)
	assert.Equal(t, 1*time.Second, conf.ConnectionConfig.WriteTimeout)

	// invalid config is rejected
	conf = &PoolConfig{}
	invalid := WithConnectionConfig(&transport.ConnectionConfig{WriteTimeout: -1 * time.Second})
	assert.Error(t, applyPoolOptions(conf, invalid))
}
// TestValidatePoolConfig runs table-driven validation cases covering empty,
// default, complete, and invalid pool configurations.
func TestValidatePoolConfig(t *testing.T) {
	cases := []struct {
		name        string
		conf        PoolConfig
		wantErr     error  // matched with ErrorIs when set
		wantErrText string // matched with ErrorContains when set
	}{
		{
			name: "valid empty",
			// Fix: was `*&PoolConfig{}` — a redundant dereference of an
			// address-of; the plain composite literal is identical.
			conf: PoolConfig{},
		},
		{
			name: "valid defaults",
			conf: *GetDefaultPoolConfig(),
		},
		{
			name: "valid complete",
			conf: PoolConfig{
				ConnectionConfig: &transport.ConnectionConfig{},
			},
		},
		{
			name: "invalid connection config",
			conf: PoolConfig{
				ConnectionConfig: &transport.ConnectionConfig{
					Retry: &transport.RetryConfig{
						// InitialDelay > MaxDelay must be rejected by the
						// transport validator.
						InitialDelay: 10 * time.Second,
						MaxDelay:     1 * time.Second,
					},
				},
			},
			wantErrText: "initial delay may not exceed maximum delay",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			err := ValidatePoolConfig(&tc.conf)
			if tc.wantErr != nil || tc.wantErrText != "" {
				if tc.wantErr != nil {
					assert.ErrorIs(t, err, tc.wantErr)
				}
				if tc.wantErrText != "" {
					assert.ErrorContains(t, err, tc.wantErrText)
				}
				return
			}
			assert.NoError(t, err)
		})
	}
}

17
initiatorpool/errors.go Normal file
View File

@@ -0,0 +1,17 @@
package initiatorpool
import "errors"
import "fmt"
var (
	// NOTE(review): Go convention names sentinel errors with an Err prefix
	// (ErrInvalidIdleTimeout); these are exported, so renaming would break
	// the public API and the existing names are kept.

	// InvalidIdleTimeout is returned when a worker idle timeout is negative.
	InvalidIdleTimeout = errors.New("idle timeout cannot be negative")
	// InvalidMaxQueueSize is returned when a worker queue bound is negative.
	InvalidMaxQueueSize = errors.New("maximum queue size cannot be negative")
)
// NewConfigError returns an error with text under the "configuration
// error" prefix.
func NewConfigError(text string) error {
	const prefix = "configuration error"
	return fmt.Errorf("%s: %s", prefix, text)
}
// NewPoolError returns an error with text under the "pool error" prefix.
func NewPoolError(text string) error {
	const prefix = "pool error"
	return fmt.Errorf("%s: %s", prefix, text)
}

241
initiatorpool/pool.go Normal file
View File

@@ -0,0 +1,241 @@
package initiatorpool
import (
"git.wisehodl.dev/jay/go-honeybee/transport"
"git.wisehodl.dev/jay/go-honeybee/types"
"log/slog"
"sync"
"time"
)
// Types

// Peer tracks one managed connection: its normalized URL id, the worker
// driving it, and the channel closed to stop that worker.
type Peer struct {
	id     string
	worker *Worker
	stop   chan struct{}
}

// WorkerContext carries the pool-owned channels and dependencies a worker
// needs while running.
type WorkerContext struct {
	Inbox            chan<- InboxMessage // received payloads, merged into the pool inbox
	Events           chan<- PoolEvent    // connect/disconnect notifications
	Errors           chan<- error        // asynchronous worker errors
	PoolDone         <-chan struct{}     // closed when the pool shuts down
	Logger           *slog.Logger        // may be nil when the pool has no logger
	Dialer           types.Dialer
	ConnectionConfig *transport.ConnectionConfig
}

// InboxMessage is one payload received from a peer.
type InboxMessage struct {
	ID         string // normalized peer URL
	Data       []byte
	ReceivedAt time.Time
}

// PoolEventKind labels the lifecycle change carried by a PoolEvent.
type PoolEventKind string

const (
	EventConnected    PoolEventKind = "connected"
	EventDisconnected PoolEventKind = "disconnected"
)

// PoolEvent reports a connection lifecycle change for peer ID.
type PoolEvent struct {
	ID   string
	Kind PoolEventKind
}
// Pool

// Pool manages a set of initiator (outbound) connections, one worker per
// peer, multiplexing their messages, events, and errors onto shared
// channels.
type Pool struct {
	peers  map[string]*Peer  // keyed by normalized URL; guarded by mu
	inbox  chan InboxMessage // merged messages from all workers
	events chan PoolEvent    // merged lifecycle events
	errors chan error        // merged worker errors
	done   chan struct{}     // closed exactly once by Close
	dialer types.Dialer
	config *PoolConfig
	logger *slog.Logger // may be nil
	mu     sync.RWMutex // guards peers and closed
	wg     sync.WaitGroup
	closed bool
}
// NewPool creates a Pool from config (nil selects the defaults), validating
// it and installing the default NewWorker-based factory when none is
// supplied.
//
// If a custom factory is supplied, config.WorkerConfig is not used.
// The factory function should be non-blocking or else Connect() may cause
// deadlocks.
func NewPool(config *PoolConfig, logger *slog.Logger) (*Pool, error) {
	if config == nil {
		config = GetDefaultPoolConfig()
	} else {
		// Fix: work on a shallow copy so installing the default factory
		// below does not mutate the caller-owned config struct.
		copied := *config
		config = &copied
	}
	if config.WorkerFactory == nil {
		config.WorkerFactory = func(id string, stop <-chan struct{}) (*Worker, error) {
			return NewWorker(id, stop, config.WorkerConfig)
		}
	}
	if err := ValidatePoolConfig(config); err != nil {
		return nil, err
	}
	p := &Pool{
		peers:  make(map[string]*Peer),
		inbox:  make(chan InboxMessage, 256),
		events: make(chan PoolEvent, 10),
		errors: make(chan error, 10),
		done:   make(chan struct{}),
		dialer: transport.NewDialer(),
		config: config,
		logger: logger,
	}
	return p, nil
}
// Peers returns a snapshot of the current peers keyed by normalized URL.
// Fix: the original returned the internal map without holding the lock,
// racing with Connect/Remove/Close; a copy is taken under the read lock.
func (p *Pool) Peers() map[string]*Peer {
	p.mu.RLock()
	defer p.mu.RUnlock()
	snapshot := make(map[string]*Peer, len(p.peers))
	for id, peer := range p.peers {
		snapshot[id] = peer
	}
	return snapshot
}
// Inbox returns the channel of messages received from all peers. Close
// closes it once every worker has exited.
func (p *Pool) Inbox() chan InboxMessage {
	return p.inbox
}

// Events returns the channel of connection lifecycle events; closed by
// Close after all workers exit.
func (p *Pool) Events() chan PoolEvent {
	return p.events
}

// Errors returns the channel of asynchronous worker errors; closed by
// Close after all workers exit.
func (p *Pool) Errors() chan error {
	return p.errors
}
// SetDialer replaces the dialer used for new connections. A nil dialer is a
// programmer error and panics.
func (p *Pool) SetDialer(d types.Dialer) {
	if d != nil {
		p.dialer = d
		return
	}
	panic("dialer cannot be nil")
}
// Close shuts the pool down exactly once: it marks the pool closed, signals
// every worker to stop, and, after all workers have exited, closes the
// shared inbox, events, and errors channels. Subsequent calls are no-ops.
func (p *Pool) Close() {
	p.mu.Lock()
	if p.closed {
		p.mu.Unlock()
		return
	}
	p.closed = true
	close(p.done)
	peers := p.peers
	p.peers = make(map[string]*Peer)
	p.mu.Unlock()
	// Signal workers outside the lock. Fix: the original loop variable was
	// named p, shadowing the receiver inside the loop body.
	for _, peer := range peers {
		close(peer.stop)
	}
	// Close the output channels only after every worker goroutine has
	// exited, so no worker can send on a closed channel.
	go func() {
		p.wg.Wait()
		close(p.inbox)
		close(p.events)
		close(p.errors)
	}()
}
// Connect normalizes id to a canonical URL, creates a worker for it, and
// starts the worker goroutine. It returns an error for a malformed URL, a
// closed pool, a duplicate connection, or a failing worker factory.
func (p *Pool) Connect(id string) error {
	id, err := transport.NormalizeURL(id)
	if err != nil {
		return err
	}
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.closed {
		return NewPoolError("pool is closed")
	}
	_, exists := p.peers[id]
	if exists {
		return NewPoolError("connection already exists")
	}
	// Create new worker
	stop := make(chan struct{})
	// The worker factory must be non-blocking to avoid deadlocks: it runs
	// while p.mu is held.
	worker, err := p.config.WorkerFactory(id, stop)
	if err != nil {
		close(stop)
		return err
	}
	var logger *slog.Logger
	if p.logger != nil {
		// Tag all worker log lines with the peer id.
		logger = p.logger.With("id", id)
	}
	ctx := WorkerContext{
		Inbox:            p.inbox,
		Events:           p.events,
		Errors:           p.errors,
		PoolDone:         p.done,
		Logger:           logger,
		Dialer:           p.dialer,
		ConnectionConfig: p.config.ConnectionConfig,
	}
	// Add to the WaitGroup before the goroutine starts so Close cannot
	// pass wg.Wait prematurely.
	p.wg.Add(1)
	go worker.Start(ctx, &p.wg)
	p.peers[id] = &Peer{id: id, worker: worker, stop: stop}
	return nil
}
// Remove stops and forgets the connection for id (normalized first). It
// returns an error for a malformed URL, a closed pool, or an unknown
// connection.
func (p *Pool) Remove(id string) error {
	normalized, err := transport.NormalizeURL(id)
	if err != nil {
		return err
	}
	p.mu.Lock()
	if p.closed {
		p.mu.Unlock()
		return NewPoolError("pool is closed")
	}
	peer, ok := p.peers[normalized]
	if !ok {
		p.mu.Unlock()
		return NewPoolError("connection not found")
	}
	delete(p.peers, normalized)
	p.mu.Unlock()
	// Signal the worker only after releasing the lock.
	close(peer.stop)
	return nil
}
// Send delivers data to the peer identified by id (normalized first) via
// that peer's worker. It returns an error for a malformed URL, a closed
// pool, or an unknown connection.
func (p *Pool) Send(id string, data []byte) error {
	normalized, err := transport.NormalizeURL(id)
	if err != nil {
		return err
	}
	p.mu.RLock()
	defer p.mu.RUnlock()
	if p.closed {
		return NewPoolError("pool is closed")
	}
	peer, ok := p.peers[normalized]
	if !ok {
		return NewPoolError("connection not found")
	}
	return peer.worker.Send(data)
}

225
initiatorpool/pool_test.go Normal file
View File

@@ -0,0 +1,225 @@
package initiatorpool
import (
"fmt"
"git.wisehodl.dev/jay/go-honeybee/honeybeetest"
"git.wisehodl.dev/jay/go-honeybee/transport"
"git.wisehodl.dev/jay/go-honeybee/types"
"github.com/gorilla/websocket"
"github.com/stretchr/testify/assert"
"net/http"
"testing"
"time"
)
// TODO: Worker must connect and emit events.
// _TestPoolConnect is disabled (leading underscore) until the worker
// session is implemented; it specifies Connect's behavior for success,
// duplicate URLs, and dial failure.
func _TestPoolConnect(t *testing.T) {
	t.Run("successfully adds connection", func(t *testing.T) {
		mockSocket := honeybeetest.NewMockSocket()
		mockDialer := &honeybeetest.MockDialer{
			DialFunc: func(string, http.Header) (types.Socket, *http.Response, error) {
				return mockSocket, nil, nil
			},
		}
		pool, err := NewPool(nil, nil)
		assert.NoError(t, err)
		pool.dialer = mockDialer
		err = pool.Connect("wss://test")
		assert.NoError(t, err)
		assert.Eventually(t, func() bool {
			select {
			case event := <-pool.events:
				return event.ID == "wss://test" && event.Kind == EventConnected
			default:
				return false
			}
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
		// Fix: guard the peer-map read with the pool lock, matching the
		// other subtests; worker goroutines may mutate pool state
		// concurrently.
		pool.mu.RLock()
		_, exists := pool.peers["wss://test"]
		pool.mu.RUnlock()
		assert.True(t, exists)
		pool.Close()
	})
	t.Run("does not add duplicate", func(t *testing.T) {
		mockSocket := honeybeetest.NewMockSocket()
		mockDialer := &honeybeetest.MockDialer{
			DialFunc: func(string, http.Header) (types.Socket, *http.Response, error) {
				return mockSocket, nil, nil
			},
		}
		pool, err := NewPool(nil, nil)
		assert.NoError(t, err)
		pool.dialer = mockDialer
		err = pool.Connect("wss://test")
		assert.NoError(t, err)
		// trailing slash normalizes to same key
		err = pool.Connect("wss://test/")
		assert.Error(t, err)
		assert.ErrorContains(t, err, "already exists")
		pool.mu.RLock()
		assert.Len(t, pool.peers, 1)
		pool.mu.RUnlock()
		pool.Close()
	})
	t.Run("fails to add connection", func(t *testing.T) {
		// Short retry settings keep the failing dial fast.
		pool, err := NewPool(
			&PoolConfig{
				ConnectionConfig: &transport.ConnectionConfig{
					Retry: &transport.RetryConfig{
						MaxRetries:   1,
						InitialDelay: 1 * time.Millisecond,
						MaxDelay:     5 * time.Millisecond,
					}},
			}, nil)
		assert.NoError(t, err)
		pool.dialer = &honeybeetest.MockDialer{
			DialFunc: func(string, http.Header) (types.Socket, *http.Response, error) {
				return nil, nil, fmt.Errorf("dial failed")
			},
		}
		err = pool.Connect("wss://test")
		assert.Error(t, err)
		pool.mu.RLock()
		assert.Len(t, pool.peers, 0)
		pool.mu.RUnlock()
		// No connected event may be emitted for a failed dial.
		select {
		case event := <-pool.events:
			t.Fatalf("unexpected event: %+v", event)
		default:
		}
		pool.Close()
	})
}
// TODO: Worker must stop connection and emit events
// _TestPoolRemove is disabled (leading underscore) until the worker session
// is implemented; it specifies Remove's behavior for known URLs, unknown
// URLs, and a closed pool.
func _TestPoolRemove(t *testing.T) {
	t.Run("removes known url", func(t *testing.T) {
		mockSocket := honeybeetest.NewMockSocket()
		mockDialer := &honeybeetest.MockDialer{
			DialFunc: func(string, http.Header) (types.Socket, *http.Response, error) {
				return mockSocket, nil, nil
			},
		}
		pool, err := NewPool(nil, nil)
		assert.NoError(t, err)
		pool.dialer = mockDialer
		pool.Connect("wss://test")
		expectEvent(t, pool.events, "wss://test", EventConnected)
		// trailing slash normalizes to the same key
		err = pool.Remove("wss://test/")
		assert.NoError(t, err)
		// expect a disconnected event
		expectEvent(t, pool.events, "wss://test", EventDisconnected)
		// connection no longer in pool
		pool.mu.Lock()
		defer pool.mu.Unlock()
		// Fix: check the URL actually connected above; the original looked
		// up "wss://peer2", which never exists, so the assertion passed
		// vacuously.
		_, ok := pool.peers["wss://test"]
		assert.False(t, ok, "connection is still in pool")
	})
	t.Run("unknown url returns error", func(t *testing.T) {
		mockSocket := honeybeetest.NewMockSocket()
		mockDialer := &honeybeetest.MockDialer{
			DialFunc: func(string, http.Header) (types.Socket, *http.Response, error) {
				return mockSocket, nil, nil
			},
		}
		pool, err := NewPool(nil, nil)
		assert.NoError(t, err)
		pool.dialer = mockDialer
		// remove unknown connection
		err = pool.Remove("wss://unknown")
		assert.ErrorContains(t, err, "connection not found")
	})
	t.Run("closed pool returns error", func(t *testing.T) {
		mockSocket := honeybeetest.NewMockSocket()
		mockDialer := &honeybeetest.MockDialer{
			DialFunc: func(string, http.Header) (types.Socket, *http.Response, error) {
				return mockSocket, nil, nil
			},
		}
		pool, err := NewPool(nil, nil)
		assert.NoError(t, err)
		pool.dialer = mockDialer
		// close pool
		pool.Close()
		// attempt to remove connection
		err = pool.Remove("wss://test")
		assert.ErrorContains(t, err, "pool is closed")
	})
}
// TODO: update worker to be responsible for send
// _TestPoolSend is disabled (leading underscore) pending worker send
// support; it specifies that Send routes data through the worker to the
// socket as a websocket text message.
func _TestPoolSend(t *testing.T) {
	mockSocket := honeybeetest.NewMockSocket()
	outgoingData := make(chan honeybeetest.MockOutgoingData, 10)
	mockSocket.WriteMessageFunc = func(msgType int, data []byte) error {
		outgoingData <- honeybeetest.MockOutgoingData{MsgType: msgType, Data: data}
		return nil
	}
	dialer := &honeybeetest.MockDialer{
		DialFunc: func(string, http.Header) (types.Socket, *http.Response, error) {
			return mockSocket, nil, nil
		},
	}
	pool, err := NewPool(nil, nil)
	assert.NoError(t, err)
	pool.dialer = dialer
	assert.NoError(t, pool.Connect("wss://test"))
	expectEvent(t, pool.events, "wss://test", EventConnected)
	assert.NoError(t, pool.Send("wss://test", []byte("hello")))
	honeybeetest.ExpectWrite(t, outgoingData, websocket.TextMessage, []byte("hello"))
	pool.Close()
}
// expectEvent polls events until one matching the expected URL and kind
// arrives, failing the test after the standard timeout.
func expectEvent(
	t *testing.T,
	events chan PoolEvent,
	expectedURL string,
	expectedKind PoolEventKind,
) {
	t.Helper()
	failure := fmt.Sprintf("expected event: URL=%q, Kind=%q", expectedURL, expectedKind)
	matches := func() bool {
		select {
		case e := <-events:
			return e.ID == expectedURL && e.Kind == expectedKind
		default:
			return false
		}
	}
	assert.Eventually(t, matches, honeybeetest.TestTimeout, honeybeetest.TestTick, failure)
}

225
initiatorpool/worker.go Normal file
View File

@@ -0,0 +1,225 @@
package initiatorpool
import (
"container/list"
"git.wisehodl.dev/jay/go-honeybee/transport"
"sync"
"time"
)
// Worker

// receivedMessage is one inbound payload paired with its arrival time.
type receivedMessage struct {
	data       []byte
	receivedAt time.Time
}

// Worker owns a single peer connection session for one normalized URL,
// running until its stop channel closes.
type Worker struct {
	id       string          // normalized peer URL
	stop     <-chan struct{} // closed by the owner (pool) to stop this worker
	config   *WorkerConfig
	outbound chan []byte // buffered payloads awaiting the writer
}
// NewWorker builds a Worker for id. A nil config falls back to the package
// defaults; the config is validated before use. The stop channel is closed
// by the owner to shut the worker down.
func NewWorker(
	id string,
	stop <-chan struct{},
	config *WorkerConfig,
) (*Worker, error) {
	if config == nil {
		config = GetDefaultWorkerConfig()
	}
	if err := ValidateWorkerConfig(config); err != nil {
		return nil, err
	}
	return &Worker{
		id:       id,
		stop:     stop,
		config:   config,
		outbound: make(chan []byte, 64),
	}, nil
}
// dial creates a transport connection for this worker's URL using the
// context's dialer and config, then attempts to connect. The connection is
// returned even when Connect fails.
func (w *Worker) dial(ctx WorkerContext) (*transport.Connection, error) {
	conn, err := transport.NewConnection(w.id, ctx.ConnectionConfig, ctx.Logger)
	if err != nil {
		return nil, err
	}
	conn.SetDialer(ctx.Dialer)
	err = conn.Connect()
	return conn, err
}
// Send queues data for delivery to the peer.
// TODO(review): stub — currently discards data and always reports success;
// to be implemented by the worker session rework (see _TestPoolSend).
func (w *Worker) Send(data []byte) error {
	return nil
}
// Start runs the worker until stopped, releasing wg when it has fully
// exited.
//
// TODO(review): session logic is not yet implemented. Fix applied here:
// the stub must still honor the WaitGroup contract — Pool.Connect calls
// wg.Add(1) before Start, and without a matching Done, Pool.Close blocks
// forever in wg.Wait.
func (w *Worker) Start(
	ctx WorkerContext,
	wg *sync.WaitGroup,
) {
	defer wg.Done()
}
// runSession coordinates one connection session: the reader, writer, and
// monitor goroutines for conn, plus reconnects, idle signals, and shutdown.
//
// TODO(review): unimplemented. Fix applied here: the original declaration
// had no body at all, which only links if the function is supplied
// elsewhere (e.g. in assembly); an empty body is added so the package
// compiles and links.
func (w *Worker) runSession(
	conn *transport.Connection,
	messages chan<- receivedMessage,
	heartbeat chan<- struct{},
	reconnect chan<- struct{},
	outbound <-chan []byte,
	idle <-chan struct{},
	newConn <-chan *transport.Connection,
	ctx WorkerContext,
	workerDone <-chan struct{},
	poolDone <-chan struct{},
) {
}
// runReader is intended to read from conn, publishing payloads on messages
// and activity signals on heartbeat (directions per the signature), calling
// onStop when it ends the session.
// TODO(review): stub — not yet implemented.
func (w *Worker) runReader(
	conn *transport.Connection,
	messages chan<- receivedMessage,
	heartbeat chan<- struct{},
	workerDone <-chan struct{},
	poolDone <-chan struct{},
	sessionDone <-chan struct{},
	onStop func(),
) {
}
// runWriter is intended to drain outbound, writing payloads to conn and
// signaling heartbeat (directions per the signature), calling onStop when
// it ends the session.
// TODO(review): stub — not yet implemented.
func (w *Worker) runWriter(
	conn *transport.Connection,
	outbound <-chan []byte,
	heartbeat chan<- struct{},
	workerDone <-chan struct{},
	poolDone <-chan struct{},
	sessionDone <-chan struct{},
	onStop func(),
) {
}
// runStopMonitor is intended to watch the stop/done channels and run onStop
// when any of them closes — contract inferred from the signature, to be
// confirmed when implemented.
// TODO(review): stub — not yet implemented.
func (w *Worker) runStopMonitor(
	conn *transport.Connection,
	stop <-chan struct{},
	workerDone <-chan struct{},
	poolDone <-chan struct{},
	sessionDone <-chan struct{},
	onStop func(),
) {
}
// runForwarder shuttles received messages into the pool inbox through a
// FIFO queue so a slow inbox consumer never blocks the producer. When
// maxQueueSize > 0 and the queue is full, the oldest entry is dropped to
// make room. Returns when stop or poolDone closes.
func (w *Worker) runForwarder(
	messages <-chan receivedMessage,
	inbox chan<- InboxMessage,
	stop <-chan struct{},
	poolDone <-chan struct{},
	maxQueueSize int,
) {
	pending := list.New()
	for {
		// A nil channel is never ready in select: only offer to the inbox
		// while something is queued.
		var sink chan<- InboxMessage
		var head receivedMessage
		if pending.Len() > 0 {
			sink = inbox
			head = pending.Front().Value.(receivedMessage)
		}
		select {
		case <-stop:
			return
		case <-poolDone:
			return
		case incoming := <-messages:
			if maxQueueSize > 0 && pending.Len() >= maxQueueSize {
				// Bounded queue: evict the oldest entry.
				pending.Remove(pending.Front())
			}
			pending.PushBack(incoming)
		case sink <- InboxMessage{
			ID:         w.id,
			Data:       head.data,
			ReceivedAt: head.receivedAt,
		}:
			// Delivered; pop the head of the queue.
			pending.Remove(pending.Front())
		}
	}
}
// runIdleMonitor sends on idle whenever no heartbeat has arrived within
// w.config.IdleTimeout. Heartbeats reset the timer; a timeout of zero (or
// negative) disables monitoring. Returns when stop or poolDone closes.
func (w *Worker) runIdleMonitor(
	heartbeat <-chan struct{},
	idle chan<- struct{},
	stop <-chan struct{},
	poolDone <-chan struct{},
) {
	// disable idle timeout if not configured
	if w.config.IdleTimeout <= 0 {
		// wait for stop signal and exit
		select {
		case <-stop:
		case <-poolDone:
		}
		return
	}
	timer := time.NewTimer(w.config.IdleTimeout)
	defer timer.Stop()
	for {
		select {
		case <-stop:
			return
		case <-poolDone:
			return
		case <-heartbeat:
			// Stop-then-drain before Reset: the timer may have fired
			// concurrently, leaving a stale value in timer.C that would
			// trigger a spurious idle signal on the next iteration.
			if !timer.Stop() {
				select {
				case <-timer.C:
				default:
				}
			}
			timer.Reset(w.config.IdleTimeout)
		// timer completed
		case <-timer.C:
			// Non-blocking send: drop the idle signal if the receiver is
			// not ready, then start the next idle period.
			select {
			case idle <- struct{}{}:
			default:
			}
			timer.Reset(w.config.IdleTimeout)
		}
	}
}
// runReconnector is intended to listen for reconnect requests and deliver
// replacement connections on newConn (directions per the signature).
// TODO(review): stub — not yet implemented (see the empty TestRunReconnector
// placeholders).
func (w *Worker) runReconnector(
	reconnect <-chan struct{},
	newConn chan<- *transport.Connection,
	stop <-chan struct{},
	poolDone <-chan struct{},
) {
}

View File

@@ -0,0 +1,185 @@
package initiatorpool
import (
"git.wisehodl.dev/jay/go-honeybee/honeybeetest"
// "git.wisehodl.dev/jay/go-honeybee/transport"
// "git.wisehodl.dev/jay/go-honeybee/types"
"github.com/stretchr/testify/assert"
// "net/http"
"testing"
"time"
)
// Forwarder

// TestRunForwarder exercises the worker's message-forwarding loop:
// pass-through delivery, oldest-message eviction when the queue is bounded,
// and prompt exit on the stop signal. A nil poolDone channel is passed in
// each subtest; a nil channel is never ready in select.
func TestRunForwarder(t *testing.T) {
	t.Run("message passes through to inbox", func(t *testing.T) {
		messages := make(chan receivedMessage, 1)
		inbox := make(chan InboxMessage, 1)
		stop := make(chan struct{})
		defer close(stop)
		w := &Worker{id: "wss://test"}
		// maxQueueSize 0 disables the queue bound.
		go w.runForwarder(messages, inbox, stop, nil, 0)
		messages <- receivedMessage{data: []byte("hello"), receivedAt: time.Now()}
		// The payload must arrive on the inbox tagged with the worker id.
		assert.Eventually(t, func() bool {
			select {
			case msg := <-inbox:
				return string(msg.Data) == "hello" && msg.ID == "wss://test"
			default:
				return false
			}
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
	})
	t.Run("oldest message dropped when queue is full", func(t *testing.T) {
		messages := make(chan receivedMessage, 1)
		inbox := make(chan InboxMessage, 1)
		stop := make(chan struct{})
		defer close(stop)
		gate := make(chan struct{})
		gatedInbox := make(chan InboxMessage)
		// gate the inbox from receiving messages until the gate is opened
		go func() {
			<-gate
			for msg := range gatedInbox {
				inbox <- msg
			}
		}()
		w := &Worker{id: "wss://test"}
		// queue bounded at 2 entries
		go w.runForwarder(messages, gatedInbox, stop, nil, 2)
		// send three messages while the gated inbox is blocked
		messages <- receivedMessage{data: []byte("first"), receivedAt: time.Now()}
		messages <- receivedMessage{data: []byte("second"), receivedAt: time.Now()}
		messages <- receivedMessage{data: []byte("third"), receivedAt: time.Now()}
		// allow time for the first message to be dropped
		time.Sleep(20 * time.Millisecond)
		// close the gate, draining messages into the inbox
		close(gate)
		// receive messages from the inbox
		var received []string
		assert.Eventually(t, func() bool {
			select {
			case msg := <-inbox:
				received = append(received, string(msg.Data))
			default:
			}
			return len(received) == 2
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
		// first message was dropped
		assert.Equal(t, []string{"second", "third"}, received)
	})
	t.Run("exits on stop", func(t *testing.T) {
		messages := make(chan receivedMessage, 1)
		inbox := make(chan InboxMessage, 1)
		stop := make(chan struct{})
		w := &Worker{id: "wss://test"}
		done := make(chan struct{})
		go func() {
			w.runForwarder(messages, inbox, stop, nil, 0)
			close(done)
		}()
		close(stop)
		// runForwarder must return (closing done) once stop is closed.
		assert.Eventually(t, func() bool {
			select {
			case <-done:
				return true
			default:
				return false
			}
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
	})
}
// TestRunIdleMonitor exercises the worker idle watchdog: heartbeat-driven
// timer resets, idle signaling after a quiet period, and exit on stop. A
// nil poolDone channel is passed in each subtest; a nil channel is never
// ready in select.
func TestRunIdleMonitor(t *testing.T) {
	t.Run("heartbeat resets timer, no idle signal fired", func(t *testing.T) {
		heartbeat := make(chan struct{}, 3)
		idle := make(chan struct{}, 1)
		stop := make(chan struct{})
		defer close(stop)
		w := &Worker{config: &WorkerConfig{IdleTimeout: 100 * time.Millisecond}}
		go w.runIdleMonitor(heartbeat, idle, stop, nil)
		// send heartbeats faster than the timeout
		for i := 0; i < 5; i++ {
			time.Sleep(30 * time.Millisecond)
			heartbeat <- struct{}{}
		}
		// because the timer is being reset, idle signal should not be sent
		assert.Never(t, func() bool {
			select {
			case <-idle:
				return true
			default:
				return false
			}
		}, honeybeetest.NegativeTestTimeout, honeybeetest.TestTick)
	})
	t.Run("idle timeout fires signal", func(t *testing.T) {
		heartbeat := make(chan struct{})
		idle := make(chan struct{}, 1)
		stop := make(chan struct{})
		defer close(stop)
		// short timeout so the idle signal fires quickly
		w := &Worker{config: &WorkerConfig{IdleTimeout: 20 * time.Millisecond}}
		go w.runIdleMonitor(heartbeat, idle, stop, nil)
		// send no heartbeats, wait for timeout and idle signal
		assert.Eventually(t, func() bool {
			select {
			case <-idle:
				return true
			default:
				return false
			}
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
	})
	t.Run("exits on stop", func(t *testing.T) {
		heartbeat := make(chan struct{})
		idle := make(chan struct{}, 1)
		stop := make(chan struct{})
		// long timeout guarantees any exit is due to stop, not the timer
		w := &Worker{config: &WorkerConfig{IdleTimeout: 20 * time.Second}}
		done := make(chan struct{})
		go func() {
			w.runIdleMonitor(heartbeat, idle, stop, nil)
			close(done)
		}()
		// send stop signal
		close(stop)
		// runIdleMonitor must return (closing done) once stop is closed.
		assert.Eventually(t, func() bool {
			select {
			case <-done:
				return true
			default:
				return false
			}
		}, honeybeetest.TestTimeout, honeybeetest.TestTick)
	})
}
// TestRunReconnector names the intended reconnector behaviors.
// TODO(review): all three subtests are empty and pass vacuously — fill them
// in alongside the runReconnector implementation.
func TestRunReconnector(t *testing.T) {
	t.Run("reconnect emits events, new connection", func(t *testing.T) {})
	t.Run("dial failure emits error", func(t *testing.T) {})
	t.Run("exits on stop", func(t *testing.T) {})
}