Skip to content

Commit

Permalink
Change persistent queue to not use sized channel, improve memory usage and performance
Browse files Browse the repository at this point in the history

Signed-off-by: Bogdan Drutu <[email protected]>
  • Loading branch information
bogdandrutu committed Jan 9, 2025
1 parent 286b2f5 commit bdd2de7
Show file tree
Hide file tree
Showing 6 changed files with 225 additions and 271 deletions.
16 changes: 11 additions & 5 deletions exporter/exporterqueue/bounded_memory_queue.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,6 @@ import (
// boundedMemoryQueue is an in-memory Queue implementation backed by a
// sizedChannel of memQueueEl elements. Size accounting is delegated to the
// embedded channel (see newBoundedMemoryQueue, which wires in memQueueElSizer),
// so the queue itself keeps no separate sizer field.
type boundedMemoryQueue[T any] struct {
	component.StartFunc
	*sizedChannel[memQueueEl[T]]
}

// memoryQueueSettings defines internal parameters for boundedMemoryQueue creation.
Expand All @@ -30,18 +29,17 @@ type memoryQueueSettings[T any] struct {
// callback for dropped items (e.g. useful to emit metrics).
func newBoundedMemoryQueue[T any](set memoryQueueSettings[T]) Queue[T] {
	return &boundedMemoryQueue[T]{
		// The request sizer is wrapped in memQueueElSizer and handed to the
		// channel, so capacity accounting happens inside sizedChannel rather
		// than in the queue's Offer/Read paths.
		sizedChannel: newSizedChannel[memQueueEl[T]](set.capacity, nil, 0, memQueueElSizer[T]{sizer: set.sizer}),
	}
}

// Offer is used by the producer to submit new item to the queue. Calling this method on a stopped queue will panic.
func (q *boundedMemoryQueue[T]) Offer(ctx context.Context, req T) error {
	// Sizing is computed inside sizedChannel via memQueueElSizer, so only the
	// wrapped element is pushed here.
	return q.sizedChannel.push(memQueueEl[T]{ctx: ctx, req: req})
}

// Read pops the next element from the queue, returning its context and request.
// The first return value is a read index; for the in-memory queue it is always 0.
// ok is false when the queue has been stopped and drained.
func (q *boundedMemoryQueue[T]) Read(_ context.Context) (uint64, context.Context, T, bool) {
	item, ok := q.sizedChannel.pop()
	return 0, item.ctx, item.req, ok
}

Expand All @@ -60,3 +58,11 @@ type memQueueEl[T any] struct {
req T
ctx context.Context
}

// memQueueElSizer adapts a request sizer[T] to the memQueueEl[T] wrapper type
// stored in the sized channel.
type memQueueElSizer[T any] struct {
	sizer sizer[T]
}

// Sizeof reports the size of the request wrapped in el, delegating to the
// underlying request sizer.
func (s memQueueElSizer[T]) Sizeof(el memQueueEl[T]) int64 {
	return s.sizer.Sizeof(el.req)
}
33 changes: 14 additions & 19 deletions exporter/exporterqueue/bounded_memory_queue_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -16,6 +16,8 @@ import (
"github.com/stretchr/testify/require"

"go.opentelemetry.io/collector/component/componenttest"
"go.opentelemetry.io/collector/pdata/ptrace"
"go.opentelemetry.io/collector/pdata/testdata"
)

// In this test we run a queue with capacity 1 and a single consumer.
Expand Down Expand Up @@ -98,49 +100,50 @@ func TestShutdownWhileNotEmpty(t *testing.T) {
}

// Benchmark_QueueUsage_1000_requests measures queue throughput with a
// request-count-based sizer and 1000 requests per iteration.
func Benchmark_QueueUsage_1000_requests(b *testing.B) {
	benchmarkQueueUsage(b, &requestSizer[ptrace.Traces]{}, 1000)
}

// Benchmark_QueueUsage_100000_requests measures queue throughput with a
// request-count-based sizer and 100000 requests per iteration.
func Benchmark_QueueUsage_100000_requests(b *testing.B) {
	benchmarkQueueUsage(b, &requestSizer[ptrace.Traces]{}, 100000)
}

// Benchmark_QueueUsage_10000_items measures queue throughput with an
// item-count-based sizer.
func Benchmark_QueueUsage_10000_items(b *testing.B) {
	// each request has 10 items: 1000 requests = 10000 items
	benchmarkQueueUsage(b, &itemsSizer{}, 1000)
}

// Benchmark_QueueUsage_1M_items measures queue throughput with an
// item-count-based sizer at a larger scale.
func Benchmark_QueueUsage_1M_items(b *testing.B) {
	// each request has 10 items: 100000 requests = 1M items
	benchmarkQueueUsage(b, &itemsSizer{}, 100000)
}

// TestQueueUsage exercises the bounded memory queue end to end with both
// sizer strategies: per-request counting and per-item counting.
func TestQueueUsage(t *testing.T) {
	t.Run("requests_based", func(t *testing.T) {
		queueUsage(t, &requestSizer[ptrace.Traces]{}, 10)
	})
	t.Run("items_based", func(t *testing.T) {
		queueUsage(t, &itemsSizer{}, 10)
	})
}

func benchmarkQueueUsage(b *testing.B, sizer sizer[fakeReq], requestsCount int) {
func benchmarkQueueUsage(b *testing.B, sizer sizer[ptrace.Traces], requestsCount int) {
b.ReportAllocs()
for i := 0; i < b.N; i++ {
queueUsage(b, sizer, requestsCount)
}
}

func queueUsage(tb testing.TB, sizer sizer[fakeReq], requestsCount int) {
q := newBoundedMemoryQueue[fakeReq](memoryQueueSettings[fakeReq]{sizer: sizer, capacity: int64(10 * requestsCount)})
func queueUsage(tb testing.TB, sizer sizer[ptrace.Traces], requestsCount int) {
q := newBoundedMemoryQueue[ptrace.Traces](memoryQueueSettings[ptrace.Traces]{sizer: sizer, capacity: int64(10 * requestsCount)})
consumed := &atomic.Int64{}
require.NoError(tb, q.Start(context.Background(), componenttest.NewNopHost()))
ac := newAsyncConsumer(q, 1, func(context.Context, fakeReq) error {
ac := newAsyncConsumer(q, 1, func(context.Context, ptrace.Traces) error {
consumed.Add(1)
return nil
})
td := testdata.GenerateTraces(10)
for j := 0; j < requestsCount; j++ {
require.NoError(tb, q.Offer(context.Background(), fakeReq{10}))
require.NoError(tb, q.Offer(context.Background(), td))
}
assert.NoError(tb, q.Shutdown(context.Background()))
assert.NoError(tb, ac.Shutdown(context.Background()))
Expand All @@ -158,14 +161,6 @@ func TestZeroSizeNoConsumers(t *testing.T) {
assert.NoError(t, q.Shutdown(context.Background()))
}

// fakeReq is a minimal request stub carrying a fixed item count, used by the
// queue tests in place of a real exporter request.
type fakeReq struct {
	itemsCount int
}

// ItemsCount reports the number of items this fake request represents.
func (fr fakeReq) ItemsCount() int {
	return fr.itemsCount
}

func consume[T any](q Queue[T], consumeFunc func(context.Context, T) error) bool {
index, ctx, req, ok := q.Read(context.Background())
if !ok {
Expand Down
Loading

0 comments on commit bdd2de7

Please sign in to comment.