Skip to content
Open
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
49 changes: 25 additions & 24 deletions go/pools/smartconnpool/pool.go
Original file line number Diff line number Diff line change
Expand Up @@ -781,35 +781,36 @@ func (pool *ConnPool[C]) closeIdleResources(now time.Time) {
mono := monotonicFromTime(now)

closeInStack := func(s *connStack[C]) {
// Do a read-only best effort iteration of all the connection in this
// stack and atomically attempt to mark them as expired.
// Any connections that are marked as expired are _not_ removed from
// the stack; it's generally unsafe to remove nodes from the stack
// besides the head. When clients pop from the stack, they'll immediately
// notice the expired connection and ignore it.
// see: timestamp.expired
for conn := s.Peek(); conn != nil; conn = conn.next.Load() {
expiredConnections := make([]*Pooled[C], 0)
validConnections := make([]*Pooled[C], 0)

// Pop out connections from the stack until we get a `nil` connection
for conn, ok := s.Pop(); ok; conn, ok = s.Pop() {
if conn.timeUsed.expired(mono, timeout) {
pool.Metrics.idleClosed.Add(1)
expiredConnections = append(expiredConnections, conn)
} else {
validConnections = append(validConnections, conn)
}
}

conn.Close()
pool.closedConn()
// Return all the valid connections back to waiters or the stack
for _, conn := range validConnections {
pool.tryReturnConn(conn)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We don't want to call pool.closedConn() if this returns false?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't think so. 🤔

tryReturnConn returns true if there was a direct connection hand-off via the waitlist, but it returns false if:

  • the connection was closed because we have too many connections open. In this case, closeOnIdleLimitReached will manage the active connection count and there is no need to call closedConn
  • the connection was added back to one of the stacks (no need to call closedConn because we didn't close anything)

}

// Using context.Background() is fine since MySQL connection already enforces
// a connect timeout via the `db-connect-timeout-ms` config param.
c, err := pool.getNew(context.Background())
if err != nil {
// If we couldn't open a new connection, just continue
continue
}
// Close all the expired connections and open new ones to replace them
for _, conn := range expiredConnections {
pool.Metrics.idleClosed.Add(1)

// opening a new connection might have raced with other goroutines,
// so it's possible that we got back `nil` here
if c != nil {
// Return the new connection to the pool
pool.tryReturnConn(c)
}
conn.Close()

err := pool.connReopen(context.Background(), conn, mono)
if err != nil {
pool.closedConn()
continue
}

pool.tryReturnConn(conn)
}
}

Expand Down
16 changes: 11 additions & 5 deletions go/pools/smartconnpool/pool_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -1364,16 +1364,22 @@ func TestIdleTimeoutConnectionLeak(t *testing.T) {

// Try to get connections while they're being reopened
// This should trigger the bug where connections get discarded
wg := sync.WaitGroup{}

for i := 0; i < 2; i++ {
getCtx, cancel := context.WithTimeout(t.Context(), 50*time.Millisecond)
defer cancel()
wg.Go(func() {
getCtx, cancel := context.WithTimeout(t.Context(), 300*time.Millisecond)
defer cancel()

conn, err := p.Get(getCtx, nil)
require.NoError(t, err)
conn, err := p.Get(getCtx, nil)
require.NoError(t, err)

p.put(conn)
p.put(conn)
})
}

wg.Wait()

// Wait a moment for all reopening to complete
require.EventuallyWithT(t, func(c *assert.CollectT) {
// Check the actual number of currently open connections
Expand Down
Loading