net/http: add MaxConnLifespan to Transport #46714

Open · wants to merge 1 commit into master
1 change: 1 addition & 0 deletions api/next.txt
@@ -0,0 +1 @@
pkg net/http, type Transport struct, MaxConnLifespan time.Duration
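
For context, a minimal sketch of how a caller could configure the new field once this change is applied; the one-minute lifespan and example.com URL are illustrative assumptions, not taken from the CL:

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	// Recycle each connection after at most one minute of reuse.
	// The zero value keeps the current behavior: no lifetime limit.
	tr := &http.Transport{
		MaxConnLifespan: time.Minute, // new field added by this CL
		IdleConnTimeout: 90 * time.Second,
	}
	client := &http.Client{Transport: tr}

	resp, err := client.Get("https://example.com/")
	if err != nil {
		fmt.Println("request error:", err)
		return
	}
	resp.Body.Close()
	fmt.Println("status:", resp.Status)
}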
48 changes: 44 additions & 4 deletions src/net/http/h2_bundle.go

Some generated files are not rendered by default.

59 changes: 53 additions & 6 deletions src/net/http/transport.go
@@ -189,6 +189,10 @@ type Transport struct {
// uncompressed.
DisableCompression bool

// MaxConnLifespan controls how long a connection is allowed
// to be reused before it must be closed. Zero means no limit.
MaxConnLifespan time.Duration

// MaxIdleConns controls the maximum number of idle (keep-alive)
// connections across all hosts. Zero means no limit.
MaxIdleConns int
@@ -316,6 +320,7 @@ func (t *Transport) Clone() *Transport {
TLSHandshakeTimeout: t.TLSHandshakeTimeout,
DisableKeepAlives: t.DisableKeepAlives,
DisableCompression: t.DisableCompression,
MaxConnLifespan: t.MaxConnLifespan,
MaxIdleConns: t.MaxIdleConns,
MaxIdleConnsPerHost: t.MaxIdleConnsPerHost,
MaxConnsPerHost: t.MaxConnsPerHost,
@@ -987,14 +992,22 @@ func (t *Transport) tryPutIdleConn(pconn *persistConn) error {
t.removeIdleConnLocked(oldest)
}

ttl, hasTtl := pconn.timeToLive()

// Set idle timer, but only for HTTP/1 (pconn.alt == nil).
// The HTTP/2 implementation manages the idle timer itself
// (see idleConnTimeout in h2_bundle.go).
if t.IdleConnTimeout > 0 && pconn.alt == nil {
if (hasTtl || t.IdleConnTimeout > 0) && pconn.alt == nil {

timeout := t.IdleConnTimeout
if hasTtl && (timeout <= 0 || ttl < timeout) {
timeout = ttl
}

if pconn.idleTimer != nil {
pconn.idleTimer.Reset(t.IdleConnTimeout)
pconn.idleTimer.Reset(timeout)
} else {
pconn.idleTimer = time.AfterFunc(t.IdleConnTimeout, pconn.closeConnIfStillIdle)
pconn.idleTimer = time.AfterFunc(timeout, pconn.closeConnIfStillIdle)
}
}
pconn.idleAt = time.Now()
@@ -1024,9 +1037,10 @@ func (t *Transport) queueForIdleConn(w *wantConn) (delivered bool) {
// If IdleConnTimeout is set, calculate the oldest
// persistConn.idleAt time we're willing to use a cached idle
// conn.
now := time.Now()
var oldTime time.Time
if t.IdleConnTimeout > 0 {
oldTime = time.Now().Add(-t.IdleConnTimeout)
oldTime = now.Add(-t.IdleConnTimeout)
}

// Look for most recently-used idle connection.
@@ -1039,7 +1053,8 @@ func (t *Transport) queueForIdleConn(w *wantConn) (delivered bool) {
// See whether this connection has been idle too long, considering
// only the wall time (the Round(0)), in case this is a laptop or VM
// coming out of suspend with previously cached idle connections.
tooOld := !oldTime.IsZero() && pconn.idleAt.Round(0).Before(oldTime)
tooOld := (!oldTime.IsZero() && pconn.idleAt.Round(0).Before(oldTime)) || (!pconn.reuseDeadline.IsZero() && pconn.reuseDeadline.Round(0).Before(now))

if tooOld {
// Async cleanup. Launch in its own goroutine (as if a
// time.AfterFunc called it); it acquires idleMu, which we're
@@ -1620,6 +1635,11 @@ func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *pers
}
}

var reuseDeadline time.Time
if t.MaxConnLifespan > 0 {
reuseDeadline = time.Now().Add(t.MaxConnLifespan)
}

// Proxy setup.
switch {
case cm.proxyURL == nil:
@@ -1740,10 +1760,11 @@ func (t *Transport) dialConn(ctx context.Context, cm connectMethod) (pconn *pers
// pconn.conn was closed by next (http2configureTransports.upgradeFn).
return nil, e.RoundTripErr()
}
return &persistConn{t: t, cacheKey: pconn.cacheKey, alt: alt}, nil
return &persistConn{t: t, cacheKey: pconn.cacheKey, alt: alt, reuseDeadline: reuseDeadline}, nil
}
}

pconn.reuseDeadline = reuseDeadline
pconn.br = bufio.NewReaderSize(pconn, t.readBufferSize())
pconn.bw = bufio.NewWriterSize(persistConnWriter{pconn}, t.writeBufferSize())

@@ -1895,6 +1916,8 @@ type persistConn struct {

writeLoopDone chan struct{} // closed when write loop ends

reuseDeadline time.Time // time when this connection can no longer be reused

// Both guarded by Transport.idleMu:
idleAt time.Time // time it last became idle
idleTimer *time.Timer // holding an AfterFunc to close it
@@ -1911,6 +1934,30 @@ type persistConn struct {
mutateHeaderFunc func(Header)
}

// timeToLive reports how long this connection may still be reused.
// The second return value is true when the connection was created by a
// Transport with MaxConnLifespan > 0 (that is, reuseDeadline is set).
//
// If reuseDeadline is the zero value, it returns (0, false).
//
// The returned duration is never negative; the connection's idle time
// is not taken into account.
func (pc *persistConn) timeToLive() (time.Duration, bool) {
if pc.reuseDeadline.IsZero() {
return 0, false
}

ttl := time.Until(pc.reuseDeadline)
if ttl < 0 {
return 0, true
}

return ttl, true
}

func (pc *persistConn) maxHeaderResponseSize() int64 {
if v := pc.t.MaxResponseHeaderBytes; v != 0 {
return v
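To make the interaction between IdleConnTimeout and the new lifespan concrete, here is a small standalone sketch of the timer-selection rule from tryPutIdleConn above. The function name and the sample values are illustrative, and the HTTP/1-only check (pconn.alt == nil) is omitted:

package main

import (
	"fmt"
	"time"
)

// effectiveIdleTimeout mirrors the choice made in tryPutIdleConn: arm the
// idle timer with IdleConnTimeout, but shorten it to the remaining lifespan
// (ttl) when that is smaller or when no idle timeout is configured.
// The second result reports whether any timer should be armed at all.
func effectiveIdleTimeout(idleConnTimeout, ttl time.Duration, hasTTL bool) (time.Duration, bool) {
	if !hasTTL && idleConnTimeout <= 0 {
		return 0, false
	}
	timeout := idleConnTimeout
	if hasTTL && (timeout <= 0 || ttl < timeout) {
		timeout = ttl
	}
	return timeout, true
}

func main() {
	fmt.Println(effectiveIdleTimeout(90*time.Second, 30*time.Second, true)) // 30s true
	fmt.Println(effectiveIdleTimeout(90*time.Second, 0, false))             // 1m30s true
	fmt.Println(effectiveIdleTimeout(0, 2*time.Minute, true))               // 2m0s true
}
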
65 changes: 65 additions & 0 deletions src/net/http/transport_test.go
@@ -4920,6 +4920,70 @@ func TestTransportMaxIdleConns(t *testing.T) {
}
}

func TestTransportMaxConnLifespan_h1(t *testing.T) { testTransportMaxConnLifespan(t, h1Mode) }
func TestTransportMaxConnLifespan_h2(t *testing.T) { testTransportMaxConnLifespan(t, h2Mode) }
func testTransportMaxConnLifespan(t *testing.T, h2 bool) {
if testing.Short() {
t.Skip("skipping in short mode")
}
defer afterTest(t)

const timeout = 1 * time.Second

cst := newClientServerTest(t, h2, HandlerFunc(func(w ResponseWriter, r *Request) {
// No body for convenience.
}))
defer cst.close()
tr := cst.tr
tr.MaxConnLifespan = timeout
tr.IdleConnTimeout = timeout * 3
defer tr.CloseIdleConnections()
c := &Client{Transport: tr}

idleConns := func() []string {
if h2 {
return tr.IdleConnStrsForTesting_h2()
} else {
return tr.IdleConnStrsForTesting()
}
}

var conn string
doReq := func(n int) {
req, _ := NewRequest("GET", cst.ts.URL, nil)
req = req.WithContext(httptrace.WithClientTrace(context.Background(), &httptrace.ClientTrace{
PutIdleConn: func(err error) {
if err != nil {
t.Errorf("failed to keep idle conn: %v", err)
}
},
}))
res, err := c.Do(req)
if err != nil {
t.Fatal(err)
}
res.Body.Close()
conns := idleConns()
if len(conns) != 1 {
t.Fatalf("req %v: unexpected number of idle conns: %q", n, conns)
}
if conn == "" {
conn = conns[0]
}
if conn != conns[0] {
t.Fatalf("req %v: cached connection changed; expected the same one throughout the test", n)
}
}
for i := 0; i < 3; i++ {
doReq(i)
time.Sleep(timeout / 4)
}
time.Sleep(timeout / 2)
if got := idleConns(); len(got) != 0 {
t.Errorf("idle conns = %q; want none", got)
}
}

func TestTransportIdleConnTimeout_h1(t *testing.T) { testTransportIdleConnTimeout(t, h1Mode) }
func TestTransportIdleConnTimeout_h2(t *testing.T) { testTransportIdleConnTimeout(t, h2Mode) }
func testTransportIdleConnTimeout(t *testing.T, h2 bool) {
@@ -5912,6 +5976,7 @@ func TestTransportClone(t *testing.T) {
TLSHandshakeTimeout: time.Second,
DisableKeepAlives: true,
DisableCompression: true,
MaxConnLifespan: time.Second,
MaxIdleConns: 1,
MaxIdleConnsPerHost: 1,
MaxConnsPerHost: 1,
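Outside the test-only helpers, the rotation is also observable with net/http/httptrace, since GotConnInfo.Reused reports false whenever an expired connection is replaced by a fresh dial. A minimal sketch, again assuming the new field from this CL, with the target URL chosen purely for illustration:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
	"time"
)

func main() {
	tr := &http.Transport{MaxConnLifespan: time.Minute}
	client := &http.Client{Transport: tr}

	trace := &httptrace.ClientTrace{
		GotConn: func(info httptrace.GotConnInfo) {
			// Reused flips back to false once the lifespan forces a new dial.
			fmt.Printf("reused=%v wasIdle=%v idleTime=%v\n", info.Reused, info.WasIdle, info.IdleTime)
		},
	}

	req, _ := http.NewRequest("GET", "https://example.com/", nil)
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	if resp, err := client.Do(req); err == nil {
		resp.Body.Close()
	}
}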