From 9aa58be1e8b0cde307f159c7d0d1d44a9c181d9b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 17 Dec 2025 17:52:58 +0000 Subject: [PATCH] Bump golang.org/x/crypto from 0.41.0 to 0.45.0 Bumps [golang.org/x/crypto](https://github.com/golang/crypto) from 0.41.0 to 0.45.0. - [Commits](https://github.com/golang/crypto/compare/v0.41.0...v0.45.0) --- updated-dependencies: - dependency-name: golang.org/x/crypto dependency-version: 0.45.0 dependency-type: indirect ... Signed-off-by: dependabot[bot] --- go.mod | 12 +- go.sum | 24 +- .../x/crypto/curve25519/curve25519.go | 13 +- vendor/golang.org/x/net/http2/config.go | 63 +- vendor/golang.org/x/net/http2/config_go124.go | 61 -- vendor/golang.org/x/net/http2/config_go125.go | 15 + vendor/golang.org/x/net/http2/config_go126.go | 15 + .../x/net/http2/config_pre_go124.go | 16 - vendor/golang.org/x/net/http2/frame.go | 101 ++- vendor/golang.org/x/net/http2/gotrack.go | 17 +- vendor/golang.org/x/net/http2/http2.go | 35 +- vendor/golang.org/x/net/http2/server.go | 143 ++--- vendor/golang.org/x/net/http2/timer.go | 20 - vendor/golang.org/x/net/http2/transport.go | 196 +++--- vendor/golang.org/x/net/http2/writesched.go | 67 +- ...rity.go => writesched_priority_rfc7540.go} | 109 ++-- .../net/http2/writesched_priority_rfc9218.go | 209 ++++++ .../x/net/http2/writesched_roundrobin.go | 2 +- .../x/net/internal/httpcommon/request.go | 4 +- vendor/golang.org/x/sync/errgroup/errgroup.go | 2 +- vendor/golang.org/x/text/unicode/bidi/core.go | 11 +- .../x/tools/go/ast/astutil/imports.go | 67 +- vendor/golang.org/x/tools/go/ast/edge/edge.go | 295 +++++++++ .../x/tools/go/ast/inspector/cursor.go | 502 +++++++++++++++ .../x/tools/go/ast/inspector/inspector.go | 311 +++++++++ .../x/tools/go/ast/inspector/iter.go | 85 +++ .../x/tools/go/ast/inspector/typeof.go | 227 +++++++ .../x/tools/go/ast/inspector/walk.go | 341 ++++++++++ .../golang.org/x/tools/go/packages/golist.go | 6 - .../golang.org/x/tools/go/packages/visit.go | 85 ++- .../x/tools/go/types/objectpath/objectpath.go | 5 +- .../x/tools/go/types/typeutil/map.go | 19 +- vendor/golang.org/x/tools/imports/forward.go | 6 - .../x/tools/internal/event/core/event.go | 5 - .../x/tools/internal/gcimporter/iexport.go | 1 - .../internal/gcimporter/iimport_go122.go | 53 -- .../x/tools/internal/imports/fix.go | 6 +- .../x/tools/internal/modindex/symbols.go | 3 +- .../x/tools/internal/stdlib/deps.go | 596 +++++++++--------- .../x/tools/internal/stdlib/manifest.go | 58 +- .../x/tools/internal/typesinternal/fx.go | 49 ++ .../x/tools/internal/typesinternal/isnamed.go | 71 +++ .../tools/internal/typesinternal/qualifier.go | 8 + .../x/tools/internal/typesinternal/types.go | 48 +- .../tools/internal/typesinternal/zerovalue.go | 17 +- vendor/modules.txt | 26 +- 46 files changed, 3117 insertions(+), 908 deletions(-) delete mode 100644 vendor/golang.org/x/net/http2/config_go124.go create mode 100644 vendor/golang.org/x/net/http2/config_go125.go create mode 100644 vendor/golang.org/x/net/http2/config_go126.go delete mode 100644 vendor/golang.org/x/net/http2/config_pre_go124.go delete mode 100644 vendor/golang.org/x/net/http2/timer.go rename vendor/golang.org/x/net/http2/{writesched_priority.go => writesched_priority_rfc7540.go} (77%) create mode 100644 vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go create mode 100644 vendor/golang.org/x/tools/go/ast/edge/edge.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/cursor.go create mode 100644 
vendor/golang.org/x/tools/go/ast/inspector/inspector.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/iter.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/typeof.go create mode 100644 vendor/golang.org/x/tools/go/ast/inspector/walk.go delete mode 100644 vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/fx.go create mode 100644 vendor/golang.org/x/tools/internal/typesinternal/isnamed.go diff --git a/go.mod b/go.mod index 7a560b6540..931a569877 100644 --- a/go.mod +++ b/go.mod @@ -39,8 +39,8 @@ require ( go.etcd.io/bbolt v1.4.0 go.opencensus.io v0.24.0 go.uber.org/mock v0.6.0 - golang.org/x/net v0.43.0 - golang.org/x/sync v0.16.0 + golang.org/x/net v0.47.0 + golang.org/x/sync v0.18.0 golang.org/x/sys v0.39.0 google.golang.org/grpc v1.75.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 @@ -119,10 +119,10 @@ require ( go.opentelemetry.io/otel/metric v1.37.0 // indirect go.opentelemetry.io/otel/sdk v1.37.0 // indirect go.opentelemetry.io/otel/trace v1.37.0 // indirect - golang.org/x/crypto v0.41.0 // indirect - golang.org/x/mod v0.27.0 // indirect - golang.org/x/text v0.28.0 // indirect - golang.org/x/tools v0.36.0 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/tools v0.38.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect sigs.k8s.io/yaml v1.4.0 // indirect diff --git a/go.sum b/go.sum index f25a3f2f8d..b40067757b 100644 --- a/go.sum +++ b/go.sum @@ -1144,8 +1144,8 @@ golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5 golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc= golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc= -golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= -golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1215,8 +1215,8 @@ golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= -golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= -golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net 
v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1302,8 +1302,8 @@ golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k= golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk= golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E= -golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= -golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1380,8 +1380,8 @@ golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= -golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= -golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1556,8 +1556,8 @@ golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU= -golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= -golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1641,8 +1641,8 @@ golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxb golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c= golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= -golang.org/x/tools v0.36.0 h1:kWS0uv/zsvHEle1LbV5LE8QujrxB3wfQyxHfhOk0Qkg= -golang.org/x/tools v0.36.0/go.mod h1:WBDiHKJK8YgLHlcQPYQzNCkUxUypCaa5ZegCVutKm+s= 
+golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/golang.org/x/crypto/curve25519/curve25519.go b/vendor/golang.org/x/crypto/curve25519/curve25519.go index 21ca3b2ee4..048faef3a5 100644 --- a/vendor/golang.org/x/crypto/curve25519/curve25519.go +++ b/vendor/golang.org/x/crypto/curve25519/curve25519.go @@ -3,11 +3,14 @@ // license that can be found in the LICENSE file. // Package curve25519 provides an implementation of the X25519 function, which -// performs scalar multiplication on the elliptic curve known as Curve25519. -// See RFC 7748. +// performs scalar multiplication on the elliptic curve known as Curve25519 +// according to [RFC 7748]. // -// This package is a wrapper for the X25519 implementation -// in the crypto/ecdh package. +// The curve25519 package is a wrapper for the X25519 implementation in the +// crypto/ecdh package. It is [frozen] and is not accepting new features. +// +// [RFC 7748]: https://datatracker.ietf.org/doc/html/rfc7748 +// [frozen]: https://go.dev/wiki/Frozen package curve25519 import "crypto/ecdh" @@ -36,7 +39,7 @@ func ScalarBaseMult(dst, scalar *[32]byte) { curve := ecdh.X25519() priv, err := curve.NewPrivateKey(scalar[:]) if err != nil { - panic("curve25519: internal error: scalarBaseMult was not 32 bytes") + panic("curve25519: " + err.Error()) } copy(dst[:], priv.PublicKey().Bytes()) } diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go index ca645d9a1a..8a7a89d016 100644 --- a/vendor/golang.org/x/net/http2/config.go +++ b/vendor/golang.org/x/net/http2/config.go @@ -27,6 +27,7 @@ import ( // - If the resulting value is zero or out of range, use a default. type http2Config struct { MaxConcurrentStreams uint32 + StrictMaxConcurrentRequests bool MaxDecoderHeaderTableSize uint32 MaxEncoderHeaderTableSize uint32 MaxReadFrameSize uint32 @@ -55,7 +56,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { PermitProhibitedCipherSuites: h2.PermitProhibitedCipherSuites, CountError: h2.CountError, } - fillNetHTTPServerConfig(&conf, h1) + fillNetHTTPConfig(&conf, h1.HTTP2) setConfigDefaults(&conf, true) return conf } @@ -64,12 +65,13 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config { // (the net/http Transport). 
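The updated curve25519 package comment earlier in this patch notes that the package is frozen and is only a wrapper around crypto/ecdh. For orientation, a minimal sketch (not part of this patch; function and variable names are illustrative) of doing the same X25519 key agreement directly with the standard library crypto/ecdh package that the wrapper delegates to:

```go
package example

import (
	"crypto/ecdh"
	"crypto/rand"
	"fmt"
)

// x25519Shared derives a shared secret the same way the frozen
// golang.org/x/crypto/curve25519 wrapper now does internally: by
// delegating to crypto/ecdh's X25519 curve.
func x25519Shared() error {
	curve := ecdh.X25519()

	alice, err := curve.GenerateKey(rand.Reader)
	if err != nil {
		return err
	}
	bob, err := curve.GenerateKey(rand.Reader)
	if err != nil {
		return err
	}

	// Each side combines its private key with the peer's public key;
	// both sides arrive at the same 32-byte secret.
	secret, err := alice.ECDH(bob.PublicKey())
	if err != nil {
		return err
	}
	fmt.Printf("shared secret: %x\n", secret)
	return nil
}
```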
func configFromTransport(h2 *Transport) http2Config { conf := http2Config{ - MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, - MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, - MaxReadFrameSize: h2.MaxReadFrameSize, - SendPingTimeout: h2.ReadIdleTimeout, - PingTimeout: h2.PingTimeout, - WriteByteTimeout: h2.WriteByteTimeout, + StrictMaxConcurrentRequests: h2.StrictMaxConcurrentStreams, + MaxEncoderHeaderTableSize: h2.MaxEncoderHeaderTableSize, + MaxDecoderHeaderTableSize: h2.MaxDecoderHeaderTableSize, + MaxReadFrameSize: h2.MaxReadFrameSize, + SendPingTimeout: h2.ReadIdleTimeout, + PingTimeout: h2.PingTimeout, + WriteByteTimeout: h2.WriteByteTimeout, } // Unlike most config fields, where out-of-range values revert to the default, @@ -81,7 +83,7 @@ func configFromTransport(h2 *Transport) http2Config { } if h2.t1 != nil { - fillNetHTTPTransportConfig(&conf, h2.t1) + fillNetHTTPConfig(&conf, h2.t1.HTTP2) } setConfigDefaults(&conf, false) return conf @@ -120,3 +122,48 @@ func adjustHTTP1MaxHeaderSize(n int64) int64 { const typicalHeaders = 10 // conservative return n + typicalHeaders*perFieldOverhead } + +func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { + if h2 == nil { + return + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if http2ConfigStrictMaxConcurrentRequests(h2) { + conf.StrictMaxConcurrentRequests = true + } + if h2.MaxEncoderHeaderTableSize != 0 { + conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) + } + if h2.MaxDecoderHeaderTableSize != 0 { + conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) + } + if h2.MaxConcurrentStreams != 0 { + conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) + } + if h2.MaxReadFrameSize != 0 { + conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) + } + if h2.MaxReceiveBufferPerConnection != 0 { + conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) + } + if h2.MaxReceiveBufferPerStream != 0 { + conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) + } + if h2.SendPingTimeout != 0 { + conf.SendPingTimeout = h2.SendPingTimeout + } + if h2.PingTimeout != 0 { + conf.PingTimeout = h2.PingTimeout + } + if h2.WriteByteTimeout != 0 { + conf.WriteByteTimeout = h2.WriteByteTimeout + } + if h2.PermitProhibitedCipherSuites { + conf.PermitProhibitedCipherSuites = true + } + if h2.CountError != nil { + conf.CountError = h2.CountError + } +} diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go deleted file mode 100644 index 5b516c55ff..0000000000 --- a/vendor/golang.org/x/net/http2/config_go124.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.24 - -package http2 - -import "net/http" - -// fillNetHTTPServerConfig sets fields in conf from srv.HTTP2. -func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) { - fillNetHTTPConfig(conf, srv.HTTP2) -} - -// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2. 
-func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) { - fillNetHTTPConfig(conf, tr.HTTP2) -} - -func fillNetHTTPConfig(conf *http2Config, h2 *http.HTTP2Config) { - if h2 == nil { - return - } - if h2.MaxConcurrentStreams != 0 { - conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) - } - if h2.MaxEncoderHeaderTableSize != 0 { - conf.MaxEncoderHeaderTableSize = uint32(h2.MaxEncoderHeaderTableSize) - } - if h2.MaxDecoderHeaderTableSize != 0 { - conf.MaxDecoderHeaderTableSize = uint32(h2.MaxDecoderHeaderTableSize) - } - if h2.MaxConcurrentStreams != 0 { - conf.MaxConcurrentStreams = uint32(h2.MaxConcurrentStreams) - } - if h2.MaxReadFrameSize != 0 { - conf.MaxReadFrameSize = uint32(h2.MaxReadFrameSize) - } - if h2.MaxReceiveBufferPerConnection != 0 { - conf.MaxUploadBufferPerConnection = int32(h2.MaxReceiveBufferPerConnection) - } - if h2.MaxReceiveBufferPerStream != 0 { - conf.MaxUploadBufferPerStream = int32(h2.MaxReceiveBufferPerStream) - } - if h2.SendPingTimeout != 0 { - conf.SendPingTimeout = h2.SendPingTimeout - } - if h2.PingTimeout != 0 { - conf.PingTimeout = h2.PingTimeout - } - if h2.WriteByteTimeout != 0 { - conf.WriteByteTimeout = h2.WriteByteTimeout - } - if h2.PermitProhibitedCipherSuites { - conf.PermitProhibitedCipherSuites = true - } - if h2.CountError != nil { - conf.CountError = h2.CountError - } -} diff --git a/vendor/golang.org/x/net/http2/config_go125.go b/vendor/golang.org/x/net/http2/config_go125.go new file mode 100644 index 0000000000..b4373fe33c --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go125.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build !go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return false +} diff --git a/vendor/golang.org/x/net/http2/config_go126.go b/vendor/golang.org/x/net/http2/config_go126.go new file mode 100644 index 0000000000..6b071c149d --- /dev/null +++ b/vendor/golang.org/x/net/http2/config_go126.go @@ -0,0 +1,15 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.26 + +package http2 + +import ( + "net/http" +) + +func http2ConfigStrictMaxConcurrentRequests(h2 *http.HTTP2Config) bool { + return h2.StrictMaxConcurrentRequests +} diff --git a/vendor/golang.org/x/net/http2/config_pre_go124.go b/vendor/golang.org/x/net/http2/config_pre_go124.go deleted file mode 100644 index 060fd6c64c..0000000000 --- a/vendor/golang.org/x/net/http2/config_pre_go124.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.24 - -package http2 - -import "net/http" - -// Pre-Go 1.24 fallback. -// The Server.HTTP2 and Transport.HTTP2 config fields were added in Go 1.24. 
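With the build-tagged helpers above, fillNetHTTPConfig now reads the net/http HTTP2Config struct (added in Go 1.24) directly, and the Go 1.26-only StrictMaxConcurrentRequests field is reached through http2ConfigStrictMaxConcurrentRequests. A brief sketch, not from this patch and with arbitrary values, of the server-side knobs that feed this code path; only field names that appear in fillNetHTTPConfig are used:

```go
package example

import (
	"net/http"
	"time"
)

// newServer returns an *http.Server whose HTTP2 field carries the settings
// that fillNetHTTPConfig copies into the http2 package's internal config.
// The concrete values are illustrations, not recommendations.
func newServer(handler http.Handler) *http.Server {
	return &http.Server{
		Addr:    ":8443",
		Handler: handler,
		HTTP2: &http.HTTP2Config{
			MaxConcurrentStreams:          250,
			MaxReadFrameSize:              1 << 20, // 1 MiB frames
			MaxReceiveBufferPerConnection: 2 << 20,
			SendPingTimeout:               30 * time.Second,
			PingTimeout:                   15 * time.Second,
			WriteByteTimeout:              30 * time.Second,
		},
	}
}
```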
- -func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {} - -func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {} diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index db3264da8c..9a4bd123c9 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -280,6 +280,8 @@ type Framer struct { // lastHeaderStream is non-zero if the last frame was an // unfinished HEADERS/CONTINUATION. lastHeaderStream uint32 + // lastFrameType holds the type of the last frame for verifying frame order. + lastFrameType FrameType maxReadSize uint32 headerBuf [frameHeaderLen]byte @@ -347,7 +349,7 @@ func (fr *Framer) maxHeaderListSize() uint32 { func (f *Framer) startWrite(ftype FrameType, flags Flags, streamID uint32) { // Write the FrameHeader. f.wbuf = append(f.wbuf[:0], - 0, // 3 bytes of length, filled in in endWrite + 0, // 3 bytes of length, filled in endWrite 0, 0, byte(ftype), @@ -488,30 +490,41 @@ func terminalReadFrameError(err error) bool { return err != nil } -// ReadFrame reads a single frame. The returned Frame is only valid -// until the next call to ReadFrame. +// ReadFrameHeader reads the header of the next frame. +// It reads the 9-byte fixed frame header, and does not read any portion of the +// frame payload. The caller is responsible for consuming the payload, either +// with ReadFrameForHeader or directly from the Framer's io.Reader. // -// If the frame is larger than previously set with SetMaxReadFrameSize, the -// returned error is ErrFrameTooLarge. Other errors may be of type -// ConnectionError, StreamError, or anything else from the underlying -// reader. +// If the frame is larger than previously set with SetMaxReadFrameSize, it +// returns the frame header and ErrFrameTooLarge. // -// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID -// indicates the stream responsible for the error. -func (fr *Framer) ReadFrame() (Frame, error) { +// If the returned FrameHeader.StreamID is non-zero, it indicates the stream +// responsible for the error. +func (fr *Framer) ReadFrameHeader() (FrameHeader, error) { fr.errDetail = nil - if fr.lastFrame != nil { - fr.lastFrame.invalidate() - } fh, err := readFrameHeader(fr.headerBuf[:], fr.r) if err != nil { - return nil, err + return fh, err } if fh.Length > fr.maxReadSize { if fh == invalidHTTP1LookingFrameHeader() { - return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) + return fh, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge) } - return nil, ErrFrameTooLarge + return fh, ErrFrameTooLarge + } + if err := fr.checkFrameOrder(fh); err != nil { + return fh, err + } + return fh, nil +} + +// ReadFrameForHeader reads the payload for the frame with the given FrameHeader. +// +// It behaves identically to ReadFrame, other than not checking the maximum +// frame size. 
+func (fr *Framer) ReadFrameForHeader(fh FrameHeader) (Frame, error) { + if fr.lastFrame != nil { + fr.lastFrame.invalidate() } payload := fr.getReadBuf(fh.Length) if _, err := io.ReadFull(fr.r, payload); err != nil { @@ -527,9 +540,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { } return nil, err } - if err := fr.checkFrameOrder(f); err != nil { - return nil, err - } + fr.lastFrame = f if fr.logReads { fr.debugReadLoggerf("http2: Framer %p: read %v", fr, summarizeFrame(f)) } @@ -539,6 +550,24 @@ func (fr *Framer) ReadFrame() (Frame, error) { return f, nil } +// ReadFrame reads a single frame. The returned Frame is only valid +// until the next call to ReadFrame or ReadFrameBodyForHeader. +// +// If the frame is larger than previously set with SetMaxReadFrameSize, the +// returned error is ErrFrameTooLarge. Other errors may be of type +// ConnectionError, StreamError, or anything else from the underlying +// reader. +// +// If ReadFrame returns an error and a non-nil Frame, the Frame's StreamID +// indicates the stream responsible for the error. +func (fr *Framer) ReadFrame() (Frame, error) { + fh, err := fr.ReadFrameHeader() + if err != nil { + return nil, err + } + return fr.ReadFrameForHeader(fh) +} + // connError returns ConnectionError(code) but first // stashes away a public reason to the caller can optionally relay it // to the peer before hanging up on them. This might help others debug @@ -551,20 +580,19 @@ func (fr *Framer) connError(code ErrCode, reason string) error { // checkFrameOrder reports an error if f is an invalid frame to return // next from ReadFrame. Mostly it checks whether HEADERS and // CONTINUATION frames are contiguous. -func (fr *Framer) checkFrameOrder(f Frame) error { - last := fr.lastFrame - fr.lastFrame = f +func (fr *Framer) checkFrameOrder(fh FrameHeader) error { + lastType := fr.lastFrameType + fr.lastFrameType = fh.Type if fr.AllowIllegalReads { return nil } - fh := f.Header() if fr.lastHeaderStream != 0 { if fh.Type != FrameContinuation { return fr.connError(ErrCodeProtocol, fmt.Sprintf("got %s for stream %d; expected CONTINUATION following %s for stream %d", fh.Type, fh.StreamID, - last.Header().Type, fr.lastHeaderStream)) + lastType, fr.lastHeaderStream)) } if fh.StreamID != fr.lastHeaderStream { return fr.connError(ErrCodeProtocol, @@ -1152,7 +1180,16 @@ type PriorityFrame struct { PriorityParam } -// PriorityParam are the stream prioritzation parameters. +var defaultRFC9218Priority = PriorityParam{ + incremental: 0, + urgency: 3, +} + +// Note that HTTP/2 has had two different prioritization schemes, and +// PriorityParam struct below is a superset of both schemes. The exported +// symbols are from RFC 7540 and the non-exported ones are from RFC 9218. + +// PriorityParam are the stream prioritization parameters. type PriorityParam struct { // StreamDep is a 31-bit stream identifier for the // stream that this stream depends on. Zero means no @@ -1167,6 +1204,20 @@ type PriorityParam struct { // the spec, "Add one to the value to obtain a weight between // 1 and 256." Weight uint8 + + // "The urgency (u) parameter value is Integer (see Section 3.3.1 of + // [STRUCTURED-FIELDS]), between 0 and 7 inclusive, in descending order of + // priority. The default is 3." + urgency uint8 + + // "The incremental (i) parameter value is Boolean (see Section 3.3.6 of + // [STRUCTURED-FIELDS]). It indicates if an HTTP response can be processed + // incrementally, i.e., provide some meaningful output as chunks of the + // response arrive." 
+ // + // We use uint8 (i.e. 0 is false, 1 is true) instead of bool so we can + // avoid unnecessary type conversions and because either type takes 1 byte. + incremental uint8 } func (p PriorityParam) IsZero() bool { diff --git a/vendor/golang.org/x/net/http2/gotrack.go b/vendor/golang.org/x/net/http2/gotrack.go index 9933c9f8c7..9921ca096d 100644 --- a/vendor/golang.org/x/net/http2/gotrack.go +++ b/vendor/golang.org/x/net/http2/gotrack.go @@ -15,21 +15,32 @@ import ( "runtime" "strconv" "sync" + "sync/atomic" ) var DebugGoroutines = os.Getenv("DEBUG_HTTP2_GOROUTINES") == "1" +// Setting DebugGoroutines to false during a test to disable goroutine debugging +// results in race detector complaints when a test leaves goroutines running before +// returning. Tests shouldn't do this, of course, but when they do it generally shows +// up as infrequent, hard-to-debug flakes. (See #66519.) +// +// Disable goroutine debugging during individual tests with an atomic bool. +// (Note that it's safe to enable/disable debugging mid-test, so the actual race condition +// here is harmless.) +var disableDebugGoroutines atomic.Bool + type goroutineLock uint64 func newGoroutineLock() goroutineLock { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return 0 } return goroutineLock(curGoroutineID()) } func (g goroutineLock) check() { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return } if curGoroutineID() != uint64(g) { @@ -38,7 +49,7 @@ func (g goroutineLock) check() { } func (g goroutineLock) checkNotOn() { - if !DebugGoroutines { + if !DebugGoroutines || disableDebugGoroutines.Load() { return } if curGoroutineID() == uint64(g) { diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index ea5ae629fd..105fe12fef 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -15,7 +15,6 @@ package http2 // import "golang.org/x/net/http2" import ( "bufio" - "context" "crypto/tls" "errors" "fmt" @@ -35,7 +34,6 @@ var ( VerboseLogs bool logFrameWrites bool logFrameReads bool - inTests bool // Enabling extended CONNECT by causes browsers to attempt to use // WebSockets-over-HTTP/2. This results in problems when the server's websocket @@ -255,15 +253,13 @@ func (cw closeWaiter) Wait() { // idle memory usage with many connections. type bufferedWriter struct { _ incomparable - group synctestGroupInterface // immutable - conn net.Conn // immutable - bw *bufio.Writer // non-nil when data is buffered - byteTimeout time.Duration // immutable, WriteByteTimeout + conn net.Conn // immutable + bw *bufio.Writer // non-nil when data is buffered + byteTimeout time.Duration // immutable, WriteByteTimeout } -func newBufferedWriter(group synctestGroupInterface, conn net.Conn, timeout time.Duration) *bufferedWriter { +func newBufferedWriter(conn net.Conn, timeout time.Duration) *bufferedWriter { return &bufferedWriter{ - group: group, conn: conn, byteTimeout: timeout, } @@ -314,24 +310,18 @@ func (w *bufferedWriter) Flush() error { type bufferedWriterTimeoutWriter bufferedWriter func (w *bufferedWriterTimeoutWriter) Write(p []byte) (n int, err error) { - return writeWithByteTimeout(w.group, w.conn, w.byteTimeout, p) + return writeWithByteTimeout(w.conn, w.byteTimeout, p) } // writeWithByteTimeout writes to conn. // If more than timeout passes without any bytes being written to the connection, // the write fails. 
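Returning to the frame.go change above: ReadFrame is now a thin composition of ReadFrameHeader and ReadFrameForHeader, which lets a caller inspect the fixed 9-byte header before committing to the payload. A hedged sketch of driving the split API; the io.ReadWriter is assumed to already carry HTTP/2 frames, and everything else is the Framer API as defined in this version:

```go
package example

import (
	"errors"
	"fmt"
	"io"

	"golang.org/x/net/http2"
)

// dumpFrames reads frames one at a time, looking at each header before
// deciding to read its payload.
func dumpFrames(rw io.ReadWriter) error {
	fr := http2.NewFramer(rw, rw)
	for {
		fh, err := fr.ReadFrameHeader()
		if errors.Is(err, http2.ErrFrameTooLarge) {
			// The header was read but the payload exceeds SetMaxReadFrameSize;
			// fh still identifies the offending stream.
			return fmt.Errorf("stream %d: %d-byte frame too large", fh.StreamID, fh.Length)
		}
		if err != nil {
			return err
		}
		f, err := fr.ReadFrameForHeader(fh) // consumes the payload, like the old ReadFrame
		if err != nil {
			return err
		}
		fmt.Printf("%v frame on stream %d (%d bytes)\n",
			f.Header().Type, f.Header().StreamID, f.Header().Length)
	}
}
```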
-func writeWithByteTimeout(group synctestGroupInterface, conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { +func writeWithByteTimeout(conn net.Conn, timeout time.Duration, p []byte) (n int, err error) { if timeout <= 0 { return conn.Write(p) } for { - var now time.Time - if group == nil { - now = time.Now() - } else { - now = group.Now() - } - conn.SetWriteDeadline(now.Add(timeout)) + conn.SetWriteDeadline(time.Now().Add(timeout)) nn, err := conn.Write(p[n:]) n += nn if n == len(p) || nn == 0 || !errors.Is(err, os.ErrDeadlineExceeded) { @@ -417,14 +407,3 @@ func (s *sorter) SortStrings(ss []string) { // makes that struct also non-comparable, and generally doesn't add // any size (as long as it's first). type incomparable [0]func() - -// synctestGroupInterface is the methods of synctestGroup used by Server and Transport. -// It's defined as an interface here to let us keep synctestGroup entirely test-only -// and not a part of non-test builds. -type synctestGroupInterface interface { - Join() - Now() time.Time - NewTimer(d time.Duration) timer - AfterFunc(d time.Duration, f func()) timer - ContextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) -} diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 51fca38f61..bdc5520ebd 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -176,44 +176,15 @@ type Server struct { // so that we don't embed a Mutex in this struct, which will make the // struct non-copyable, which might break some callers. state *serverInternalState - - // Synchronization group used for testing. - // Outside of tests, this is nil. - group synctestGroupInterface -} - -func (s *Server) markNewGoroutine() { - if s.group != nil { - s.group.Join() - } -} - -func (s *Server) now() time.Time { - if s.group != nil { - return s.group.Now() - } - return time.Now() -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (s *Server) newTimer(d time.Duration) timer { - if s.group != nil { - return s.group.NewTimer(d) - } - return timeTimer{time.NewTimer(d)} -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (s *Server) afterFunc(d time.Duration, f func()) timer { - if s.group != nil { - return s.group.AfterFunc(d, f) - } - return timeTimer{time.AfterFunc(d, f)} } type serverInternalState struct { mu sync.Mutex activeConns map[*serverConn]struct{} + + // Pool of error channels. This is per-Server rather than global + // because channels can't be reused across synctest bubbles. + errChanPool sync.Pool } func (s *serverInternalState) registerConn(sc *serverConn) { @@ -245,6 +216,27 @@ func (s *serverInternalState) startGracefulShutdown() { s.mu.Unlock() } +// Global error channel pool used for uninitialized Servers. +// We use a per-Server pool when possible to avoid using channels across synctest bubbles. +var errChanPool = sync.Pool{ + New: func() any { return make(chan error, 1) }, +} + +func (s *serverInternalState) getErrChan() chan error { + if s == nil { + return errChanPool.Get().(chan error) // Server used without calling ConfigureServer + } + return s.errChanPool.Get().(chan error) +} + +func (s *serverInternalState) putErrChan(ch chan error) { + if s == nil { + errChanPool.Put(ch) // Server used without calling ConfigureServer + return + } + s.errChanPool.Put(ch) +} + // ConfigureServer adds HTTP/2 support to a net/http Server. 
// // The configuration conf may be nil. @@ -257,7 +249,10 @@ func ConfigureServer(s *http.Server, conf *Server) error { if conf == nil { conf = new(Server) } - conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + conf.state = &serverInternalState{ + activeConns: make(map[*serverConn]struct{}), + errChanPool: sync.Pool{New: func() any { return make(chan error, 1) }}, + } if h1, h2 := s, conf; h2.IdleTimeout == 0 { if h1.IdleTimeout != 0 { h2.IdleTimeout = h1.IdleTimeout @@ -423,6 +418,9 @@ func (o *ServeConnOpts) handler() http.Handler { // // The opts parameter is optional. If nil, default values are used. func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { + if opts == nil { + opts = &ServeConnOpts{} + } s.serveConn(c, opts, nil) } @@ -438,7 +436,7 @@ func (s *Server) serveConn(c net.Conn, opts *ServeConnOpts, newf func(*serverCon conn: c, baseCtx: baseCtx, remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(s.group, c, conf.WriteByteTimeout), + bw: newBufferedWriter(c, conf.WriteByteTimeout), handler: opts.handler(), streams: make(map[uint32]*stream), readFrameCh: make(chan readFrameResult), @@ -638,11 +636,11 @@ type serverConn struct { pingSent bool sentPingData [8]byte goAwayCode ErrCode - shutdownTimer timer // nil until used - idleTimer timer // nil if unused + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused readIdleTimeout time.Duration pingTimeout time.Duration - readIdleTimer timer // nil if unused + readIdleTimer *time.Timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer @@ -687,12 +685,12 @@ type stream struct { flow outflow // limits writing from Handler to client inflow inflow // what the client is allowed to POST/etc to us state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - readDeadline timer // nil if unused - writeDeadline timer // nil if unused - closeErr error // set before cw is closed + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + readDeadline *time.Timer // nil if unused + writeDeadline *time.Timer // nil if unused + closeErr error // set before cw is closed trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -848,7 +846,6 @@ type readFrameResult struct { // consumer is done with the frame. // It's run on its own goroutine. func (sc *serverConn) readFrames() { - sc.srv.markNewGoroutine() gate := make(chan struct{}) gateDone := func() { gate <- struct{}{} } for { @@ -881,7 +878,6 @@ type frameWriteResult struct { // At most one goroutine can be running writeFrameAsync at a time per // serverConn. 
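ConfigureServer above now also seeds the per-Server error-channel pool, with the package-level pool kept only as a fallback for Servers that never went through ConfigureServer. Caller wiring is unchanged; a short sketch with illustrative values:

```go
package example

import (
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

// enableHTTP2 attaches an http2.Server to a net/http server. ConfigureServer
// initializes the per-Server state shown above, including the error-channel
// pool, so connections on this server never fall back to the global pool.
func enableHTTP2(srv *http.Server) error {
	return http2.ConfigureServer(srv, &http2.Server{
		MaxConcurrentStreams: 250,
		IdleTimeout:          2 * time.Minute,
	})
}
```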
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest, wd *writeData) { - sc.srv.markNewGoroutine() var err error if wd == nil { err = wr.write.writeFrame(sc) @@ -965,22 +961,22 @@ func (sc *serverConn) serve(conf http2Config) { sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout > 0 { - sc.idleTimer = sc.srv.afterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() } if conf.SendPingTimeout > 0 { sc.readIdleTimeout = conf.SendPingTimeout - sc.readIdleTimer = sc.srv.afterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) + sc.readIdleTimer = time.AfterFunc(conf.SendPingTimeout, sc.onReadIdleTimer) defer sc.readIdleTimer.Stop() } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := sc.srv.afterFunc(firstSettingsTimeout, sc.onSettingsTimer) + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) defer settingsTimer.Stop() - lastFrameTime := sc.srv.now() + lastFrameTime := time.Now() loopNum := 0 for { loopNum++ @@ -994,7 +990,7 @@ func (sc *serverConn) serve(conf http2Config) { case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: - lastFrameTime = sc.srv.now() + lastFrameTime = time.Now() // Process any written frames before reading new frames from the client since a // written frame could have triggered a new stream to be started. if sc.writingFrameAsync { @@ -1077,7 +1073,7 @@ func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) { } pingAt := lastFrameReadTime.Add(sc.readIdleTimeout) - now := sc.srv.now() + now := time.Now() if pingAt.After(now) { // We received frames since arming the ping timer. // Reset it for the next possible timeout. @@ -1141,10 +1137,10 @@ func (sc *serverConn) readPreface() error { errc <- nil } }() - timer := sc.srv.newTimer(prefaceTimeout) // TODO: configurable on *Server? + timer := time.NewTimer(prefaceTimeout) // TODO: configurable on *Server? defer timer.Stop() select { - case <-timer.C(): + case <-timer.C: return errPrefaceTimeout case err := <-errc: if err == nil { @@ -1156,10 +1152,6 @@ func (sc *serverConn) readPreface() error { } } -var errChanPool = sync.Pool{ - New: func() interface{} { return make(chan error, 1) }, -} - var writeDataPool = sync.Pool{ New: func() interface{} { return new(writeData) }, } @@ -1167,7 +1159,7 @@ var writeDataPool = sync.Pool{ // writeDataFromHandler writes DATA response frames from a handler on // the given stream. func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStream bool) error { - ch := errChanPool.Get().(chan error) + ch := sc.srv.state.getErrChan() writeArg := writeDataPool.Get().(*writeData) *writeArg = writeData{stream.id, data, endStream} err := sc.writeFrameFromHandler(FrameWriteRequest{ @@ -1199,7 +1191,7 @@ func (sc *serverConn) writeDataFromHandler(stream *stream, data []byte, endStrea return errStreamClosed } } - errChanPool.Put(ch) + sc.srv.state.putErrChan(ch) if frameWriteDone { writeDataPool.Put(writeArg) } @@ -1513,7 +1505,7 @@ func (sc *serverConn) goAway(code ErrCode) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = sc.srv.afterFunc(d, sc.onShutdownTimer) + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -2118,7 +2110,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // (in Go 1.8), though. That's a more sane option anyway. 
if sc.hs.ReadTimeout > 0 { sc.conn.SetReadDeadline(time.Time{}) - st.readDeadline = sc.srv.afterFunc(sc.hs.ReadTimeout, st.onReadTimeout) + st.readDeadline = time.AfterFunc(sc.hs.ReadTimeout, st.onReadTimeout) } return sc.scheduleHandler(id, rw, req, handler) @@ -2216,7 +2208,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream st.flow.add(sc.initialStreamSendWindowSize) st.inflow.init(sc.initialStreamRecvWindowSize) if sc.hs.WriteTimeout > 0 { - st.writeDeadline = sc.srv.afterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) } sc.streams[id] = st @@ -2405,7 +2397,6 @@ func (sc *serverConn) handlerDone() { // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { - sc.srv.markNewGoroutine() defer sc.sendServeMsg(handlerDoneMsg) didPanic := true defer func() { @@ -2454,7 +2445,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro // waiting for this frame to be written, so an http.Flush mid-handler // writes out the correct value of keys, before a handler later potentially // mutates it. - errc = errChanPool.Get().(chan error) + errc = sc.srv.state.getErrChan() } if err := sc.writeFrameFromHandler(FrameWriteRequest{ write: headerData, @@ -2466,7 +2457,7 @@ func (sc *serverConn) writeHeaders(st *stream, headerData *writeResHeaders) erro if errc != nil { select { case err := <-errc: - errChanPool.Put(errc) + sc.srv.state.putErrChan(errc) return err case <-sc.doneServing: return errClientDisconnected @@ -2573,7 +2564,7 @@ func (b *requestBody) Read(p []byte) (n int, err error) { if err == io.EOF { b.sawEOF = true } - if b.conn == nil && inTests { + if b.conn == nil { return } b.conn.noteBodyReadFromHandler(b.stream, n, err) @@ -2702,7 +2693,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { var date string if _, ok := rws.snapHeader["Date"]; !ok { // TODO(bradfitz): be faster here, like net/http? measure. - date = rws.conn.srv.now().UTC().Format(http.TimeFormat) + date = time.Now().UTC().Format(http.TimeFormat) } for _, v := range rws.snapHeader["Trailer"] { @@ -2824,7 +2815,7 @@ func (rws *responseWriterState) promoteUndeclaredTrailers() { func (w *responseWriter) SetReadDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { + if !deadline.IsZero() && deadline.Before(time.Now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. 
st.onReadTimeout() @@ -2840,9 +2831,9 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { if deadline.IsZero() { st.readDeadline = nil } else if st.readDeadline == nil { - st.readDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onReadTimeout) + st.readDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onReadTimeout) } else { - st.readDeadline.Reset(deadline.Sub(sc.srv.now())) + st.readDeadline.Reset(deadline.Sub(time.Now())) } }) return nil @@ -2850,7 +2841,7 @@ func (w *responseWriter) SetReadDeadline(deadline time.Time) error { func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { st := w.rws.stream - if !deadline.IsZero() && deadline.Before(w.rws.conn.srv.now()) { + if !deadline.IsZero() && deadline.Before(time.Now()) { // If we're setting a deadline in the past, reset the stream immediately // so writes after SetWriteDeadline returns will fail. st.onWriteTimeout() @@ -2866,9 +2857,9 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error { if deadline.IsZero() { st.writeDeadline = nil } else if st.writeDeadline == nil { - st.writeDeadline = sc.srv.afterFunc(deadline.Sub(sc.srv.now()), st.onWriteTimeout) + st.writeDeadline = time.AfterFunc(deadline.Sub(time.Now()), st.onWriteTimeout) } else { - st.writeDeadline.Reset(deadline.Sub(sc.srv.now())) + st.writeDeadline.Reset(deadline.Sub(time.Now())) } }) return nil @@ -3147,7 +3138,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { method: opts.Method, url: u, header: cloneHeader(opts.Header), - done: errChanPool.Get().(chan error), + done: sc.srv.state.getErrChan(), } select { @@ -3164,7 +3155,7 @@ func (w *responseWriter) Push(target string, opts *http.PushOptions) error { case <-st.cw: return errStreamClosed case err := <-msg.done: - errChanPool.Put(msg.done) + sc.srv.state.putErrChan(msg.done) return err } } diff --git a/vendor/golang.org/x/net/http2/timer.go b/vendor/golang.org/x/net/http2/timer.go deleted file mode 100644 index 0b1c17b812..0000000000 --- a/vendor/golang.org/x/net/http2/timer.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. -package http2 - -import "time" - -// A timer is a time.Timer, as an interface which can be replaced in tests. -type timer = interface { - C() <-chan time.Time - Reset(d time.Duration) bool - Stop() bool -} - -// timeTimer adapts a time.Timer to the timer interface. 
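The SetReadDeadline and SetWriteDeadline methods above now arm plain time.AfterFunc timers instead of the removed test-only timer abstraction. From a handler these methods are normally reached through net/http's ResponseController (Go 1.20+); a small usage sketch, not from this patch:

```go
package example

import (
	"io"
	"net/http"
	"time"
)

// slowEchoHandler extends the per-stream read deadline before copying a large
// request body, and bounds the response with a write deadline. On an HTTP/2
// connection these calls land in the responseWriter deadline methods above.
func slowEchoHandler(w http.ResponseWriter, r *http.Request) {
	rc := http.NewResponseController(w)
	_ = rc.SetReadDeadline(time.Now().Add(5 * time.Minute)) // errors ignored for brevity
	_ = rc.SetWriteDeadline(time.Now().Add(1 * time.Minute))
	if _, err := io.Copy(w, r.Body); err != nil {
		return // deadline expirations surface here as read/write errors
	}
}
```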
-type timeTimer struct { - *time.Timer -} - -func (t timeTimer) C() <-chan time.Time { return t.Timer.C } diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index f26356b9cd..1965913e54 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -9,6 +9,7 @@ package http2 import ( "bufio" "bytes" + "compress/flate" "compress/gzip" "context" "crypto/rand" @@ -193,50 +194,6 @@ type Transport struct { type transportTestHooks struct { newclientconn func(*ClientConn) - group synctestGroupInterface -} - -func (t *Transport) markNewGoroutine() { - if t != nil && t.transportTestHooks != nil { - t.transportTestHooks.group.Join() - } -} - -func (t *Transport) now() time.Time { - if t != nil && t.transportTestHooks != nil { - return t.transportTestHooks.group.Now() - } - return time.Now() -} - -func (t *Transport) timeSince(when time.Time) time.Duration { - if t != nil && t.transportTestHooks != nil { - return t.now().Sub(when) - } - return time.Since(when) -} - -// newTimer creates a new time.Timer, or a synthetic timer in tests. -func (t *Transport) newTimer(d time.Duration) timer { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.NewTimer(d) - } - return timeTimer{time.NewTimer(d)} -} - -// afterFunc creates a new time.AfterFunc timer, or a synthetic timer in tests. -func (t *Transport) afterFunc(d time.Duration, f func()) timer { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.AfterFunc(d, f) - } - return timeTimer{time.AfterFunc(d, f)} -} - -func (t *Transport) contextWithTimeout(ctx context.Context, d time.Duration) (context.Context, context.CancelFunc) { - if t.transportTestHooks != nil { - return t.transportTestHooks.group.ContextWithTimeout(ctx, d) - } - return context.WithTimeout(ctx, d) } func (t *Transport) maxHeaderListSize() uint32 { @@ -366,7 +323,7 @@ type ClientConn struct { readerErr error // set before readerDone is closed idleTimeout time.Duration // or 0 for never - idleTimer timer + idleTimer *time.Timer mu sync.Mutex // guards following cond *sync.Cond // hold mu; broadcast on flow/closed changes @@ -399,6 +356,7 @@ type ClientConn struct { readIdleTimeout time.Duration pingTimeout time.Duration extendedConnectAllowed bool + strictMaxConcurrentStreams bool // rstStreamPingsBlocked works around an unfortunate gRPC behavior. // gRPC strictly limits the number of PING frames that it will receive. 
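As the new strictMaxConcurrentStreams field above shows, Transport.StrictMaxConcurrentStreams is now captured per ClientConn through configFromTransport. A sketch of the client-side configuration that feeds this path, using only exported Transport fields that appear in this patch (values are illustrative):

```go
package example

import (
	"time"

	"golang.org/x/net/http2"
)

// newTransport builds an http2.Transport whose settings flow through
// configFromTransport into each ClientConn, including the strict
// max-concurrent-streams behavior.
func newTransport() *http2.Transport {
	return &http2.Transport{
		StrictMaxConcurrentStreams: true,             // queue requests rather than signaling for a new conn
		ReadIdleTimeout:            30 * time.Second, // becomes SendPingTimeout (health-check pings)
		PingTimeout:                15 * time.Second,
		WriteByteTimeout:           30 * time.Second,
	}
}
```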
@@ -534,14 +492,12 @@ func (cs *clientStream) closeReqBodyLocked() { cs.reqBodyClosed = make(chan struct{}) reqBodyClosed := cs.reqBodyClosed go func() { - cs.cc.t.markNewGoroutine() cs.reqBody.Close() close(reqBodyClosed) }() } type stickyErrWriter struct { - group synctestGroupInterface conn net.Conn timeout time.Duration err *error @@ -551,7 +507,7 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { if *sew.err != nil { return 0, *sew.err } - n, err = writeWithByteTimeout(sew.group, sew.conn, sew.timeout, p) + n, err = writeWithByteTimeout(sew.conn, sew.timeout, p) *sew.err = err return n, err } @@ -650,9 +606,9 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res backoff := float64(uint(1) << (uint(retry) - 1)) backoff += backoff * (0.1 * mathrand.Float64()) d := time.Second * time.Duration(backoff) - tm := t.newTimer(d) + tm := time.NewTimer(d) select { - case <-tm.C(): + case <-tm.C: t.vlogf("RoundTrip retrying after failure: %v", roundTripErr) continue case <-req.Context().Done(): @@ -699,6 +655,7 @@ var ( errClientConnUnusable = errors.New("http2: client conn not usable") errClientConnNotEstablished = errors.New("http2: client conn could not be established") errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") + errClientConnForceClosed = errors.New("http2: client connection force closed via ClientConn.Close") ) // shouldRetryRequest is called by RoundTrip when a request fails to get @@ -829,7 +786,8 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro initialWindowSize: 65535, // spec default initialStreamRecvWindowSize: conf.MaxUploadBufferPerStream, maxConcurrentStreams: initialMaxConcurrentStreams, // "infinite", per spec. Use a smaller value until we have received server settings. - peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + strictMaxConcurrentStreams: conf.StrictMaxConcurrentRequests, + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. streams: make(map[uint32]*clientStream), singleUse: singleUse, seenSettingsChan: make(chan struct{}), @@ -838,14 +796,11 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro pingTimeout: conf.PingTimeout, pings: make(map[[8]byte]chan struct{}), reqHeaderMu: make(chan struct{}, 1), - lastActive: t.now(), + lastActive: time.Now(), } - var group synctestGroupInterface if t.transportTestHooks != nil { - t.markNewGoroutine() t.transportTestHooks.newclientconn(cc) c = cc.tconn - group = t.group } if VerboseLogs { t.vlogf("http2: Transport creating client conn %p to %v", cc, c.RemoteAddr()) @@ -857,7 +812,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // TODO: adjust this writer size to account for frame size + // MTU + crypto/tls record padding. cc.bw = bufio.NewWriter(stickyErrWriter{ - group: group, conn: c, timeout: conf.WriteByteTimeout, err: &cc.werr, @@ -906,7 +860,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // Start the idle timer after the connection is fully initialized. 
if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d - cc.idleTimer = t.afterFunc(d, cc.onIdleTimeout) + cc.idleTimer = time.AfterFunc(d, cc.onIdleTimeout) } go cc.readLoop() @@ -917,7 +871,7 @@ func (cc *ClientConn) healthCheck() { pingTimeout := cc.pingTimeout // We don't need to periodically ping in the health check, because the readLoop of ClientConn will // trigger the healthCheck again if there is no frame received. - ctx, cancel := cc.t.contextWithTimeout(context.Background(), pingTimeout) + ctx, cancel := context.WithTimeout(context.Background(), pingTimeout) defer cancel() cc.vlogf("http2: Transport sending health check") err := cc.Ping(ctx) @@ -1067,7 +1021,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { return } var maxConcurrentOkay bool - if cc.t.StrictMaxConcurrentStreams { + if cc.strictMaxConcurrentStreams { // We'll tell the caller we can take a new request to // prevent the caller from dialing a new TCP // connection, but then we'll block later before @@ -1120,7 +1074,7 @@ func (cc *ClientConn) tooIdleLocked() bool { // times are compared based on their wall time. We don't want // to reuse a connection that's been sitting idle during // VM/laptop suspend if monotonic time was also frozen. - return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout + return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout } // onIdleTimeout is called from a time.AfterFunc goroutine. It will @@ -1186,7 +1140,6 @@ func (cc *ClientConn) Shutdown(ctx context.Context) error { done := make(chan struct{}) cancelled := false // guarded by cc.mu go func() { - cc.t.markNewGoroutine() cc.mu.Lock() defer cc.mu.Unlock() for { @@ -1257,8 +1210,7 @@ func (cc *ClientConn) closeForError(err error) { // // In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. func (cc *ClientConn) Close() error { - err := errors.New("http2: client connection force closed via ClientConn.Close") - cc.closeForError(err) + cc.closeForError(errClientConnForceClosed) return nil } @@ -1427,7 +1379,6 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream)) // // It sends the request and performs post-request cleanup (closing Request.Body, etc.). func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)) { - cs.cc.t.markNewGoroutine() err := cs.writeRequest(req, streamf) cs.cleanupWriteRequest(err) } @@ -1558,9 +1509,9 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre var respHeaderTimer <-chan time.Time var respHeaderRecv chan struct{} if d := cc.responseHeaderTimeout(); d != 0 { - timer := cc.t.newTimer(d) + timer := time.NewTimer(d) defer timer.Stop() - respHeaderTimer = timer.C() + respHeaderTimer = timer.C respHeaderRecv = cs.respHeaderRecv } // Wait until the peer half-closes its end of the stream, @@ -1753,7 +1704,7 @@ func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error { // Return a fatal error which aborts the retry loop. 
return errClientConnNotEstablished } - cc.lastActive = cc.t.now() + cc.lastActive = time.Now() if cc.closed || !cc.canTakeNewRequestLocked() { return errClientConnUnusable } @@ -2092,10 +2043,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) { if len(cc.streams) != slen-1 { panic("forgetting unknown stream id") } - cc.lastActive = cc.t.now() + cc.lastActive = time.Now() if len(cc.streams) == 0 && cc.idleTimer != nil { cc.idleTimer.Reset(cc.idleTimeout) - cc.lastIdle = cc.t.now() + cc.lastIdle = time.Now() } // Wake up writeRequestBody via clientStream.awaitFlowControl and // wake up RoundTrip if there is a pending request. @@ -2121,7 +2072,6 @@ type clientConnReadLoop struct { // readLoop runs in its own goroutine and reads and dispatches frames. func (cc *ClientConn) readLoop() { - cc.t.markNewGoroutine() rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() @@ -2188,9 +2138,9 @@ func (rl *clientConnReadLoop) cleanup() { if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout { unusedWaitTime = cc.idleTimeout } - idleTime := cc.t.now().Sub(cc.lastActive) + idleTime := time.Now().Sub(cc.lastActive) if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle { - cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() { + cc.idleTimer = time.AfterFunc(unusedWaitTime-idleTime, func() { cc.t.connPool().MarkDead(cc) }) } else { @@ -2250,9 +2200,9 @@ func (rl *clientConnReadLoop) run() error { cc := rl.cc gotSettings := false readIdleTimeout := cc.readIdleTimeout - var t timer + var t *time.Timer if readIdleTimeout != 0 { - t = cc.t.afterFunc(readIdleTimeout, cc.healthCheck) + t = time.AfterFunc(readIdleTimeout, cc.healthCheck) } for { f, err := cc.fr.ReadFrame() @@ -2998,7 +2948,6 @@ func (cc *ClientConn) Ping(ctx context.Context) error { var pingError error errc := make(chan struct{}) go func() { - cc.t.markNewGoroutine() cc.wmu.Lock() defer cc.wmu.Unlock() if pingError = cc.fr.WritePing(false, p); pingError != nil { @@ -3128,35 +3077,102 @@ type erringRoundTripper struct{ err error } func (rt erringRoundTripper) RoundTripErr() error { return rt.err } func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) { return nil, rt.err } +var errConcurrentReadOnResBody = errors.New("http2: concurrent read on response body") + // gzipReader wraps a response body so it can lazily -// call gzip.NewReader on the first call to Read +// get gzip.Reader from the pool on the first call to Read. +// After Close is called it puts gzip.Reader to the pool immediately +// if there is no Read in progress or later when Read completes. type gzipReader struct { _ incomparable body io.ReadCloser // underlying Response.Body - zr *gzip.Reader // lazily-initialized gzip reader - zerr error // sticky error + mu sync.Mutex // guards zr and zerr + zr *gzip.Reader // stores gzip reader from the pool between reads + zerr error // sticky gzip reader init error or sentinel value to detect concurrent read and read after close } -func (gz *gzipReader) Read(p []byte) (n int, err error) { +type eofReader struct{} + +func (eofReader) Read([]byte) (int, error) { return 0, io.EOF } +func (eofReader) ReadByte() (byte, error) { return 0, io.EOF } + +var gzipPool = sync.Pool{New: func() any { return new(gzip.Reader) }} + +// gzipPoolGet gets a gzip.Reader from the pool and resets it to read from r. 
+func gzipPoolGet(r io.Reader) (*gzip.Reader, error) { + zr := gzipPool.Get().(*gzip.Reader) + if err := zr.Reset(r); err != nil { + gzipPoolPut(zr) + return nil, err + } + return zr, nil +} + +// gzipPoolPut puts a gzip.Reader back into the pool. +func gzipPoolPut(zr *gzip.Reader) { + // Reset will allocate bufio.Reader if we pass it anything + // other than a flate.Reader, so ensure that it's getting one. + var r flate.Reader = eofReader{} + zr.Reset(r) + gzipPool.Put(zr) +} + +// acquire returns a gzip.Reader for reading response body. +// The reader must be released after use. +func (gz *gzipReader) acquire() (*gzip.Reader, error) { + gz.mu.Lock() + defer gz.mu.Unlock() if gz.zerr != nil { - return 0, gz.zerr + return nil, gz.zerr } if gz.zr == nil { - gz.zr, err = gzip.NewReader(gz.body) - if err != nil { - gz.zerr = err - return 0, err + gz.zr, gz.zerr = gzipPoolGet(gz.body) + if gz.zerr != nil { + return nil, gz.zerr } } - return gz.zr.Read(p) + ret := gz.zr + gz.zr, gz.zerr = nil, errConcurrentReadOnResBody + return ret, nil } -func (gz *gzipReader) Close() error { - if err := gz.body.Close(); err != nil { - return err +// release returns the gzip.Reader to the pool if Close was called during Read. +func (gz *gzipReader) release(zr *gzip.Reader) { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == errConcurrentReadOnResBody { + gz.zr, gz.zerr = zr, nil + } else { // fs.ErrClosed + gzipPoolPut(zr) + } +} + +// close returns the gzip.Reader to the pool immediately or +// signals release to do so after Read completes. +func (gz *gzipReader) close() { + gz.mu.Lock() + defer gz.mu.Unlock() + if gz.zerr == nil && gz.zr != nil { + gzipPoolPut(gz.zr) + gz.zr = nil } gz.zerr = fs.ErrClosed - return nil +} + +func (gz *gzipReader) Read(p []byte) (n int, err error) { + zr, err := gz.acquire() + if err != nil { + return 0, err + } + defer gz.release(zr) + + return zr.Read(p) +} + +func (gz *gzipReader) Close() error { + gz.close() + + return gz.body.Close() } type errorReader struct{ err error } @@ -3228,7 +3244,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) { cc.mu.Lock() ci.WasIdle = len(cc.streams) == 0 && reused if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = cc.t.timeSince(cc.lastActive) + ci.IdleTime = time.Since(cc.lastActive) } cc.mu.Unlock() diff --git a/vendor/golang.org/x/net/http2/writesched.go b/vendor/golang.org/x/net/http2/writesched.go index cc893adc29..7de27be525 100644 --- a/vendor/golang.org/x/net/http2/writesched.go +++ b/vendor/golang.org/x/net/http2/writesched.go @@ -42,6 +42,8 @@ type OpenStreamOptions struct { // PusherID is zero if the stream was initiated by the client. Otherwise, // PusherID names the stream that pushed the newly opened stream. PusherID uint32 + // priority is used to set the priority of the newly opened stream. + priority PriorityParam } // FrameWriteRequest is a request to write a frame. @@ -183,45 +185,75 @@ func (wr *FrameWriteRequest) replyToWriter(err error) { } // writeQueue is used by implementations of WriteScheduler. +// +// Each writeQueue contains a queue of FrameWriteRequests, meant to store all +// FrameWriteRequests associated with a given stream. This is implemented as a +// two-stage queue: currQueue[currPos:] and nextQueue. Removing an item is done +// by incrementing currPos of currQueue. Adding an item is done by appending it +// to the nextQueue. If currQueue is empty when trying to remove an item, we +// can swap currQueue and nextQueue to remedy the situation. 
+// This two-stage queue is analogous to the use of two lists in Okasaki's +// purely functional queue but without the overhead of reversing the list when +// swapping stages. +// +// writeQueue also contains prev and next, this can be used by implementations +// of WriteScheduler to construct data structures that represent the order of +// writing between different streams (e.g. circular linked list). type writeQueue struct { - s []FrameWriteRequest + currQueue []FrameWriteRequest + nextQueue []FrameWriteRequest + currPos int + prev, next *writeQueue } -func (q *writeQueue) empty() bool { return len(q.s) == 0 } +func (q *writeQueue) empty() bool { + return (len(q.currQueue) - q.currPos + len(q.nextQueue)) == 0 +} func (q *writeQueue) push(wr FrameWriteRequest) { - q.s = append(q.s, wr) + q.nextQueue = append(q.nextQueue, wr) } func (q *writeQueue) shift() FrameWriteRequest { - if len(q.s) == 0 { + if q.empty() { panic("invalid use of queue") } - wr := q.s[0] - // TODO: less copy-happy queue. - copy(q.s, q.s[1:]) - q.s[len(q.s)-1] = FrameWriteRequest{} - q.s = q.s[:len(q.s)-1] + if q.currPos >= len(q.currQueue) { + q.currQueue, q.currPos, q.nextQueue = q.nextQueue, 0, q.currQueue[:0] + } + wr := q.currQueue[q.currPos] + q.currQueue[q.currPos] = FrameWriteRequest{} + q.currPos++ return wr } +func (q *writeQueue) peek() *FrameWriteRequest { + if q.currPos < len(q.currQueue) { + return &q.currQueue[q.currPos] + } + if len(q.nextQueue) > 0 { + return &q.nextQueue[0] + } + return nil +} + // consume consumes up to n bytes from q.s[0]. If the frame is // entirely consumed, it is removed from the queue. If the frame // is partially consumed, the frame is kept with the consumed // bytes removed. Returns true iff any bytes were consumed. func (q *writeQueue) consume(n int32) (FrameWriteRequest, bool) { - if len(q.s) == 0 { + if q.empty() { return FrameWriteRequest{}, false } - consumed, rest, numresult := q.s[0].Consume(n) + consumed, rest, numresult := q.peek().Consume(n) switch numresult { case 0: return FrameWriteRequest{}, false case 1: q.shift() case 2: - q.s[0] = rest + *q.peek() = rest } return consumed, true } @@ -230,10 +262,15 @@ type writeQueuePool []*writeQueue // put inserts an unused writeQueue into the pool. func (p *writeQueuePool) put(q *writeQueue) { - for i := range q.s { - q.s[i] = FrameWriteRequest{} + for i := range q.currQueue { + q.currQueue[i] = FrameWriteRequest{} + } + for i := range q.nextQueue { + q.nextQueue[i] = FrameWriteRequest{} } - q.s = q.s[:0] + q.currQueue = q.currQueue[:0] + q.nextQueue = q.nextQueue[:0] + q.currPos = 0 *p = append(*p, q) } diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go similarity index 77% rename from vendor/golang.org/x/net/http2/writesched_priority.go rename to vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go index f6783339d1..4e33c29a24 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc7540.go @@ -11,7 +11,7 @@ import ( ) // RFC 7540, Section 5.3.5: the default weight is 16. -const priorityDefaultWeight = 15 // 16 = 15 + 1 +const priorityDefaultWeightRFC7540 = 15 // 16 = 15 + 1 // PriorityWriteSchedulerConfig configures a priorityWriteScheduler. 
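The writeQueue above now dequeues from currQueue[currPos:] and enqueues onto nextQueue, swapping the two slices once the front stage is exhausted. A generic toy version of the same two-stage idea; it is illustrative only, since the real queue stores FrameWriteRequests and also carries the prev/next ring links:

package main

import "fmt"

// twoStageQueue is a toy FIFO built the same way as the new writeQueue:
// items are appended to next and dequeued from curr[pos:]; when curr is
// exhausted the two slices are swapped instead of copying or reversing.
type twoStageQueue[T any] struct {
	curr, next []T
	pos        int
}

func (q *twoStageQueue[T]) empty() bool { return len(q.curr)-q.pos+len(q.next) == 0 }

func (q *twoStageQueue[T]) push(v T) { q.next = append(q.next, v) }

func (q *twoStageQueue[T]) shift() T {
	if q.empty() {
		panic("shift on empty queue")
	}
	if q.pos >= len(q.curr) {
		q.curr, q.pos, q.next = q.next, 0, q.curr[:0]
	}
	v := q.curr[q.pos]
	var zero T
	q.curr[q.pos] = zero // drop the reference so it can be collected
	q.pos++
	return v
}

func main() {
	var q twoStageQueue[int]
	for i := 1; i <= 3; i++ {
		q.push(i)
	}
	fmt.Println(q.shift(), q.shift()) // 1 2
	q.push(4)
	fmt.Println(q.shift(), q.shift()) // 3 4
}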
type PriorityWriteSchedulerConfig struct { @@ -66,8 +66,8 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler } } - ws := &priorityWriteScheduler{ - nodes: make(map[uint32]*priorityNode), + ws := &priorityWriteSchedulerRFC7540{ + nodes: make(map[uint32]*priorityNodeRFC7540), maxClosedNodesInTree: cfg.MaxClosedNodesInTree, maxIdleNodesInTree: cfg.MaxIdleNodesInTree, enableWriteThrottle: cfg.ThrottleOutOfOrderWrites, @@ -81,32 +81,32 @@ func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler return ws } -type priorityNodeState int +type priorityNodeStateRFC7540 int const ( - priorityNodeOpen priorityNodeState = iota - priorityNodeClosed - priorityNodeIdle + priorityNodeOpenRFC7540 priorityNodeStateRFC7540 = iota + priorityNodeClosedRFC7540 + priorityNodeIdleRFC7540 ) -// priorityNode is a node in an HTTP/2 priority tree. +// priorityNodeRFC7540 is a node in an HTTP/2 priority tree. // Each node is associated with a single stream ID. // See RFC 7540, Section 5.3. -type priorityNode struct { - q writeQueue // queue of pending frames to write - id uint32 // id of the stream, or 0 for the root of the tree - weight uint8 // the actual weight is weight+1, so the value is in [1,256] - state priorityNodeState // open | closed | idle - bytes int64 // number of bytes written by this node, or 0 if closed - subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree +type priorityNodeRFC7540 struct { + q writeQueue // queue of pending frames to write + id uint32 // id of the stream, or 0 for the root of the tree + weight uint8 // the actual weight is weight+1, so the value is in [1,256] + state priorityNodeStateRFC7540 // open | closed | idle + bytes int64 // number of bytes written by this node, or 0 if closed + subtreeBytes int64 // sum(node.bytes) of all nodes in this subtree // These links form the priority tree. - parent *priorityNode - kids *priorityNode // start of the kids list - prev, next *priorityNode // doubly-linked list of siblings + parent *priorityNodeRFC7540 + kids *priorityNodeRFC7540 // start of the kids list + prev, next *priorityNodeRFC7540 // doubly-linked list of siblings } -func (n *priorityNode) setParent(parent *priorityNode) { +func (n *priorityNodeRFC7540) setParent(parent *priorityNodeRFC7540) { if n == parent { panic("setParent to self") } @@ -141,7 +141,7 @@ func (n *priorityNode) setParent(parent *priorityNode) { } } -func (n *priorityNode) addBytes(b int64) { +func (n *priorityNodeRFC7540) addBytes(b int64) { n.bytes += b for ; n != nil; n = n.parent { n.subtreeBytes += b @@ -154,7 +154,7 @@ func (n *priorityNode) addBytes(b int64) { // // f(n, openParent) takes two arguments: the node to visit, n, and a bool that is true // if any ancestor p of n is still open (ignoring the root node). -func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f func(*priorityNode, bool) bool) bool { +func (n *priorityNodeRFC7540) walkReadyInOrder(openParent bool, tmp *[]*priorityNodeRFC7540, f func(*priorityNodeRFC7540, bool) bool) bool { if !n.q.empty() && f(n, openParent) { return true } @@ -165,7 +165,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f // Don't consider the root "open" when updating openParent since // we can't send data frames on the root stream (only control frames). 
if n.id != 0 { - openParent = openParent || (n.state == priorityNodeOpen) + openParent = openParent || (n.state == priorityNodeOpenRFC7540) } // Common case: only one kid or all kids have the same weight. @@ -195,7 +195,7 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f *tmp = append(*tmp, n.kids) n.kids.setParent(nil) } - sort.Sort(sortPriorityNodeSiblings(*tmp)) + sort.Sort(sortPriorityNodeSiblingsRFC7540(*tmp)) for i := len(*tmp) - 1; i >= 0; i-- { (*tmp)[i].setParent(n) // setParent inserts at the head of n.kids } @@ -207,15 +207,15 @@ func (n *priorityNode) walkReadyInOrder(openParent bool, tmp *[]*priorityNode, f return false } -type sortPriorityNodeSiblings []*priorityNode +type sortPriorityNodeSiblingsRFC7540 []*priorityNodeRFC7540 -func (z sortPriorityNodeSiblings) Len() int { return len(z) } -func (z sortPriorityNodeSiblings) Swap(i, k int) { z[i], z[k] = z[k], z[i] } -func (z sortPriorityNodeSiblings) Less(i, k int) bool { +func (z sortPriorityNodeSiblingsRFC7540) Len() int { return len(z) } +func (z sortPriorityNodeSiblingsRFC7540) Swap(i, k int) { z[i], z[k] = z[k], z[i] } +func (z sortPriorityNodeSiblingsRFC7540) Less(i, k int) bool { // Prefer the subtree that has sent fewer bytes relative to its weight. // See sections 5.3.2 and 5.3.4. - wi, bi := float64(z[i].weight+1), float64(z[i].subtreeBytes) - wk, bk := float64(z[k].weight+1), float64(z[k].subtreeBytes) + wi, bi := float64(z[i].weight)+1, float64(z[i].subtreeBytes) + wk, bk := float64(z[k].weight)+1, float64(z[k].subtreeBytes) if bi == 0 && bk == 0 { return wi >= wk } @@ -225,13 +225,13 @@ func (z sortPriorityNodeSiblings) Less(i, k int) bool { return bi/bk <= wi/wk } -type priorityWriteScheduler struct { +type priorityWriteSchedulerRFC7540 struct { // root is the root of the priority tree, where root.id = 0. // The root queues control frames that are not associated with any stream. - root priorityNode + root priorityNodeRFC7540 // nodes maps stream ids to priority tree nodes. - nodes map[uint32]*priorityNode + nodes map[uint32]*priorityNodeRFC7540 // maxID is the maximum stream id in nodes. maxID uint32 @@ -239,7 +239,7 @@ type priorityWriteScheduler struct { // lists of nodes that have been closed or are idle, but are kept in // the tree for improved prioritization. When the lengths exceed either // maxClosedNodesInTree or maxIdleNodesInTree, old nodes are discarded. - closedNodes, idleNodes []*priorityNode + closedNodes, idleNodes []*priorityNodeRFC7540 // From the config. maxClosedNodesInTree int @@ -248,19 +248,19 @@ type priorityWriteScheduler struct { enableWriteThrottle bool // tmp is scratch space for priorityNode.walkReadyInOrder to reduce allocations. - tmp []*priorityNode + tmp []*priorityNodeRFC7540 // pool of empty queues for reuse. queuePool writeQueuePool } -func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStreamOptions) { +func (ws *priorityWriteSchedulerRFC7540) OpenStream(streamID uint32, options OpenStreamOptions) { // The stream may be currently idle but cannot be opened or closed. 
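What looks like a pure rename in Less also moves a parenthesis: weight is a uint8, so the old float64(z[i].weight+1) wraps to 0 for a maximum-weight stream, while the new float64(z[i].weight)+1 yields 256, presumably the intended value. A two-line check of that arithmetic:

package main

import "fmt"

func main() {
	var weight uint8 = 255           // RFC 7540 weight field is 0..255, meaning 1..256
	fmt.Println(float64(weight + 1)) // 0   (uint8 addition wraps before the conversion)
	fmt.Println(float64(weight) + 1) // 256 (convert first, then add)
}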
if curr := ws.nodes[streamID]; curr != nil { - if curr.state != priorityNodeIdle { + if curr.state != priorityNodeIdleRFC7540 { panic(fmt.Sprintf("stream %d already opened", streamID)) } - curr.state = priorityNodeOpen + curr.state = priorityNodeOpenRFC7540 return } @@ -272,11 +272,11 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream if parent == nil { parent = &ws.root } - n := &priorityNode{ + n := &priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeOpen, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeOpenRFC7540, } n.setParent(parent) ws.nodes[streamID] = n @@ -285,24 +285,23 @@ func (ws *priorityWriteScheduler) OpenStream(streamID uint32, options OpenStream } } -func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { +func (ws *priorityWriteSchedulerRFC7540) CloseStream(streamID uint32) { if streamID == 0 { panic("violation of WriteScheduler interface: cannot close stream 0") } if ws.nodes[streamID] == nil { panic(fmt.Sprintf("violation of WriteScheduler interface: unknown stream %d", streamID)) } - if ws.nodes[streamID].state != priorityNodeOpen { + if ws.nodes[streamID].state != priorityNodeOpenRFC7540 { panic(fmt.Sprintf("violation of WriteScheduler interface: stream %d already closed", streamID)) } n := ws.nodes[streamID] - n.state = priorityNodeClosed + n.state = priorityNodeClosedRFC7540 n.addBytes(-n.bytes) q := n.q ws.queuePool.put(&q) - n.q.s = nil if ws.maxClosedNodesInTree > 0 { ws.addClosedOrIdleNode(&ws.closedNodes, ws.maxClosedNodesInTree, n) } else { @@ -310,7 +309,7 @@ func (ws *priorityWriteScheduler) CloseStream(streamID uint32) { } } -func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority PriorityParam) { +func (ws *priorityWriteSchedulerRFC7540) AdjustStream(streamID uint32, priority PriorityParam) { if streamID == 0 { panic("adjustPriority on root") } @@ -324,11 +323,11 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit return } ws.maxID = streamID - n = &priorityNode{ + n = &priorityNodeRFC7540{ q: *ws.queuePool.get(), id: streamID, - weight: priorityDefaultWeight, - state: priorityNodeIdle, + weight: priorityDefaultWeightRFC7540, + state: priorityNodeIdleRFC7540, } n.setParent(&ws.root) ws.nodes[streamID] = n @@ -340,7 +339,7 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit parent := ws.nodes[priority.StreamDep] if parent == nil { n.setParent(&ws.root) - n.weight = priorityDefaultWeight + n.weight = priorityDefaultWeightRFC7540 return } @@ -381,8 +380,8 @@ func (ws *priorityWriteScheduler) AdjustStream(streamID uint32, priority Priorit n.weight = priority.Weight } -func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { - var n *priorityNode +func (ws *priorityWriteSchedulerRFC7540) Push(wr FrameWriteRequest) { + var n *priorityNodeRFC7540 if wr.isControl() { n = &ws.root } else { @@ -401,8 +400,8 @@ func (ws *priorityWriteScheduler) Push(wr FrameWriteRequest) { n.q.push(wr) } -func (ws *priorityWriteScheduler) Pop() (wr FrameWriteRequest, ok bool) { - ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNode, openParent bool) bool { +func (ws *priorityWriteSchedulerRFC7540) Pop() (wr FrameWriteRequest, ok bool) { + ws.root.walkReadyInOrder(false, &ws.tmp, func(n *priorityNodeRFC7540, openParent bool) bool { limit := int32(math.MaxInt32) if openParent { limit = ws.writeThrottleLimit @@ -428,7 +427,7 @@ func (ws *priorityWriteScheduler) Pop() (wr 
FrameWriteRequest, ok bool) { return wr, ok } -func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, maxSize int, n *priorityNode) { +func (ws *priorityWriteSchedulerRFC7540) addClosedOrIdleNode(list *[]*priorityNodeRFC7540, maxSize int, n *priorityNodeRFC7540) { if maxSize == 0 { return } @@ -442,7 +441,7 @@ func (ws *priorityWriteScheduler) addClosedOrIdleNode(list *[]*priorityNode, max *list = append(*list, n) } -func (ws *priorityWriteScheduler) removeNode(n *priorityNode) { +func (ws *priorityWriteSchedulerRFC7540) removeNode(n *priorityNodeRFC7540) { for n.kids != nil { n.kids.setParent(n.parent) } diff --git a/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go new file mode 100644 index 0000000000..cb4cadc32d --- /dev/null +++ b/vendor/golang.org/x/net/http2/writesched_priority_rfc9218.go @@ -0,0 +1,209 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "fmt" + "math" +) + +type streamMetadata struct { + location *writeQueue + priority PriorityParam +} + +type priorityWriteSchedulerRFC9218 struct { + // control contains control frames (SETTINGS, PING, etc.). + control writeQueue + + // heads contain the head of a circular list of streams. + // We put these heads within a nested array that represents urgency and + // incremental, as defined in + // https://www.rfc-editor.org/rfc/rfc9218.html#name-priority-parameters. + // 8 represents u=0 up to u=7, and 2 represents i=false and i=true. + heads [8][2]*writeQueue + + // streams contains a mapping between each stream ID and their metadata, so + // we can quickly locate them when needing to, for example, adjust their + // priority. + streams map[uint32]streamMetadata + + // queuePool are empty queues for reuse. + queuePool writeQueuePool + + // prioritizeIncremental is used to determine whether we should prioritize + // incremental streams or not, when urgency is the same in a given Pop() + // call. + prioritizeIncremental bool +} + +func newPriorityWriteSchedulerRFC9218() WriteScheduler { + ws := &priorityWriteSchedulerRFC9218{ + streams: make(map[uint32]streamMetadata), + } + return ws +} + +func (ws *priorityWriteSchedulerRFC9218) OpenStream(streamID uint32, opt OpenStreamOptions) { + if ws.streams[streamID].location != nil { + panic(fmt.Errorf("stream %d already opened", streamID)) + } + q := ws.queuePool.get() + ws.streams[streamID] = streamMetadata{ + location: q, + priority: opt.priority, + } + + u, i := opt.priority.urgency, opt.priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } +} + +func (ws *priorityWriteSchedulerRFC9218) CloseStream(streamID uint32) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + if q.next == q { + // This was the only open stream. 
+ ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + delete(ws.streams, streamID) + ws.queuePool.put(q) +} + +func (ws *priorityWriteSchedulerRFC9218) AdjustStream(streamID uint32, priority PriorityParam) { + metadata := ws.streams[streamID] + q, u, i := metadata.location, metadata.priority.urgency, metadata.priority.incremental + if q == nil { + return + } + + // Remove stream from current location. + if q.next == q { + // This was the only open stream. + ws.heads[u][i] = nil + } else { + q.prev.next = q.next + q.next.prev = q.prev + if ws.heads[u][i] == q { + ws.heads[u][i] = q.next + } + } + + // Insert stream to the new queue. + u, i = priority.urgency, priority.incremental + if ws.heads[u][i] == nil { + ws.heads[u][i] = q + q.next = q + q.prev = q + } else { + // Queues are stored in a ring. + // Insert the new stream before ws.head, putting it at the end of the list. + q.prev = ws.heads[u][i].prev + q.next = ws.heads[u][i] + q.prev.next = q + q.next.prev = q + } + + // Update the metadata. + ws.streams[streamID] = streamMetadata{ + location: q, + priority: priority, + } +} + +func (ws *priorityWriteSchedulerRFC9218) Push(wr FrameWriteRequest) { + if wr.isControl() { + ws.control.push(wr) + return + } + q := ws.streams[wr.StreamID()].location + if q == nil { + // This is a closed stream. + // wr should not be a HEADERS or DATA frame. + // We push the request onto the control queue. + if wr.DataSize() > 0 { + panic("add DATA on non-open stream") + } + ws.control.push(wr) + return + } + q.push(wr) +} + +func (ws *priorityWriteSchedulerRFC9218) Pop() (FrameWriteRequest, bool) { + // Control and RST_STREAM frames first. + if !ws.control.empty() { + return ws.control.shift(), true + } + + // On the next Pop(), we want to prioritize incremental if we prioritized + // non-incremental request of the same urgency this time. Vice-versa. + // i.e. when there are incremental and non-incremental requests at the same + // priority, we give 50% of our bandwidth to the incremental ones in + // aggregate and 50% to the first non-incremental one (since + // non-incremental streams do not use round-robin writes). + ws.prioritizeIncremental = !ws.prioritizeIncremental + + // Always prioritize lowest u (i.e. highest urgency level). + for u := range ws.heads { + for i := range ws.heads[u] { + // When we want to prioritize incremental, we try to pop i=true + // first before i=false when u is the same. + if ws.prioritizeIncremental { + i = (i + 1) % 2 + } + q := ws.heads[u][i] + if q == nil { + continue + } + for { + if wr, ok := q.consume(math.MaxInt32); ok { + if i == 1 { + // For incremental streams, we update head to q.next so + // we can round-robin between multiple streams that can + // immediately benefit from partial writes. + ws.heads[u][i] = q.next + } else { + // For non-incremental streams, we try to finish one to + // completion rather than doing round-robin. However, + // we update head here so that if q.consume() is !ok + // (e.g. the stream has no more frame to consume), head + // is updated to the next q that has frames to consume + // on future iterations. This way, we do not prioritize + // writing to unavailable stream on next Pop() calls, + // preventing head-of-line blocking. 
+ ws.heads[u][i] = q + } + return wr, true + } + q = q.next + if q == ws.heads[u][i] { + break + } + } + + } + } + return FrameWriteRequest{}, false +} diff --git a/vendor/golang.org/x/net/http2/writesched_roundrobin.go b/vendor/golang.org/x/net/http2/writesched_roundrobin.go index 54fe86322d..737cff9ecb 100644 --- a/vendor/golang.org/x/net/http2/writesched_roundrobin.go +++ b/vendor/golang.org/x/net/http2/writesched_roundrobin.go @@ -25,7 +25,7 @@ type roundRobinWriteScheduler struct { } // newRoundRobinWriteScheduler constructs a new write scheduler. -// The round robin scheduler priorizes control frames +// The round robin scheduler prioritizes control frames // like SETTINGS and PING over DATA frames. // When there are no control frames to send, it performs a round-robin // selection from the ready streams. diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go index 4b70553179..1e10f89ebf 100644 --- a/vendor/golang.org/x/net/internal/httpcommon/request.go +++ b/vendor/golang.org/x/net/internal/httpcommon/request.go @@ -51,7 +51,7 @@ type EncodeHeadersParam struct { DefaultUserAgent string } -// EncodeHeadersParam is the result of EncodeHeaders. +// EncodeHeadersResult is the result of EncodeHeaders. type EncodeHeadersResult struct { HasBody bool HasTrailers bool @@ -399,7 +399,7 @@ type ServerRequestResult struct { // If the request should be rejected, this is a short string suitable for passing // to the http2 package's CountError function. - // It might be a bit odd to return errors this way rather than returing an error, + // It might be a bit odd to return errors this way rather than returning an error, // but this ensures we don't forget to include a CountError reason. InvalidReason string } diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go index 1d8cffae8c..2f45dbc86e 100644 --- a/vendor/golang.org/x/sync/errgroup/errgroup.go +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -3,7 +3,7 @@ // license that can be found in the LICENSE file. // Package errgroup provides synchronization, error propagation, and Context -// cancelation for groups of goroutines working on subtasks of a common task. +// cancellation for groups of goroutines working on subtasks of a common task. // // [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks // returning errors. diff --git a/vendor/golang.org/x/text/unicode/bidi/core.go b/vendor/golang.org/x/text/unicode/bidi/core.go index 9d2ae547b5..fb8273236d 100644 --- a/vendor/golang.org/x/text/unicode/bidi/core.go +++ b/vendor/golang.org/x/text/unicode/bidi/core.go @@ -427,13 +427,6 @@ type isolatingRunSequence struct { func (i *isolatingRunSequence) Len() int { return len(i.indexes) } -func maxLevel(a, b level) level { - if a > b { - return a - } - return b -} - // Rule X10, second bullet: Determine the start-of-sequence (sos) and end-of-sequence (eos) types, // either L or R, for each isolating run sequence. 
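Stepping back to the writesched_priority_rfc9218.go scheduler added above: Pop always serves the lowest urgency value first and, within a single urgency level, alternates which incremental class it tries first on successive calls. A toy sketch of just that bucket-selection order (it ignores the per-bucket stream rings and actual frame consumption):

package main

import "fmt"

// picker mirrors the scan order of the new scheduler's Pop: lowest urgency
// value first, and within one urgency level it alternates which incremental
// class is tried first on successive calls (the 50/50 split described above).
type picker struct {
	occupied              [8][2]bool // [urgency][incremental]: bucket has queued frames
	prioritizeIncremental bool
}

func (p *picker) pick() (int, int, bool) {
	p.prioritizeIncremental = !p.prioritizeIncremental
	for u := 0; u < 8; u++ {
		for i := 0; i < 2; i++ {
			j := i
			if p.prioritizeIncremental {
				j = (i + 1) % 2 // try incremental before non-incremental this round
			}
			if p.occupied[u][j] {
				return u, j, true
			}
		}
	}
	return 0, 0, false
}

func main() {
	var p picker
	p.occupied[3][0] = true // u=3, non-incremental
	p.occupied[3][1] = true // u=3, incremental
	p.occupied[5][1] = true // u=5: lower urgency, starved while u=3 has work
	for n := 0; n < 4; n++ {
		u, i, _ := p.pick()
		fmt.Printf("u=%d incremental=%v\n", u, i == 1)
	}
}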
func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { @@ -474,8 +467,8 @@ func (p *paragraph) isolatingRunSequence(indexes []int) *isolatingRunSequence { indexes: indexes, types: types, level: level, - sos: typeForLevel(maxLevel(prevLevel, level)), - eos: typeForLevel(maxLevel(succLevel, level)), + sos: typeForLevel(max(prevLevel, level)), + eos: typeForLevel(max(succLevel, level)), } } diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go index 5e5601aa46..5bacc0fa49 100644 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -209,48 +209,46 @@ func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) // DeleteNamedImport deletes the import with the given name and path from the file f, if present. // If there are duplicate import declarations, all matching ones are deleted. func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { - var delspecs []*ast.ImportSpec - var delcomments []*ast.CommentGroup + var ( + delspecs = make(map[*ast.ImportSpec]bool) + delcomments = make(map[*ast.CommentGroup]bool) + ) // Find the import nodes that import path, if any. for i := 0; i < len(f.Decls); i++ { - decl := f.Decls[i] - gen, ok := decl.(*ast.GenDecl) + gen, ok := f.Decls[i].(*ast.GenDecl) if !ok || gen.Tok != token.IMPORT { continue } for j := 0; j < len(gen.Specs); j++ { - spec := gen.Specs[j] - impspec := spec.(*ast.ImportSpec) + impspec := gen.Specs[j].(*ast.ImportSpec) if importName(impspec) != name || importPath(impspec) != path { continue } // We found an import spec that imports path. // Delete it. - delspecs = append(delspecs, impspec) + delspecs[impspec] = true deleted = true - copy(gen.Specs[j:], gen.Specs[j+1:]) - gen.Specs = gen.Specs[:len(gen.Specs)-1] + gen.Specs = slices.Delete(gen.Specs, j, j+1) // If this was the last import spec in this decl, // delete the decl, too. if len(gen.Specs) == 0 { - copy(f.Decls[i:], f.Decls[i+1:]) - f.Decls = f.Decls[:len(f.Decls)-1] + f.Decls = slices.Delete(f.Decls, i, i+1) i-- break } else if len(gen.Specs) == 1 { if impspec.Doc != nil { - delcomments = append(delcomments, impspec.Doc) + delcomments[impspec.Doc] = true } if impspec.Comment != nil { - delcomments = append(delcomments, impspec.Comment) + delcomments[impspec.Comment] = true } for _, cg := range f.Comments { // Found comment on the same line as the import spec. if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { - delcomments = append(delcomments, cg) + delcomments[cg] = true break } } @@ -294,38 +292,21 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del } // Delete imports from f.Imports. - for i := 0; i < len(f.Imports); i++ { - imp := f.Imports[i] - for j, del := range delspecs { - if imp == del { - copy(f.Imports[i:], f.Imports[i+1:]) - f.Imports = f.Imports[:len(f.Imports)-1] - copy(delspecs[j:], delspecs[j+1:]) - delspecs = delspecs[:len(delspecs)-1] - i-- - break - } - } + before := len(f.Imports) + f.Imports = slices.DeleteFunc(f.Imports, func(imp *ast.ImportSpec) bool { + _, ok := delspecs[imp] + return ok + }) + if len(f.Imports)+len(delspecs) != before { + // This can happen when the AST is invalid (i.e. imports differ between f.Decls and f.Imports). + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) } // Delete comments from f.Comments. 
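The DeleteNamedImport rewrite above replaces the hand-rolled copy and truncate loops with set-typed maps plus slices.Delete and slices.DeleteFunc. The same idiom in isolation, on a throwaway string slice:

package main

import (
	"fmt"
	"slices"
)

func main() {
	decls := []string{"import a", "import b", "import c", "func main"}

	// slices.Delete removes the half-open index range [1, 2) in place.
	decls = slices.Delete(decls, 1, 2)
	fmt.Println(decls) // [import a import c func main]

	// slices.DeleteFunc removes every element matching a predicate,
	// here driven by a map used as a set, as in the new code.
	doomed := map[string]bool{"import c": true}
	decls = slices.DeleteFunc(decls, func(s string) bool { return doomed[s] })
	fmt.Println(decls) // [import a func main]
}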
- for i := 0; i < len(f.Comments); i++ { - cg := f.Comments[i] - for j, del := range delcomments { - if cg == del { - copy(f.Comments[i:], f.Comments[i+1:]) - f.Comments = f.Comments[:len(f.Comments)-1] - copy(delcomments[j:], delcomments[j+1:]) - delcomments = delcomments[:len(delcomments)-1] - i-- - break - } - } - } - - if len(delspecs) > 0 { - panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) - } + f.Comments = slices.DeleteFunc(f.Comments, func(cg *ast.CommentGroup) bool { + _, ok := delcomments[cg] + return ok + }) return } diff --git a/vendor/golang.org/x/tools/go/ast/edge/edge.go b/vendor/golang.org/x/tools/go/ast/edge/edge.go new file mode 100644 index 0000000000..4f6ccfd6e5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/edge/edge.go @@ -0,0 +1,295 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package edge defines identifiers for each field of an ast.Node +// struct type that refers to another Node. +package edge + +import ( + "fmt" + "go/ast" + "reflect" +) + +// A Kind describes a field of an ast.Node struct. +type Kind uint8 + +// String returns a description of the edge kind. +func (k Kind) String() string { + if k == Invalid { + return "" + } + info := fieldInfos[k] + return fmt.Sprintf("%v.%s", info.nodeType.Elem().Name(), info.name) +} + +// NodeType returns the pointer-to-struct type of the ast.Node implementation. +func (k Kind) NodeType() reflect.Type { return fieldInfos[k].nodeType } + +// FieldName returns the name of the field. +func (k Kind) FieldName() string { return fieldInfos[k].name } + +// FieldType returns the declared type of the field. +func (k Kind) FieldType() reflect.Type { return fieldInfos[k].fieldType } + +// Get returns the direct child of n identified by (k, idx). +// n's type must match k.NodeType(). +// idx must be a valid slice index, or -1 for a non-slice. +func (k Kind) Get(n ast.Node, idx int) ast.Node { + if k.NodeType() != reflect.TypeOf(n) { + panic(fmt.Sprintf("%v.Get(%T): invalid node type", k, n)) + } + v := reflect.ValueOf(n).Elem().Field(fieldInfos[k].index) + if idx != -1 { + v = v.Index(idx) // asserts valid index + } else { + // (The type assertion below asserts that v is not a slice.) + } + return v.Interface().(ast.Node) // may be nil +} + +const ( + Invalid Kind = iota // for nodes at the root of the traversal + + // Kinds are sorted alphabetically. + // Numbering is not stable. 
+ // Each is named Type_Field, where Type is the + // ast.Node struct type and Field is the name of the field + + ArrayType_Elt + ArrayType_Len + AssignStmt_Lhs + AssignStmt_Rhs + BinaryExpr_X + BinaryExpr_Y + BlockStmt_List + BranchStmt_Label + CallExpr_Args + CallExpr_Fun + CaseClause_Body + CaseClause_List + ChanType_Value + CommClause_Body + CommClause_Comm + CommentGroup_List + CompositeLit_Elts + CompositeLit_Type + DeclStmt_Decl + DeferStmt_Call + Ellipsis_Elt + ExprStmt_X + FieldList_List + Field_Comment + Field_Doc + Field_Names + Field_Tag + Field_Type + File_Decls + File_Doc + File_Name + ForStmt_Body + ForStmt_Cond + ForStmt_Init + ForStmt_Post + FuncDecl_Body + FuncDecl_Doc + FuncDecl_Name + FuncDecl_Recv + FuncDecl_Type + FuncLit_Body + FuncLit_Type + FuncType_Params + FuncType_Results + FuncType_TypeParams + GenDecl_Doc + GenDecl_Specs + GoStmt_Call + IfStmt_Body + IfStmt_Cond + IfStmt_Else + IfStmt_Init + ImportSpec_Comment + ImportSpec_Doc + ImportSpec_Name + ImportSpec_Path + IncDecStmt_X + IndexExpr_Index + IndexExpr_X + IndexListExpr_Indices + IndexListExpr_X + InterfaceType_Methods + KeyValueExpr_Key + KeyValueExpr_Value + LabeledStmt_Label + LabeledStmt_Stmt + MapType_Key + MapType_Value + ParenExpr_X + RangeStmt_Body + RangeStmt_Key + RangeStmt_Value + RangeStmt_X + ReturnStmt_Results + SelectStmt_Body + SelectorExpr_Sel + SelectorExpr_X + SendStmt_Chan + SendStmt_Value + SliceExpr_High + SliceExpr_Low + SliceExpr_Max + SliceExpr_X + StarExpr_X + StructType_Fields + SwitchStmt_Body + SwitchStmt_Init + SwitchStmt_Tag + TypeAssertExpr_Type + TypeAssertExpr_X + TypeSpec_Comment + TypeSpec_Doc + TypeSpec_Name + TypeSpec_Type + TypeSpec_TypeParams + TypeSwitchStmt_Assign + TypeSwitchStmt_Body + TypeSwitchStmt_Init + UnaryExpr_X + ValueSpec_Comment + ValueSpec_Doc + ValueSpec_Names + ValueSpec_Type + ValueSpec_Values + + maxKind +) + +// Assert that the encoding fits in 7 bits, +// as the inspector relies on this. +// (We are currently at 104.) 
+var _ = [1 << 7]struct{}{}[maxKind] + +type fieldInfo struct { + nodeType reflect.Type // pointer-to-struct type of ast.Node implementation + name string + index int + fieldType reflect.Type +} + +func info[N ast.Node](fieldName string) fieldInfo { + nodePtrType := reflect.TypeFor[N]() + f, ok := nodePtrType.Elem().FieldByName(fieldName) + if !ok { + panic(fieldName) + } + return fieldInfo{nodePtrType, fieldName, f.Index[0], f.Type} +} + +var fieldInfos = [...]fieldInfo{ + Invalid: {}, + ArrayType_Elt: info[*ast.ArrayType]("Elt"), + ArrayType_Len: info[*ast.ArrayType]("Len"), + AssignStmt_Lhs: info[*ast.AssignStmt]("Lhs"), + AssignStmt_Rhs: info[*ast.AssignStmt]("Rhs"), + BinaryExpr_X: info[*ast.BinaryExpr]("X"), + BinaryExpr_Y: info[*ast.BinaryExpr]("Y"), + BlockStmt_List: info[*ast.BlockStmt]("List"), + BranchStmt_Label: info[*ast.BranchStmt]("Label"), + CallExpr_Args: info[*ast.CallExpr]("Args"), + CallExpr_Fun: info[*ast.CallExpr]("Fun"), + CaseClause_Body: info[*ast.CaseClause]("Body"), + CaseClause_List: info[*ast.CaseClause]("List"), + ChanType_Value: info[*ast.ChanType]("Value"), + CommClause_Body: info[*ast.CommClause]("Body"), + CommClause_Comm: info[*ast.CommClause]("Comm"), + CommentGroup_List: info[*ast.CommentGroup]("List"), + CompositeLit_Elts: info[*ast.CompositeLit]("Elts"), + CompositeLit_Type: info[*ast.CompositeLit]("Type"), + DeclStmt_Decl: info[*ast.DeclStmt]("Decl"), + DeferStmt_Call: info[*ast.DeferStmt]("Call"), + Ellipsis_Elt: info[*ast.Ellipsis]("Elt"), + ExprStmt_X: info[*ast.ExprStmt]("X"), + FieldList_List: info[*ast.FieldList]("List"), + Field_Comment: info[*ast.Field]("Comment"), + Field_Doc: info[*ast.Field]("Doc"), + Field_Names: info[*ast.Field]("Names"), + Field_Tag: info[*ast.Field]("Tag"), + Field_Type: info[*ast.Field]("Type"), + File_Decls: info[*ast.File]("Decls"), + File_Doc: info[*ast.File]("Doc"), + File_Name: info[*ast.File]("Name"), + ForStmt_Body: info[*ast.ForStmt]("Body"), + ForStmt_Cond: info[*ast.ForStmt]("Cond"), + ForStmt_Init: info[*ast.ForStmt]("Init"), + ForStmt_Post: info[*ast.ForStmt]("Post"), + FuncDecl_Body: info[*ast.FuncDecl]("Body"), + FuncDecl_Doc: info[*ast.FuncDecl]("Doc"), + FuncDecl_Name: info[*ast.FuncDecl]("Name"), + FuncDecl_Recv: info[*ast.FuncDecl]("Recv"), + FuncDecl_Type: info[*ast.FuncDecl]("Type"), + FuncLit_Body: info[*ast.FuncLit]("Body"), + FuncLit_Type: info[*ast.FuncLit]("Type"), + FuncType_Params: info[*ast.FuncType]("Params"), + FuncType_Results: info[*ast.FuncType]("Results"), + FuncType_TypeParams: info[*ast.FuncType]("TypeParams"), + GenDecl_Doc: info[*ast.GenDecl]("Doc"), + GenDecl_Specs: info[*ast.GenDecl]("Specs"), + GoStmt_Call: info[*ast.GoStmt]("Call"), + IfStmt_Body: info[*ast.IfStmt]("Body"), + IfStmt_Cond: info[*ast.IfStmt]("Cond"), + IfStmt_Else: info[*ast.IfStmt]("Else"), + IfStmt_Init: info[*ast.IfStmt]("Init"), + ImportSpec_Comment: info[*ast.ImportSpec]("Comment"), + ImportSpec_Doc: info[*ast.ImportSpec]("Doc"), + ImportSpec_Name: info[*ast.ImportSpec]("Name"), + ImportSpec_Path: info[*ast.ImportSpec]("Path"), + IncDecStmt_X: info[*ast.IncDecStmt]("X"), + IndexExpr_Index: info[*ast.IndexExpr]("Index"), + IndexExpr_X: info[*ast.IndexExpr]("X"), + IndexListExpr_Indices: info[*ast.IndexListExpr]("Indices"), + IndexListExpr_X: info[*ast.IndexListExpr]("X"), + InterfaceType_Methods: info[*ast.InterfaceType]("Methods"), + KeyValueExpr_Key: info[*ast.KeyValueExpr]("Key"), + KeyValueExpr_Value: info[*ast.KeyValueExpr]("Value"), + LabeledStmt_Label: info[*ast.LabeledStmt]("Label"), + 
LabeledStmt_Stmt: info[*ast.LabeledStmt]("Stmt"), + MapType_Key: info[*ast.MapType]("Key"), + MapType_Value: info[*ast.MapType]("Value"), + ParenExpr_X: info[*ast.ParenExpr]("X"), + RangeStmt_Body: info[*ast.RangeStmt]("Body"), + RangeStmt_Key: info[*ast.RangeStmt]("Key"), + RangeStmt_Value: info[*ast.RangeStmt]("Value"), + RangeStmt_X: info[*ast.RangeStmt]("X"), + ReturnStmt_Results: info[*ast.ReturnStmt]("Results"), + SelectStmt_Body: info[*ast.SelectStmt]("Body"), + SelectorExpr_Sel: info[*ast.SelectorExpr]("Sel"), + SelectorExpr_X: info[*ast.SelectorExpr]("X"), + SendStmt_Chan: info[*ast.SendStmt]("Chan"), + SendStmt_Value: info[*ast.SendStmt]("Value"), + SliceExpr_High: info[*ast.SliceExpr]("High"), + SliceExpr_Low: info[*ast.SliceExpr]("Low"), + SliceExpr_Max: info[*ast.SliceExpr]("Max"), + SliceExpr_X: info[*ast.SliceExpr]("X"), + StarExpr_X: info[*ast.StarExpr]("X"), + StructType_Fields: info[*ast.StructType]("Fields"), + SwitchStmt_Body: info[*ast.SwitchStmt]("Body"), + SwitchStmt_Init: info[*ast.SwitchStmt]("Init"), + SwitchStmt_Tag: info[*ast.SwitchStmt]("Tag"), + TypeAssertExpr_Type: info[*ast.TypeAssertExpr]("Type"), + TypeAssertExpr_X: info[*ast.TypeAssertExpr]("X"), + TypeSpec_Comment: info[*ast.TypeSpec]("Comment"), + TypeSpec_Doc: info[*ast.TypeSpec]("Doc"), + TypeSpec_Name: info[*ast.TypeSpec]("Name"), + TypeSpec_Type: info[*ast.TypeSpec]("Type"), + TypeSpec_TypeParams: info[*ast.TypeSpec]("TypeParams"), + TypeSwitchStmt_Assign: info[*ast.TypeSwitchStmt]("Assign"), + TypeSwitchStmt_Body: info[*ast.TypeSwitchStmt]("Body"), + TypeSwitchStmt_Init: info[*ast.TypeSwitchStmt]("Init"), + UnaryExpr_X: info[*ast.UnaryExpr]("X"), + ValueSpec_Comment: info[*ast.ValueSpec]("Comment"), + ValueSpec_Doc: info[*ast.ValueSpec]("Doc"), + ValueSpec_Names: info[*ast.ValueSpec]("Names"), + ValueSpec_Type: info[*ast.ValueSpec]("Type"), + ValueSpec_Values: info[*ast.ValueSpec]("Values"), +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/cursor.go b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go new file mode 100644 index 0000000000..7e72d3c284 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/cursor.go @@ -0,0 +1,502 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +import ( + "fmt" + "go/ast" + "go/token" + "iter" + "reflect" + + "golang.org/x/tools/go/ast/edge" +) + +// A Cursor represents an [ast.Node]. It is immutable. +// +// Two Cursors compare equal if they represent the same node. +// +// Call [Inspector.Root] to obtain a valid cursor for the virtual root +// node of the traversal. +// +// Use the following methods to navigate efficiently around the tree: +// - for ancestors, use [Cursor.Parent] and [Cursor.Enclosing]; +// - for children, use [Cursor.Child], [Cursor.Children], +// [Cursor.FirstChild], and [Cursor.LastChild]; +// - for siblings, use [Cursor.PrevSibling] and [Cursor.NextSibling]; +// - for descendants, use [Cursor.FindByPos], [Cursor.FindNode], +// [Cursor.Inspect], and [Cursor.Preorder]. +// +// Use the [Cursor.ChildAt] and [Cursor.ParentEdge] methods for +// information about the edges in a tree: which field (and slice +// element) of the parent node holds the child. +type Cursor struct { + in *Inspector + index int32 // index of push node; -1 for virtual root node +} + +// Root returns a cursor for the virtual root node, +// whose children are the files provided to [New]. 
+// +// Its [Cursor.Node] method return nil. +func (in *Inspector) Root() Cursor { + return Cursor{in, -1} +} + +// At returns the cursor at the specified index in the traversal, +// which must have been obtained from [Cursor.Index] on a Cursor +// belonging to the same Inspector (see [Cursor.Inspector]). +func (in *Inspector) At(index int32) Cursor { + if index < 0 { + panic("negative index") + } + if int(index) >= len(in.events) { + panic("index out of range for this inspector") + } + if in.events[index].index < index { + panic("invalid index") // (a push, not a pop) + } + return Cursor{in, index} +} + +// Inspector returns the cursor's Inspector. +func (c Cursor) Inspector() *Inspector { return c.in } + +// Index returns the index of this cursor position within the package. +// +// Clients should not assume anything about the numeric Index value +// except that it increases monotonically throughout the traversal. +// It is provided for use with [At]. +// +// Index must not be called on the Root node. +func (c Cursor) Index() int32 { + if c.index < 0 { + panic("Index called on Root node") + } + return c.index +} + +// Node returns the node at the current cursor position, +// or nil for the cursor returned by [Inspector.Root]. +func (c Cursor) Node() ast.Node { + if c.index < 0 { + return nil + } + return c.in.events[c.index].node +} + +// String returns information about the cursor's node, if any. +func (c Cursor) String() string { + if c.in == nil { + return "(invalid)" + } + if c.index < 0 { + return "(root)" + } + return reflect.TypeOf(c.Node()).String() +} + +// indices return the [start, end) half-open interval of event indices. +func (c Cursor) indices() (int32, int32) { + if c.index < 0 { + return 0, int32(len(c.in.events)) // root: all events + } else { + return c.index, c.in.events[c.index].index + 1 // just one subtree + } +} + +// Preorder returns an iterator over the nodes of the subtree +// represented by c in depth-first order. Each node in the sequence is +// represented by a Cursor that allows access to the Node, but may +// also be used to start a new traversal, or to obtain the stack of +// nodes enclosing the cursor. +// +// The traversal sequence is determined by [ast.Inspect]. The types +// argument, if non-empty, enables type-based filtering of events. The +// function f if is called only for nodes whose type matches an +// element of the types slice. +// +// If you need control over descent into subtrees, +// or need both pre- and post-order notifications, use [Cursor.Inspect] +func (c Cursor) Preorder(types ...ast.Node) iter.Seq[Cursor] { + mask := maskOf(types) + + return func(yield func(Cursor) bool) { + events := c.in.events + + for i, limit := c.indices(); i < limit; { + ev := events[i] + if ev.index > i { // push? + if ev.typ&mask != 0 && !yield(Cursor{c.in, i}) { + break + } + pop := ev.index + if events[pop].typ&mask == 0 { + // Subtree does not contain types: skip. + i = pop + 1 + continue + } + } + i++ + } + } +} + +// Inspect visits the nodes of the subtree represented by c in +// depth-first order. It calls f(n) for each node n before it +// visits n's children. If f returns true, Inspect invokes f +// recursively for each of the non-nil children of the node. +// +// Each node is represented by a Cursor that allows access to the +// Node, but may also be used to start a new traversal, or to obtain +// the stack of nodes enclosing the cursor. +// +// The complete traversal sequence is determined by [ast.Inspect]. 
+// The types argument, if non-empty, enables type-based filtering of +// events. The function f if is called only for nodes whose type +// matches an element of the types slice. +func (c Cursor) Inspect(types []ast.Node, f func(c Cursor) (descend bool)) { + mask := maskOf(types) + events := c.in.events + for i, limit := c.indices(); i < limit; { + ev := events[i] + if ev.index > i { + // push + pop := ev.index + if ev.typ&mask != 0 && !f(Cursor{c.in, i}) || + events[pop].typ&mask == 0 { + // The user opted not to descend, or the + // subtree does not contain types: + // skip past the pop. + i = pop + 1 + continue + } + } + i++ + } +} + +// Enclosing returns an iterator over the nodes enclosing the current +// current node, starting with the Cursor itself. +// +// Enclosing must not be called on the Root node (whose [Cursor.Node] returns nil). +// +// The types argument, if non-empty, enables type-based filtering of +// events: the sequence includes only enclosing nodes whose type +// matches an element of the types slice. +func (c Cursor) Enclosing(types ...ast.Node) iter.Seq[Cursor] { + if c.index < 0 { + panic("Cursor.Enclosing called on Root node") + } + + mask := maskOf(types) + + return func(yield func(Cursor) bool) { + events := c.in.events + for i := c.index; i >= 0; i = events[i].parent { + if events[i].typ&mask != 0 && !yield(Cursor{c.in, i}) { + break + } + } + } +} + +// Parent returns the parent of the current node. +// +// Parent must not be called on the Root node (whose [Cursor.Node] returns nil). +func (c Cursor) Parent() Cursor { + if c.index < 0 { + panic("Cursor.Parent called on Root node") + } + + return Cursor{c.in, c.in.events[c.index].parent} +} + +// ParentEdge returns the identity of the field in the parent node +// that holds this cursor's node, and if it is a list, the index within it. +// +// For example, f(x, y) is a CallExpr whose three children are Idents. +// f has edge kind [edge.CallExpr_Fun] and index -1. +// x and y have kind [edge.CallExpr_Args] and indices 0 and 1, respectively. +// +// If called on a child of the Root node, it returns ([edge.Invalid], -1). +// +// ParentEdge must not be called on the Root node (whose [Cursor.Node] returns nil). +func (c Cursor) ParentEdge() (edge.Kind, int) { + if c.index < 0 { + panic("Cursor.ParentEdge called on Root node") + } + events := c.in.events + pop := events[c.index].index + return unpackEdgeKindAndIndex(events[pop].parent) +} + +// ChildAt returns the cursor for the child of the +// current node identified by its edge and index. +// The index must be -1 if the edge.Kind is not a slice. +// The indicated child node must exist. +// +// ChildAt must not be called on the Root node (whose [Cursor.Node] returns nil). +// +// Invariant: c.Parent().ChildAt(c.ParentEdge()) == c. +func (c Cursor) ChildAt(k edge.Kind, idx int) Cursor { + target := packEdgeKindAndIndex(k, idx) + + // Unfortunately there's no shortcut to looping. + events := c.in.events + i := c.index + 1 + for { + pop := events[i].index + if pop < i { + break + } + if events[pop].parent == target { + return Cursor{c.in, i} + } + i = pop + 1 + } + panic(fmt.Sprintf("ChildAt(%v, %d): no such child of %v", k, idx, c)) +} + +// Child returns the cursor for n, which must be a direct child of c's Node. +// +// Child must not be called on the Root node (whose [Cursor.Node] returns nil). 
+func (c Cursor) Child(n ast.Node) Cursor { + if c.index < 0 { + panic("Cursor.Child called on Root node") + } + + if false { + // reference implementation + for child := range c.Children() { + if child.Node() == n { + return child + } + } + + } else { + // optimized implementation + events := c.in.events + for i := c.index + 1; events[i].index > i; i = events[i].index + 1 { + if events[i].node == n { + return Cursor{c.in, i} + } + } + } + panic(fmt.Sprintf("Child(%T): not a child of %v", n, c)) +} + +// NextSibling returns the cursor for the next sibling node in the same list +// (for example, of files, decls, specs, statements, fields, or expressions) as +// the current node. It returns (zero, false) if the node is the last node in +// the list, or is not part of a list. +// +// NextSibling must not be called on the Root node. +// +// See note at [Cursor.Children]. +func (c Cursor) NextSibling() (Cursor, bool) { + if c.index < 0 { + panic("Cursor.NextSibling called on Root node") + } + + events := c.in.events + i := events[c.index].index + 1 // after corresponding pop + if i < int32(len(events)) { + if events[i].index > i { // push? + return Cursor{c.in, i}, true + } + } + return Cursor{}, false +} + +// PrevSibling returns the cursor for the previous sibling node in the +// same list (for example, of files, decls, specs, statements, fields, +// or expressions) as the current node. It returns zero if the node is +// the first node in the list, or is not part of a list. +// +// It must not be called on the Root node. +// +// See note at [Cursor.Children]. +func (c Cursor) PrevSibling() (Cursor, bool) { + if c.index < 0 { + panic("Cursor.PrevSibling called on Root node") + } + + events := c.in.events + i := c.index - 1 + if i >= 0 { + if j := events[i].index; j < i { // pop? + return Cursor{c.in, j}, true + } + } + return Cursor{}, false +} + +// FirstChild returns the first direct child of the current node, +// or zero if it has no children. +func (c Cursor) FirstChild() (Cursor, bool) { + events := c.in.events + i := c.index + 1 // i=0 if c is root + if i < int32(len(events)) && events[i].index > i { // push? + return Cursor{c.in, i}, true + } + return Cursor{}, false +} + +// LastChild returns the last direct child of the current node, +// or zero if it has no children. +func (c Cursor) LastChild() (Cursor, bool) { + events := c.in.events + if c.index < 0 { // root? + if len(events) > 0 { + // return push of final event (a pop) + return Cursor{c.in, events[len(events)-1].index}, true + } + } else { + j := events[c.index].index - 1 // before corresponding pop + // Inv: j == c.index if c has no children + // or j is last child's pop. + if j > c.index { // c has children + return Cursor{c.in, events[j].index}, true + } + } + return Cursor{}, false +} + +// Children returns an iterator over the direct children of the +// current node, if any. +// +// When using Children, NextChild, and PrevChild, bear in mind that a +// Node's children may come from different fields, some of which may +// be lists of nodes without a distinguished intervening container +// such as [ast.BlockStmt]. +// +// For example, [ast.CaseClause] has a field List of expressions and a +// field Body of statements, so the children of a CaseClause are a mix +// of expressions and statements. 
Other nodes that have "uncontained" +// list fields include: +// +// - [ast.ValueSpec] (Names, Values) +// - [ast.CompositeLit] (Type, Elts) +// - [ast.IndexListExpr] (X, Indices) +// - [ast.CallExpr] (Fun, Args) +// - [ast.AssignStmt] (Lhs, Rhs) +// +// So, do not assume that the previous sibling of an ast.Stmt is also +// an ast.Stmt, or if it is, that they are executed sequentially, +// unless you have established that, say, its parent is a BlockStmt +// or its [Cursor.ParentEdge] is [edge.BlockStmt_List]. +// For example, given "for S1; ; S2 {}", the predecessor of S2 is S1, +// even though they are not executed in sequence. +func (c Cursor) Children() iter.Seq[Cursor] { + return func(yield func(Cursor) bool) { + c, ok := c.FirstChild() + for ok && yield(c) { + c, ok = c.NextSibling() + } + } +} + +// Contains reports whether c contains or is equal to c2. +// +// Both Cursors must belong to the same [Inspector]; +// neither may be its Root node. +func (c Cursor) Contains(c2 Cursor) bool { + if c.in != c2.in { + panic("different inspectors") + } + events := c.in.events + return c.index <= c2.index && events[c2.index].index <= events[c.index].index +} + +// FindNode returns the cursor for node n if it belongs to the subtree +// rooted at c. It returns zero if n is not found. +func (c Cursor) FindNode(n ast.Node) (Cursor, bool) { + + // FindNode is equivalent to this code, + // but more convenient and 15-20% faster: + if false { + for candidate := range c.Preorder(n) { + if candidate.Node() == n { + return candidate, true + } + } + return Cursor{}, false + } + + // TODO(adonovan): opt: should we assume Node.Pos is accurate + // and combine type-based filtering with position filtering + // like FindByPos? + + mask := maskOf([]ast.Node{n}) + events := c.in.events + + for i, limit := c.indices(); i < limit; i++ { + ev := events[i] + if ev.index > i { // push? + if ev.typ&mask != 0 && ev.node == n { + return Cursor{c.in, i}, true + } + pop := ev.index + if events[pop].typ&mask == 0 { + // Subtree does not contain type of n: skip. + i = pop + } + } + } + return Cursor{}, false +} + +// FindByPos returns the cursor for the innermost node n in the tree +// rooted at c such that n.Pos() <= start && end <= n.End(). +// (For an *ast.File, it uses the bounds n.FileStart-n.FileEnd.) +// +// It returns zero if none is found. +// Precondition: start <= end. +// +// See also [astutil.PathEnclosingInterval], which +// tolerates adjoining whitespace. +func (c Cursor) FindByPos(start, end token.Pos) (Cursor, bool) { + if end < start { + panic("end < start") + } + events := c.in.events + + // This algorithm could be implemented using c.Inspect, + // but it is about 2.5x slower. + + best := int32(-1) // push index of latest (=innermost) node containing range + for i, limit := c.indices(); i < limit; i++ { + ev := events[i] + if ev.index > i { // push? + n := ev.node + var nodeEnd token.Pos + if file, ok := n.(*ast.File); ok { + nodeEnd = file.FileEnd + // Note: files may be out of Pos order. 
+ if file.FileStart > start { + i = ev.index // disjoint, after; skip to next file + continue + } + } else { + nodeEnd = n.End() + if n.Pos() > start { + break // disjoint, after; stop + } + } + // Inv: node.{Pos,FileStart} <= start + if end <= nodeEnd { + // node fully contains target range + best = i + } else if nodeEnd < start { + i = ev.index // disjoint, before; skip forward + } + } + } + if best >= 0 { + return Cursor{c.in, best}, true + } + return Cursor{}, false +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go new file mode 100644 index 0000000000..a703cdfcf9 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go @@ -0,0 +1,311 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package inspector provides helper functions for traversal over the +// syntax trees of a package, including node filtering by type, and +// materialization of the traversal stack. +// +// During construction, the inspector does a complete traversal and +// builds a list of push/pop events and their node type. Subsequent +// method calls that request a traversal scan this list, rather than walk +// the AST, and perform type filtering using efficient bit sets. +// This representation is sometimes called a "balanced parenthesis tree." +// +// Experiments suggest the inspector's traversals are about 2.5x faster +// than [ast.Inspect], but it may take around 5 traversals for this +// benefit to amortize the inspector's construction cost. +// If efficiency is the primary concern, do not use Inspector for +// one-off traversals. +// +// The [Cursor] type provides a more flexible API for efficient +// navigation of syntax trees in all four "cardinal directions". For +// example, traversals may be nested, so you can find each node of +// type A and then search within it for nodes of type B. Or you can +// traverse from a node to its immediate neighbors: its parent, its +// previous and next sibling, or its first and last child. We +// recommend using methods of Cursor in preference to Inspector where +// possible. +package inspector + +// There are four orthogonal features in a traversal: +// 1 type filtering +// 2 pruning +// 3 postorder calls to f +// 4 stack +// Rather than offer all of them in the API, +// only a few combinations are exposed: +// - Preorder is the fastest and has fewest features, +// but is the most commonly needed traversal. +// - Nodes and WithStack both provide pruning and postorder calls, +// even though few clients need it, because supporting two versions +// is not justified. +// More combinations could be supported by expressing them as +// wrappers around a more generic traversal, but this was measured +// and found to degrade performance significantly (30%). + +import ( + "go/ast" + + "golang.org/x/tools/go/ast/edge" +) + +// An Inspector provides methods for inspecting +// (traversing) the syntax trees of a package. +type Inspector struct { + events []event +} + +func packEdgeKindAndIndex(ek edge.Kind, index int) int32 { + return int32(uint32(index+1)<<7 | uint32(ek)) +} + +// unpackEdgeKindAndIndex unpacks the edge kind and edge index (within +// an []ast.Node slice) from the parent field of a pop event. 
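The helpers around this point pack an edge.Kind and a slice index into a single int32: the kind occupies the low 7 bits (hence the [1 << 7] assertion in edge.go) and index+1 the upper 25, so the index of a non-slice field, -1, encodes as 0. A standalone round-trip of that encoding, using a bare uint8 in place of edge.Kind:

package main

import "fmt"

// Standalone copies of the inspector's packing helpers, with a plain uint8
// standing in for edge.Kind. The kind must fit in 7 bits, which the edge
// package guarantees with its compile-time array-bounds assertion.
func pack(kind uint8, index int) int32 {
	return int32(uint32(index+1)<<7 | uint32(kind))
}

func unpack(x int32) (uint8, int) {
	return uint8(x & 0x7f), int(x>>7) - 1
}

func main() {
	x := pack(9, 2) // e.g. the third element of a CallExpr.Args-like slice field
	k, i := unpack(x)
	fmt.Println(x, k, i) // 393 9 2

	y := pack(10, -1) // a non-slice field such as CallExpr.Fun
	k, i = unpack(y)
	fmt.Println(y, k, i) // 10 10 -1
}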
+func unpackEdgeKindAndIndex(x int32) (edge.Kind, int) { + // The "parent" field of a pop node holds the + // edge Kind in the lower 7 bits and the index+1 + // in the upper 25. + return edge.Kind(x & 0x7f), int(x>>7) - 1 +} + +// New returns an Inspector for the specified syntax trees. +func New(files []*ast.File) *Inspector { + return &Inspector{traverse(files)} +} + +// An event represents a push or a pop +// of an ast.Node during a traversal. +type event struct { + node ast.Node + typ uint64 // typeOf(node) on push event, or union of typ strictly between push and pop events on pop events + index int32 // index of corresponding push or pop event + parent int32 // index of parent's push node (push nodes only), or packed edge kind/index (pop nodes only) +} + +// TODO: Experiment with storing only the second word of event.node (unsafe.Pointer). +// Type can be recovered from the sole bit in typ. +// [Tried this, wasn't faster. --adonovan] + +// Preorder visits all the nodes of the files supplied to New in +// depth-first order. It calls f(n) for each node n before it visits +// n's children. +// +// The complete traversal sequence is determined by [ast.Inspect]. +// The types argument, if non-empty, enables type-based filtering of +// events. The function f is called only for nodes whose type +// matches an element of the types slice. +// +// The [Cursor.Preorder] method provides a richer alternative interface. +// Example: +// +// for c := range in.Root().Preorder(types) { ... } +func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { + // Because it avoids postorder calls to f, and the pruning + // check, Preorder is almost twice as fast as Nodes. The two + // features seem to contribute similar slowdowns (~1.4x each). + + // This function is equivalent to the PreorderSeq call below, + // but to avoid the additional dynamic call (which adds 13-35% + // to the benchmarks), we expand it out. + // + // in.PreorderSeq(types...)(func(n ast.Node) bool { + // f(n) + // return true + // }) + + mask := maskOf(types) + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + f(ev.node) + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } +} + +// Nodes visits the nodes of the files supplied to New in depth-first +// order. It calls f(n, true) for each node n before it visits n's +// children. If f returns true, Nodes invokes f recursively for each +// of the non-nil children of the node, followed by a call of +// f(n, false). +// +// The complete traversal sequence is determined by [ast.Inspect]. +// The types argument, if non-empty, enables type-based filtering of +// events. The function f if is called only for nodes whose type +// matches an element of the types slice. +// +// The [Cursor.Inspect] method provides a richer alternative interface. +// Example: +// +// in.Root().Inspect(types, func(c Cursor) bool { +// ... +// return true +// } +func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) { + mask := maskOf(types) + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + pop := ev.index + if ev.typ&mask != 0 { + if !f(ev.node, true) { + i = pop + 1 // jump to corresponding pop + 1 + continue + } + } + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them. 
+ i = pop + continue + } + } else { + // pop + push := ev.index + if in.events[push].typ&mask != 0 { + f(ev.node, false) + } + } + i++ + } +} + +// WithStack visits nodes in a similar manner to Nodes, but it +// supplies each call to f an additional argument, the current +// traversal stack. The stack's first element is the outermost node, +// an *ast.File; its last is the innermost, n. +// +// The [Cursor.Inspect] method provides a richer alternative interface. +// Example: +// +// in.Root().Inspect(types, func(c Cursor) bool { +// stack := slices.Collect(c.Enclosing()) +// ... +// return true +// }) +func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) { + mask := maskOf(types) + var stack []ast.Node + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + pop := ev.index + stack = append(stack, ev.node) + if ev.typ&mask != 0 { + if !f(ev.node, true, stack) { + i = pop + 1 + stack = stack[:len(stack)-1] + continue + } + } + if in.events[pop].typ&mask == 0 { + // Subtrees does not contain types: skip them. + i = pop + continue + } + } else { + // pop + push := ev.index + if in.events[push].typ&mask != 0 { + f(ev.node, false, stack) + } + stack = stack[:len(stack)-1] + } + i++ + } +} + +// traverse builds the table of events representing a traversal. +func traverse(files []*ast.File) []event { + // Preallocate approximate number of events + // based on source file extent of the declarations. + // (We use End-Pos not FileStart-FileEnd to neglect + // the effect of long doc comments.) + // This makes traverse faster by 4x (!). + var extent int + for _, f := range files { + extent += int(f.End() - f.Pos()) + } + // This estimate is based on the net/http package. + capacity := min(extent*33/100, 1e6) // impose some reasonable maximum (1M) + + v := &visitor{ + events: make([]event, 0, capacity), + stack: []item{{index: -1}}, // include an extra event so file nodes have a parent + } + for _, file := range files { + walk(v, edge.Invalid, -1, file) + } + return v.events +} + +type visitor struct { + events []event + stack []item +} + +type item struct { + index int32 // index of current node's push event + parentIndex int32 // index of parent node's push event + typAccum uint64 // accumulated type bits of current node's descendants + edgeKindAndIndex int32 // edge.Kind and index, bit packed +} + +func (v *visitor) push(ek edge.Kind, eindex int, node ast.Node) { + var ( + index = int32(len(v.events)) + parentIndex = v.stack[len(v.stack)-1].index + ) + v.events = append(v.events, event{ + node: node, + parent: parentIndex, + typ: typeOf(node), + index: 0, // (pop index is set later by visitor.pop) + }) + v.stack = append(v.stack, item{ + index: index, + parentIndex: parentIndex, + edgeKindAndIndex: packEdgeKindAndIndex(ek, eindex), + }) + + // 2B nodes ought to be enough for anyone! + if int32(len(v.events)) < 0 { + panic("event index exceeded int32") + } + + // 32M elements in an []ast.Node ought to be enough for anyone! 
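+	// (32M == 1<<25: the round-trip check below re-unpacks the packed
+	// value and panics if the slice index was truncated by the 25-bit
+	// encoding.)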
+ if ek2, eindex2 := unpackEdgeKindAndIndex(packEdgeKindAndIndex(ek, eindex)); ek2 != ek || eindex2 != eindex { + panic("Node slice index exceeded uint25") + } +} + +func (v *visitor) pop(node ast.Node) { + top := len(v.stack) - 1 + current := v.stack[top] + + push := &v.events[current.index] + parent := &v.stack[top-1] + + push.index = int32(len(v.events)) // make push event refer to pop + parent.typAccum |= current.typAccum | push.typ // accumulate type bits into parent + + v.stack = v.stack[:top] + + v.events = append(v.events, event{ + node: node, + typ: current.typAccum, + index: current.index, + parent: current.edgeKindAndIndex, // see [unpackEdgeKindAndIndex] + }) +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/iter.go b/vendor/golang.org/x/tools/go/ast/inspector/iter.go new file mode 100644 index 0000000000..c576dc70ac --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/iter.go @@ -0,0 +1,85 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build go1.23 + +package inspector + +import ( + "go/ast" + "iter" +) + +// PreorderSeq returns an iterator that visits all the +// nodes of the files supplied to New in depth-first order. +// It visits each node n before n's children. +// The complete traversal sequence is determined by ast.Inspect. +// +// The types argument, if non-empty, enables type-based +// filtering of events: only nodes whose type matches an +// element of the types slice are included in the sequence. +func (in *Inspector) PreorderSeq(types ...ast.Node) iter.Seq[ast.Node] { + + // This implementation is identical to Preorder, + // except that it supports breaking out of the loop. + + return func(yield func(ast.Node) bool) { + mask := maskOf(types) + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} + +// All[N] returns an iterator over all the nodes of type N. +// N must be a pointer-to-struct type that implements ast.Node. +// +// Example: +// +// for call := range All[*ast.CallExpr](in) { ... } +func All[N interface { + *S + ast.Node +}, S any](in *Inspector) iter.Seq[N] { + + // To avoid additional dynamic call overheads, + // we duplicate rather than call the logic of PreorderSeq. + + mask := typeOf((N)(nil)) + return func(yield func(N) bool) { + for i := int32(0); i < int32(len(in.events)); { + ev := in.events[i] + if ev.index > i { + // push + if ev.typ&mask != 0 { + if !yield(ev.node.(N)) { + break + } + } + pop := ev.index + if in.events[pop].typ&mask == 0 { + // Subtrees do not contain types: skip them and pop. + i = pop + 1 + continue + } + } + i++ + } + } +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go new file mode 100644 index 0000000000..9852331a3d --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go @@ -0,0 +1,227 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +// This file defines func typeOf(ast.Node) uint64. 
+// +// The initial map-based implementation was too slow; +// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196 + +import ( + "go/ast" + "math" +) + +const ( + nArrayType = iota + nAssignStmt + nBadDecl + nBadExpr + nBadStmt + nBasicLit + nBinaryExpr + nBlockStmt + nBranchStmt + nCallExpr + nCaseClause + nChanType + nCommClause + nComment + nCommentGroup + nCompositeLit + nDeclStmt + nDeferStmt + nEllipsis + nEmptyStmt + nExprStmt + nField + nFieldList + nFile + nForStmt + nFuncDecl + nFuncLit + nFuncType + nGenDecl + nGoStmt + nIdent + nIfStmt + nImportSpec + nIncDecStmt + nIndexExpr + nIndexListExpr + nInterfaceType + nKeyValueExpr + nLabeledStmt + nMapType + nPackage + nParenExpr + nRangeStmt + nReturnStmt + nSelectStmt + nSelectorExpr + nSendStmt + nSliceExpr + nStarExpr + nStructType + nSwitchStmt + nTypeAssertExpr + nTypeSpec + nTypeSwitchStmt + nUnaryExpr + nValueSpec +) + +// typeOf returns a distinct single-bit value that represents the type of n. +// +// Various implementations were benchmarked with BenchmarkNewInspector: +// +// GOGC=off +// - type switch 4.9-5.5ms 2.1ms +// - binary search over a sorted list of types 5.5-5.9ms 2.5ms +// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms +// - linear scan, unordered list 6.4ms 2.7ms +// - hash table 6.5ms 3.1ms +// +// A perfect hash seemed like overkill. +// +// The compiler's switch statement is the clear winner +// as it produces a binary tree in code, +// with constant conditions and good branch prediction. +// (Sadly it is the most verbose in source code.) +// Binary search suffered from poor branch prediction. +func typeOf(n ast.Node) uint64 { + // Fast path: nearly half of all nodes are identifiers. + if _, ok := n.(*ast.Ident); ok { + return 1 << nIdent + } + + // These cases include all nodes encountered by ast.Inspect. 
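+	// (*ast.Ident appears again in the switch even though the fast path
+	// above already handles it, so the switch stays exhaustive on its own.)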
+ switch n.(type) { + case *ast.ArrayType: + return 1 << nArrayType + case *ast.AssignStmt: + return 1 << nAssignStmt + case *ast.BadDecl: + return 1 << nBadDecl + case *ast.BadExpr: + return 1 << nBadExpr + case *ast.BadStmt: + return 1 << nBadStmt + case *ast.BasicLit: + return 1 << nBasicLit + case *ast.BinaryExpr: + return 1 << nBinaryExpr + case *ast.BlockStmt: + return 1 << nBlockStmt + case *ast.BranchStmt: + return 1 << nBranchStmt + case *ast.CallExpr: + return 1 << nCallExpr + case *ast.CaseClause: + return 1 << nCaseClause + case *ast.ChanType: + return 1 << nChanType + case *ast.CommClause: + return 1 << nCommClause + case *ast.Comment: + return 1 << nComment + case *ast.CommentGroup: + return 1 << nCommentGroup + case *ast.CompositeLit: + return 1 << nCompositeLit + case *ast.DeclStmt: + return 1 << nDeclStmt + case *ast.DeferStmt: + return 1 << nDeferStmt + case *ast.Ellipsis: + return 1 << nEllipsis + case *ast.EmptyStmt: + return 1 << nEmptyStmt + case *ast.ExprStmt: + return 1 << nExprStmt + case *ast.Field: + return 1 << nField + case *ast.FieldList: + return 1 << nFieldList + case *ast.File: + return 1 << nFile + case *ast.ForStmt: + return 1 << nForStmt + case *ast.FuncDecl: + return 1 << nFuncDecl + case *ast.FuncLit: + return 1 << nFuncLit + case *ast.FuncType: + return 1 << nFuncType + case *ast.GenDecl: + return 1 << nGenDecl + case *ast.GoStmt: + return 1 << nGoStmt + case *ast.Ident: + return 1 << nIdent + case *ast.IfStmt: + return 1 << nIfStmt + case *ast.ImportSpec: + return 1 << nImportSpec + case *ast.IncDecStmt: + return 1 << nIncDecStmt + case *ast.IndexExpr: + return 1 << nIndexExpr + case *ast.IndexListExpr: + return 1 << nIndexListExpr + case *ast.InterfaceType: + return 1 << nInterfaceType + case *ast.KeyValueExpr: + return 1 << nKeyValueExpr + case *ast.LabeledStmt: + return 1 << nLabeledStmt + case *ast.MapType: + return 1 << nMapType + case *ast.Package: + return 1 << nPackage + case *ast.ParenExpr: + return 1 << nParenExpr + case *ast.RangeStmt: + return 1 << nRangeStmt + case *ast.ReturnStmt: + return 1 << nReturnStmt + case *ast.SelectStmt: + return 1 << nSelectStmt + case *ast.SelectorExpr: + return 1 << nSelectorExpr + case *ast.SendStmt: + return 1 << nSendStmt + case *ast.SliceExpr: + return 1 << nSliceExpr + case *ast.StarExpr: + return 1 << nStarExpr + case *ast.StructType: + return 1 << nStructType + case *ast.SwitchStmt: + return 1 << nSwitchStmt + case *ast.TypeAssertExpr: + return 1 << nTypeAssertExpr + case *ast.TypeSpec: + return 1 << nTypeSpec + case *ast.TypeSwitchStmt: + return 1 << nTypeSwitchStmt + case *ast.UnaryExpr: + return 1 << nUnaryExpr + case *ast.ValueSpec: + return 1 << nValueSpec + } + return 0 +} + +func maskOf(nodes []ast.Node) uint64 { + if len(nodes) == 0 { + return math.MaxUint64 // match all node types + } + var mask uint64 + for _, n := range nodes { + mask |= typeOf(n) + } + return mask +} diff --git a/vendor/golang.org/x/tools/go/ast/inspector/walk.go b/vendor/golang.org/x/tools/go/ast/inspector/walk.go new file mode 100644 index 0000000000..5f1c93c8a7 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/inspector/walk.go @@ -0,0 +1,341 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package inspector + +// This file is a fork of ast.Inspect to reduce unnecessary dynamic +// calls and to gather edge information. +// +// Consistency with the original is ensured by TestInspectAllNodes. 
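+//
+// Editorial sketch (illustrative, not part of the upstream source): the
+// edge kinds recorded by this walk are what [Cursor.ParentEdge] later
+// reports. Assuming an Inspector in := New(files), one might write:
+//
+//	for cur := range in.Root().Preorder((*ast.ReturnStmt)(nil)) {
+//		if ek, _ := cur.ParentEdge(); ek == edge.BlockStmt_List {
+//			// this return statement is a direct child of a block
+//		}
+//	}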
+ +import ( + "fmt" + "go/ast" + + "golang.org/x/tools/go/ast/edge" +) + +func walkList[N ast.Node](v *visitor, ek edge.Kind, list []N) { + for i, node := range list { + walk(v, ek, i, node) + } +} + +func walk(v *visitor, ek edge.Kind, index int, node ast.Node) { + v.push(ek, index, node) + + // walk children + // (the order of the cases matches the order + // of the corresponding node types in ast.go) + switch n := node.(type) { + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + walkList(v, edge.CommentGroup_List, n.List) + + case *ast.Field: + if n.Doc != nil { + walk(v, edge.Field_Doc, -1, n.Doc) + } + walkList(v, edge.Field_Names, n.Names) + if n.Type != nil { + walk(v, edge.Field_Type, -1, n.Type) + } + if n.Tag != nil { + walk(v, edge.Field_Tag, -1, n.Tag) + } + if n.Comment != nil { + walk(v, edge.Field_Comment, -1, n.Comment) + } + + case *ast.FieldList: + walkList(v, edge.FieldList_List, n.List) + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + if n.Elt != nil { + walk(v, edge.Ellipsis_Elt, -1, n.Elt) + } + + case *ast.FuncLit: + walk(v, edge.FuncLit_Type, -1, n.Type) + walk(v, edge.FuncLit_Body, -1, n.Body) + + case *ast.CompositeLit: + if n.Type != nil { + walk(v, edge.CompositeLit_Type, -1, n.Type) + } + walkList(v, edge.CompositeLit_Elts, n.Elts) + + case *ast.ParenExpr: + walk(v, edge.ParenExpr_X, -1, n.X) + + case *ast.SelectorExpr: + walk(v, edge.SelectorExpr_X, -1, n.X) + walk(v, edge.SelectorExpr_Sel, -1, n.Sel) + + case *ast.IndexExpr: + walk(v, edge.IndexExpr_X, -1, n.X) + walk(v, edge.IndexExpr_Index, -1, n.Index) + + case *ast.IndexListExpr: + walk(v, edge.IndexListExpr_X, -1, n.X) + walkList(v, edge.IndexListExpr_Indices, n.Indices) + + case *ast.SliceExpr: + walk(v, edge.SliceExpr_X, -1, n.X) + if n.Low != nil { + walk(v, edge.SliceExpr_Low, -1, n.Low) + } + if n.High != nil { + walk(v, edge.SliceExpr_High, -1, n.High) + } + if n.Max != nil { + walk(v, edge.SliceExpr_Max, -1, n.Max) + } + + case *ast.TypeAssertExpr: + walk(v, edge.TypeAssertExpr_X, -1, n.X) + if n.Type != nil { + walk(v, edge.TypeAssertExpr_Type, -1, n.Type) + } + + case *ast.CallExpr: + walk(v, edge.CallExpr_Fun, -1, n.Fun) + walkList(v, edge.CallExpr_Args, n.Args) + + case *ast.StarExpr: + walk(v, edge.StarExpr_X, -1, n.X) + + case *ast.UnaryExpr: + walk(v, edge.UnaryExpr_X, -1, n.X) + + case *ast.BinaryExpr: + walk(v, edge.BinaryExpr_X, -1, n.X) + walk(v, edge.BinaryExpr_Y, -1, n.Y) + + case *ast.KeyValueExpr: + walk(v, edge.KeyValueExpr_Key, -1, n.Key) + walk(v, edge.KeyValueExpr_Value, -1, n.Value) + + // Types + case *ast.ArrayType: + if n.Len != nil { + walk(v, edge.ArrayType_Len, -1, n.Len) + } + walk(v, edge.ArrayType_Elt, -1, n.Elt) + + case *ast.StructType: + walk(v, edge.StructType_Fields, -1, n.Fields) + + case *ast.FuncType: + if n.TypeParams != nil { + walk(v, edge.FuncType_TypeParams, -1, n.TypeParams) + } + if n.Params != nil { + walk(v, edge.FuncType_Params, -1, n.Params) + } + if n.Results != nil { + walk(v, edge.FuncType_Results, -1, n.Results) + } + + case *ast.InterfaceType: + walk(v, edge.InterfaceType_Methods, -1, n.Methods) + + case *ast.MapType: + walk(v, edge.MapType_Key, -1, n.Key) + walk(v, edge.MapType_Value, -1, n.Value) + + case *ast.ChanType: + walk(v, edge.ChanType_Value, -1, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + walk(v, edge.DeclStmt_Decl, -1, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case 
*ast.LabeledStmt: + walk(v, edge.LabeledStmt_Label, -1, n.Label) + walk(v, edge.LabeledStmt_Stmt, -1, n.Stmt) + + case *ast.ExprStmt: + walk(v, edge.ExprStmt_X, -1, n.X) + + case *ast.SendStmt: + walk(v, edge.SendStmt_Chan, -1, n.Chan) + walk(v, edge.SendStmt_Value, -1, n.Value) + + case *ast.IncDecStmt: + walk(v, edge.IncDecStmt_X, -1, n.X) + + case *ast.AssignStmt: + walkList(v, edge.AssignStmt_Lhs, n.Lhs) + walkList(v, edge.AssignStmt_Rhs, n.Rhs) + + case *ast.GoStmt: + walk(v, edge.GoStmt_Call, -1, n.Call) + + case *ast.DeferStmt: + walk(v, edge.DeferStmt_Call, -1, n.Call) + + case *ast.ReturnStmt: + walkList(v, edge.ReturnStmt_Results, n.Results) + + case *ast.BranchStmt: + if n.Label != nil { + walk(v, edge.BranchStmt_Label, -1, n.Label) + } + + case *ast.BlockStmt: + walkList(v, edge.BlockStmt_List, n.List) + + case *ast.IfStmt: + if n.Init != nil { + walk(v, edge.IfStmt_Init, -1, n.Init) + } + walk(v, edge.IfStmt_Cond, -1, n.Cond) + walk(v, edge.IfStmt_Body, -1, n.Body) + if n.Else != nil { + walk(v, edge.IfStmt_Else, -1, n.Else) + } + + case *ast.CaseClause: + walkList(v, edge.CaseClause_List, n.List) + walkList(v, edge.CaseClause_Body, n.Body) + + case *ast.SwitchStmt: + if n.Init != nil { + walk(v, edge.SwitchStmt_Init, -1, n.Init) + } + if n.Tag != nil { + walk(v, edge.SwitchStmt_Tag, -1, n.Tag) + } + walk(v, edge.SwitchStmt_Body, -1, n.Body) + + case *ast.TypeSwitchStmt: + if n.Init != nil { + walk(v, edge.TypeSwitchStmt_Init, -1, n.Init) + } + walk(v, edge.TypeSwitchStmt_Assign, -1, n.Assign) + walk(v, edge.TypeSwitchStmt_Body, -1, n.Body) + + case *ast.CommClause: + if n.Comm != nil { + walk(v, edge.CommClause_Comm, -1, n.Comm) + } + walkList(v, edge.CommClause_Body, n.Body) + + case *ast.SelectStmt: + walk(v, edge.SelectStmt_Body, -1, n.Body) + + case *ast.ForStmt: + if n.Init != nil { + walk(v, edge.ForStmt_Init, -1, n.Init) + } + if n.Cond != nil { + walk(v, edge.ForStmt_Cond, -1, n.Cond) + } + if n.Post != nil { + walk(v, edge.ForStmt_Post, -1, n.Post) + } + walk(v, edge.ForStmt_Body, -1, n.Body) + + case *ast.RangeStmt: + if n.Key != nil { + walk(v, edge.RangeStmt_Key, -1, n.Key) + } + if n.Value != nil { + walk(v, edge.RangeStmt_Value, -1, n.Value) + } + walk(v, edge.RangeStmt_X, -1, n.X) + walk(v, edge.RangeStmt_Body, -1, n.Body) + + // Declarations + case *ast.ImportSpec: + if n.Doc != nil { + walk(v, edge.ImportSpec_Doc, -1, n.Doc) + } + if n.Name != nil { + walk(v, edge.ImportSpec_Name, -1, n.Name) + } + walk(v, edge.ImportSpec_Path, -1, n.Path) + if n.Comment != nil { + walk(v, edge.ImportSpec_Comment, -1, n.Comment) + } + + case *ast.ValueSpec: + if n.Doc != nil { + walk(v, edge.ValueSpec_Doc, -1, n.Doc) + } + walkList(v, edge.ValueSpec_Names, n.Names) + if n.Type != nil { + walk(v, edge.ValueSpec_Type, -1, n.Type) + } + walkList(v, edge.ValueSpec_Values, n.Values) + if n.Comment != nil { + walk(v, edge.ValueSpec_Comment, -1, n.Comment) + } + + case *ast.TypeSpec: + if n.Doc != nil { + walk(v, edge.TypeSpec_Doc, -1, n.Doc) + } + walk(v, edge.TypeSpec_Name, -1, n.Name) + if n.TypeParams != nil { + walk(v, edge.TypeSpec_TypeParams, -1, n.TypeParams) + } + walk(v, edge.TypeSpec_Type, -1, n.Type) + if n.Comment != nil { + walk(v, edge.TypeSpec_Comment, -1, n.Comment) + } + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + if n.Doc != nil { + walk(v, edge.GenDecl_Doc, -1, n.Doc) + } + walkList(v, edge.GenDecl_Specs, n.Specs) + + case *ast.FuncDecl: + if n.Doc != nil { + walk(v, edge.FuncDecl_Doc, -1, n.Doc) + } + if n.Recv != nil { + walk(v, 
edge.FuncDecl_Recv, -1, n.Recv) + } + walk(v, edge.FuncDecl_Name, -1, n.Name) + walk(v, edge.FuncDecl_Type, -1, n.Type) + if n.Body != nil { + walk(v, edge.FuncDecl_Body, -1, n.Body) + } + + case *ast.File: + if n.Doc != nil { + walk(v, edge.File_Doc, -1, n.Doc) + } + walk(v, edge.File_Name, -1, n.Name) + walkList(v, edge.File_Decls, n.Decls) + // don't walk n.Comments - they have been + // visited already through the individual + // nodes + + default: + // (includes *ast.Package) + panic(fmt.Sprintf("Walk: unexpected node type %T", n)) + } + + v.pop(node) +} diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 89f89dd2dc..680a70ca8f 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -364,12 +364,6 @@ type jsonPackage struct { DepsErrors []*packagesinternal.PackageError } -type jsonPackageError struct { - ImportStack []string - Pos string - Err string -} - func otherFiles(p *jsonPackage) [][]string { return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} } diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go index df14ffd94d..af6a60d75f 100644 --- a/vendor/golang.org/x/tools/go/packages/visit.go +++ b/vendor/golang.org/x/tools/go/packages/visit.go @@ -5,9 +5,11 @@ package packages import ( + "cmp" "fmt" + "iter" "os" - "sort" + "slices" ) // Visit visits all the packages in the import graph whose roots are @@ -16,6 +18,20 @@ import ( // package's dependencies have been visited (postorder). // The boolean result of pre(pkg) determines whether // the imports of package pkg are visited. +// +// Example: +// +// pkgs, err := Load(...) +// if err != nil { ... } +// Visit(pkgs, nil, func(pkg *Package) { +// log.Println(pkg) +// }) +// +// In most cases, it is more convenient to use [Postorder]: +// +// for pkg := range Postorder(pkgs) { +// log.Println(pkg) +// } func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { seen := make(map[*Package]bool) var visit func(*Package) @@ -24,13 +40,8 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { seen[pkg] = true if pre == nil || pre(pkg) { - paths := make([]string, 0, len(pkg.Imports)) - for path := range pkg.Imports { - paths = append(paths, path) - } - sort.Strings(paths) // Imports is a map, this makes visit stable - for _, path := range paths { - visit(pkg.Imports[path]) + for _, imp := range sorted(pkg.Imports) { // for determinism + visit(imp) } } @@ -50,7 +61,7 @@ func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) { func PrintErrors(pkgs []*Package) int { var n int errModules := make(map[*Module]bool) - Visit(pkgs, nil, func(pkg *Package) { + for pkg := range Postorder(pkgs) { for _, err := range pkg.Errors { fmt.Fprintln(os.Stderr, err) n++ @@ -63,6 +74,60 @@ func PrintErrors(pkgs []*Package) int { fmt.Fprintln(os.Stderr, mod.Error.Err) n++ } - }) + } return n } + +// Postorder returns an iterator over the the packages in +// the import graph whose roots are pkg. +// Packages are enumerated in dependencies-first order. 
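+//
+// A minimal usage sketch (illustrative; it assumes packages were loaded
+// with a Mode including NeedImports, and that "log" is imported, as in
+// the [Visit] example above):
+//
+//	pkgs, err := Load(&Config{Mode: NeedName | NeedImports}, "./...")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for pkg := range Postorder(pkgs) {
+//		log.Println(pkg.PkgPath) // dependencies appear before dependents
+//	}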
+func Postorder(pkgs []*Package) iter.Seq[*Package] { + return func(yield func(*Package) bool) { + seen := make(map[*Package]bool) + var visit func(*Package) bool + visit = func(pkg *Package) bool { + if !seen[pkg] { + seen[pkg] = true + for _, imp := range sorted(pkg.Imports) { // for determinism + if !visit(imp) { + return false + } + } + if !yield(pkg) { + return false + } + } + return true + } + for _, pkg := range pkgs { + if !visit(pkg) { + break + } + } + } +} + +// -- copied from golang.org.x/tools/gopls/internal/util/moremaps -- + +// sorted returns an iterator over the entries of m in key order. +func sorted[M ~map[K]V, K cmp.Ordered, V any](m M) iter.Seq2[K, V] { + // TODO(adonovan): use maps.Sorted if proposal #68598 is accepted. + return func(yield func(K, V) bool) { + keys := keySlice(m) + slices.Sort(keys) + for _, k := range keys { + if !yield(k, m[k]) { + break + } + } + } +} + +// KeySlice returns the keys of the map M, like slices.Collect(maps.Keys(m)). +func keySlice[M ~map[K]V, K comparable, V any](m M) []K { + r := make([]K, 0, len(m)) + for k := range m { + r = append(r, k) + } + return r +} diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go index d3c2913bef..6c0c74968f 100644 --- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go +++ b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go @@ -698,7 +698,10 @@ func Object(pkg *types.Package, p Path) (types.Object, error) { } else if false && aliases.Enabled() { // The Enabled check is too expensive, so for now we // simply assume that aliases are not enabled. - // TODO(adonovan): replace with "if true {" when go1.24 is assured. + // + // Now that go1.24 is assured, we should be able to + // replace this with "if true {", but it causes tests + // to fail. TODO(adonovan): investigate. return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t) } diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index b6d542c64e..f035a0b6be 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -11,7 +11,6 @@ import ( "fmt" "go/types" "hash/maphash" - "unsafe" "golang.org/x/tools/internal/typeparams" ) @@ -380,22 +379,8 @@ var theSeed = maphash.MakeSeed() func (hasher) hashTypeName(tname *types.TypeName) uint32 { // Since types.Identical uses == to compare TypeNames, // the Hash function uses maphash.Comparable. - // TODO(adonovan): or will, when it becomes available in go1.24. - // In the meantime we use the pointer's numeric value. - // - // hash := maphash.Comparable(theSeed, tname) - // - // (Another approach would be to hash the name and package - // path, and whether or not it is a package-level typename. It - // is rare for a package to define multiple local types with - // the same name.) 
- ptr := uintptr(unsafe.Pointer(tname)) - if unsafe.Sizeof(ptr) == 8 { - hash := uint64(ptr) - return uint32(hash ^ (hash >> 32)) - } else { - return uint32(ptr) - } + hash := maphash.Comparable(theSeed, tname) + return uint32(hash ^ (hash >> 32)) } // shallowHash computes a hash of t without looking at any of its diff --git a/vendor/golang.org/x/tools/imports/forward.go b/vendor/golang.org/x/tools/imports/forward.go index cb6db8893f..22ae777726 100644 --- a/vendor/golang.org/x/tools/imports/forward.go +++ b/vendor/golang.org/x/tools/imports/forward.go @@ -69,9 +69,3 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) { } return intimp.Process(filename, src, intopt) } - -// VendorlessPath returns the devendorized version of the import path ipath. -// For example, VendorlessPath("foo/bar/vendor/a/b") returns "a/b". -func VendorlessPath(ipath string) string { - return intimp.VendorlessPath(ipath) -} diff --git a/vendor/golang.org/x/tools/internal/event/core/event.go b/vendor/golang.org/x/tools/internal/event/core/event.go index a6cf0e64a4..ade5d1e799 100644 --- a/vendor/golang.org/x/tools/internal/event/core/event.go +++ b/vendor/golang.org/x/tools/internal/event/core/event.go @@ -28,11 +28,6 @@ type Event struct { dynamic []label.Label // dynamically sized storage for remaining labels } -// eventLabelMap implements label.Map for a the labels of an Event. -type eventLabelMap struct { - event Event -} - func (ev Event) At() time.Time { return ev.at } func (ev Event) Format(f fmt.State, r rune) { diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go index 780873e3ae..4a4357d2bd 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go @@ -569,7 +569,6 @@ func (p *iexporter) exportName(obj types.Object) (res string) { type iexporter struct { fset *token.FileSet - out *bytes.Buffer version int shallow bool // don't put types from other packages in the index diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go deleted file mode 100644 index 7586bfaca6..0000000000 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport_go122.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2024 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.22 && !go1.24 - -package gcimporter - -import ( - "go/token" - "go/types" - "unsafe" -) - -// TODO(rfindley): delete this workaround once go1.24 is assured. - -func init() { - // Update markBlack so that it correctly sets the color - // of imported TypeNames. - // - // See the doc comment for markBlack for details. - - type color uint32 - const ( - white color = iota - black - grey - ) - type object struct { - _ *types.Scope - _ token.Pos - _ *types.Package - _ string - _ types.Type - _ uint32 - color_ color - _ token.Pos - } - type typeName struct { - object - } - - // If the size of types.TypeName changes, this will fail to compile. 
- const delta = int64(unsafe.Sizeof(typeName{})) - int64(unsafe.Sizeof(types.TypeName{})) - var _ [-delta * delta]int - - markBlack = func(obj *types.TypeName) { - type uP = unsafe.Pointer - var ptr *typeName - *(*uP)(uP(&ptr)) = uP(obj) - ptr.color_ = black - } -} diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 50b6ca51a6..1b4dc0cb5d 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -16,6 +16,7 @@ import ( "go/types" "io/fs" "io/ioutil" + "maps" "os" "path" "path/filepath" @@ -27,8 +28,6 @@ import ( "unicode" "unicode/utf8" - "maps" - "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/event" "golang.org/x/tools/internal/gocommand" @@ -43,7 +42,7 @@ var importToGroup = []func(localPrefix, importPath string) (num int, ok bool){ if localPrefix == "" { return } - for _, p := range strings.Split(localPrefix, ",") { + for p := range strings.SplitSeq(localPrefix, ",") { if strings.HasPrefix(importPath, p) || strings.TrimSuffix(p, "/") == importPath { return 3, true } @@ -1251,7 +1250,6 @@ func ImportPathToAssumedName(importPath string) string { // gopathResolver implements resolver for GOPATH workspaces. type gopathResolver struct { env *ProcessEnv - walked bool cache *DirInfoCache scanSema chan struct{} // scanSema prevents concurrent scans. } diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go index fe24db9b13..8e9702d84b 100644 --- a/vendor/golang.org/x/tools/internal/modindex/symbols.go +++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go @@ -206,8 +206,7 @@ func isDeprecated(doc *ast.CommentGroup) bool { // go.dev/wiki/Deprecated Paragraph starting 'Deprecated:' // This code fails for /* Deprecated: */, but it's the code from // gopls/internal/analysis/deprecated - lines := strings.Split(doc.Text(), "\n\n") - for _, line := range lines { + for line := range strings.SplitSeq(doc.Text(), "\n\n") { if strings.HasPrefix(line, "Deprecated:") { return true } diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go index 77cf8d2181..96ad6c5821 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/deps.go +++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go @@ -12,348 +12,354 @@ type pkginfo struct { } var deps = [...]pkginfo{ - {"archive/tar", "\x03j\x03E5\x01\v\x01#\x01\x01\x02\x05\n\x02\x01\x02\x02\v"}, - {"archive/zip", "\x02\x04`\a\x16\x0205\x01+\x05\x01\x11\x03\x02\r\x04"}, - {"bufio", "\x03j}F\x13"}, - {"bytes", "m+R\x03\fH\x02\x02"}, + {"archive/tar", "\x03k\x03E;\x01\n\x01$\x01\x01\x02\x05\b\x02\x01\x02\x02\f"}, + {"archive/zip", "\x02\x04a\a\x03\x12\x021;\x01+\x05\x01\x0f\x03\x02\x0e\x04"}, + {"bufio", "\x03k\x83\x01D\x14"}, + {"bytes", "n*Y\x03\fG\x02\x02"}, {"cmp", ""}, - {"compress/bzip2", "\x02\x02\xe6\x01C"}, - {"compress/flate", "\x02k\x03z\r\x025\x01\x03"}, - {"compress/gzip", "\x02\x04`\a\x03\x15eU"}, - {"compress/lzw", "\x02k\x03z"}, - {"compress/zlib", "\x02\x04`\a\x03\x13\x01f"}, - {"container/heap", "\xae\x02"}, + {"compress/bzip2", "\x02\x02\xed\x01A"}, + {"compress/flate", "\x02l\x03\x80\x01\f\x033\x01\x03"}, + {"compress/gzip", "\x02\x04a\a\x03\x14lT"}, + {"compress/lzw", "\x02l\x03\x80\x01"}, + {"compress/zlib", "\x02\x04a\a\x03\x12\x01m"}, + {"container/heap", "\xb3\x02"}, {"container/list", ""}, {"container/ring", ""}, - {"context", "m\\i\x01\f"}, - {"crypto", "\x83\x01gE"}, 
- {"crypto/aes", "\x10\n\a\x8e\x02"}, - {"crypto/cipher", "\x03\x1e\x01\x01\x1d\x11\x1c,Q"}, - {"crypto/des", "\x10\x13\x1d-,\x96\x01\x03"}, - {"crypto/dsa", "@\x04)}\x0e"}, - {"crypto/ecdh", "\x03\v\f\x0e\x04\x14\x04\r\x1c}"}, - {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\x16\x01\x04\f\x01\x1c}\x0e\x04L\x01"}, - {"crypto/ed25519", "\x0e\x1c\x16\n\a\x1c}E"}, - {"crypto/elliptic", "0=}\x0e:"}, - {"crypto/fips140", " \x05\x90\x01"}, - {"crypto/hkdf", "-\x12\x01-\x16"}, - {"crypto/hmac", "\x1a\x14\x11\x01\x112"}, + {"context", "n\\m\x01\r"}, + {"crypto", "\x83\x01nC"}, + {"crypto/aes", "\x10\n\a\x93\x02"}, + {"crypto/cipher", "\x03\x1e\x01\x01\x1e\x11\x1c+X"}, + {"crypto/des", "\x10\x13\x1e-+\x9b\x01\x03"}, + {"crypto/dsa", "A\x04)\x83\x01\r"}, + {"crypto/ecdh", "\x03\v\f\x0e\x04\x15\x04\r\x1c\x83\x01"}, + {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\a\v\x05\x01\x04\f\x01\x1c\x83\x01\r\x05K\x01"}, + {"crypto/ed25519", "\x0e\x1c\x11\x06\n\a\x1c\x83\x01C"}, + {"crypto/elliptic", "0>\x83\x01\r9"}, + {"crypto/fips140", " \x05"}, + {"crypto/hkdf", "-\x13\x01-\x15"}, + {"crypto/hmac", "\x1a\x14\x12\x01\x111"}, {"crypto/internal/boring", "\x0e\x02\rf"}, - {"crypto/internal/boring/bbig", "\x1a\xde\x01M"}, - {"crypto/internal/boring/bcache", "\xb3\x02\x12"}, + {"crypto/internal/boring/bbig", "\x1a\xe4\x01M"}, + {"crypto/internal/boring/bcache", "\xb8\x02\x13"}, {"crypto/internal/boring/sig", ""}, - {"crypto/internal/cryptotest", "\x03\r\n)\x0e\x19\x06\x13\x12#\a\t\x11\x11\x11\x1b\x01\f\r\x05\n"}, - {"crypto/internal/entropy", "E"}, - {"crypto/internal/fips140", ">/}9\r\x15"}, - {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x04\x01\x01\x05*\x8c\x016"}, - {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x04\x01\x06*\x8a\x01"}, - {"crypto/internal/fips140/alias", "\xc5\x02"}, - {"crypto/internal/fips140/bigmod", "%\x17\x01\x06*\x8c\x01"}, - {"crypto/internal/fips140/check", " \x0e\x06\b\x02\xac\x01["}, - {"crypto/internal/fips140/check/checktest", "%\xfe\x01\""}, - {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x04\b\x01(}\x0f9"}, - {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\f1}\x0f9"}, - {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x067}H"}, - {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v7\xc2\x01\x03"}, - {"crypto/internal/fips140/edwards25519", "%\a\f\x041\x8c\x019"}, - {"crypto/internal/fips140/edwards25519/field", "%\x13\x041\x8c\x01"}, - {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x069"}, - {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x017"}, - {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x041"}, - {"crypto/internal/fips140/nistec", "%\f\a\x041\x8c\x01*\x0f\x13"}, - {"crypto/internal/fips140/nistec/fiat", "%\x135\x8c\x01"}, - {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x069"}, - {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x025}H"}, - {"crypto/internal/fips140/sha256", "\x03\x1d\x1c\x01\x06*\x8c\x01"}, - {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x04\x010\x8c\x01L"}, - {"crypto/internal/fips140/sha512", "\x03\x1d\x1c\x01\x06*\x8c\x01"}, - {"crypto/internal/fips140/ssh", " \x05"}, - {"crypto/internal/fips140/subtle", "#"}, - {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x027"}, - {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\b1"}, + {"crypto/internal/cryptotest", "\x03\r\n\x06$\x0e\x19\x06\x12\x12 \x04\a\t\x16\x01\x11\x11\x1b\x01\a\x05\b\x03\x05\v"}, + {"crypto/internal/entropy", "F"}, + {"crypto/internal/fips140", "?/\x15\xa7\x01\v\x16"}, + 
{"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x05\x01\x01\x05*\x92\x014"}, + {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x05\x01\x06*\x8f\x01"}, + {"crypto/internal/fips140/alias", "\xcb\x02"}, + {"crypto/internal/fips140/bigmod", "%\x18\x01\x06*\x92\x01"}, + {"crypto/internal/fips140/check", " \x0e\x06\t\x02\xb2\x01Z"}, + {"crypto/internal/fips140/check/checktest", "%\x85\x02!"}, + {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x05\b\x01(\x83\x01\x0f7"}, + {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\r1\x83\x01\x0f7"}, + {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x068\x15nF"}, + {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v8\xc6\x01\x03"}, + {"crypto/internal/fips140/edwards25519", "%\a\f\x051\x92\x017"}, + {"crypto/internal/fips140/edwards25519/field", "%\x13\x051\x92\x01"}, + {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x06:\x15"}, + {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x018\x15"}, + {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x051"}, + {"crypto/internal/fips140/nistec", "%\f\a\x051\x92\x01*\r\x14"}, + {"crypto/internal/fips140/nistec/fiat", "%\x136\x92\x01"}, + {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x06:\x15"}, + {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x026\x15nF"}, + {"crypto/internal/fips140/sha256", "\x03\x1d\x1d\x01\x06*\x15}"}, + {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x05\x010\x92\x01K"}, + {"crypto/internal/fips140/sha512", "\x03\x1d\x1d\x01\x06*\x15}"}, + {"crypto/internal/fips140/ssh", "%^"}, + {"crypto/internal/fips140/subtle", "#\x1a\xc3\x01"}, + {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x028\x15"}, + {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\t1\x15"}, + {"crypto/internal/fips140cache", "\xaa\x02\r&"}, {"crypto/internal/fips140deps", ""}, {"crypto/internal/fips140deps/byteorder", "\x99\x01"}, - {"crypto/internal/fips140deps/cpu", "\xad\x01\a"}, - {"crypto/internal/fips140deps/godebug", "\xb5\x01"}, - {"crypto/internal/fips140hash", "5\x1a4\xc2\x01"}, - {"crypto/internal/fips140only", "'\r\x01\x01M25"}, + {"crypto/internal/fips140deps/cpu", "\xae\x01\a"}, + {"crypto/internal/fips140deps/godebug", "\xb6\x01"}, + {"crypto/internal/fips140hash", "5\x1b3\xc8\x01"}, + {"crypto/internal/fips140only", "'\r\x01\x01M3;"}, {"crypto/internal/fips140test", ""}, - {"crypto/internal/hpke", "\x0e\x01\x01\x03\x1a\x1d#,`N"}, - {"crypto/internal/impl", "\xb0\x02"}, - {"crypto/internal/randutil", "\xea\x01\x12"}, - {"crypto/internal/sysrand", "mi!\x1f\r\x0f\x01\x01\v\x06"}, - {"crypto/internal/sysrand/internal/seccomp", "m"}, - {"crypto/md5", "\x0e2-\x16\x16`"}, + {"crypto/internal/hpke", "\x0e\x01\x01\x03\x053#+gM"}, + {"crypto/internal/impl", "\xb5\x02"}, + {"crypto/internal/randutil", "\xf1\x01\x12"}, + {"crypto/internal/sysrand", "nn! 
\r\r\x01\x01\f\x06"}, + {"crypto/internal/sysrand/internal/seccomp", "n"}, + {"crypto/md5", "\x0e3-\x15\x16g"}, {"crypto/mlkem", "/"}, - {"crypto/pbkdf2", "2\r\x01-\x16"}, - {"crypto/rand", "\x1a\x06\a\x19\x04\x01(}\x0eM"}, - {"crypto/rc4", "#\x1d-\xc2\x01"}, - {"crypto/rsa", "\x0e\f\x01\t\x0f\f\x01\x04\x06\a\x1c\x03\x1325\r\x01"}, - {"crypto/sha1", "\x0e\f&-\x16\x16\x14L"}, + {"crypto/pbkdf2", "2\x0e\x01-\x15"}, + {"crypto/rand", "\x1a\x06\a\x1a\x04\x01(\x83\x01\rM"}, + {"crypto/rc4", "#\x1e-\xc6\x01"}, + {"crypto/rsa", "\x0e\f\x01\t\x0f\r\x01\x04\x06\a\x1c\x03\x123;\f\x01"}, + {"crypto/sha1", "\x0e\f'\x03*\x15\x16\x15R"}, {"crypto/sha256", "\x0e\f\x1aO"}, - {"crypto/sha3", "\x0e'N\xc2\x01"}, + {"crypto/sha3", "\x0e'N\xc8\x01"}, {"crypto/sha512", "\x0e\f\x1cM"}, - {"crypto/subtle", "8\x96\x01U"}, - {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x03\x01\a\x01\v\x02\n\x01\b\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x13\x16\x14\b5\x16\x16\r\n\x01\x01\x01\x02\x01\f\x06\x02\x01"}, - {"crypto/tls/internal/fips140tls", " \x93\x02"}, - {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x011\x03\x02\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x032\x01\x02\t\x01\x01\x01\a\x10\x05\x01\x06\x02\x05\f\x01\x02\r\x02\x01\x01\x02\x03\x01"}, - {"crypto/x509/pkix", "c\x06\a\x88\x01G"}, - {"database/sql", "\x03\nJ\x16\x03z\f\x06\"\x05\n\x02\x03\x01\f\x02\x02\x02"}, - {"database/sql/driver", "\r`\x03\xae\x01\x11\x10"}, - {"debug/buildinfo", "\x03W\x02\x01\x01\b\a\x03`\x18\x02\x01+\x0f "}, - {"debug/dwarf", "\x03c\a\x03z1\x13\x01\x01"}, - {"debug/elf", "\x03\x06P\r\a\x03`\x19\x01,\x19\x01\x15"}, - {"debug/gosym", "\x03c\n\xbe\x01\x01\x01\x02"}, - {"debug/macho", "\x03\x06P\r\n`\x1a,\x19\x01"}, - {"debug/pe", "\x03\x06P\r\a\x03`\x1a,\x19\x01\x15"}, - {"debug/plan9obj", "f\a\x03`\x1a,"}, - {"embed", "m+:\x18\x01T"}, + {"crypto/subtle", "8\x9b\x01W"}, + {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x02\x01\x01\a\x01\r\n\x01\t\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x12\x16\x15\b;\x16\x16\r\b\x01\x01\x01\x02\x01\r\x06\x02\x01\x0f"}, + {"crypto/tls/internal/fips140tls", "\x17\xa1\x02"}, + {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x012\x05\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x038\x01\x02\b\x01\x01\x02\a\x10\x05\x01\x06\x02\x05\n\x01\x02\x0e\x02\x01\x01\x02\x03\x01"}, + {"crypto/x509/pkix", "d\x06\a\x8d\x01G"}, + {"database/sql", "\x03\nK\x16\x03\x80\x01\v\a\"\x05\b\x02\x03\x01\r\x02\x02\x02"}, + {"database/sql/driver", "\ra\x03\xb4\x01\x0f\x11"}, + {"debug/buildinfo", "\x03X\x02\x01\x01\b\a\x03e\x19\x02\x01+\x0f\x1f"}, + {"debug/dwarf", "\x03d\a\x03\x80\x011\x11\x01\x01"}, + {"debug/elf", "\x03\x06Q\r\a\x03e\x1a\x01,\x17\x01\x16"}, + {"debug/gosym", "\x03d\n\xc2\x01\x01\x01\x02"}, + {"debug/macho", "\x03\x06Q\r\ne\x1b,\x17\x01"}, + {"debug/pe", "\x03\x06Q\r\a\x03e\x1b,\x17\x01\x16"}, + {"debug/plan9obj", "g\a\x03e\x1b,"}, + {"embed", "n*@\x19\x01S"}, {"embed/internal/embedtest", ""}, {"encoding", ""}, - {"encoding/ascii85", "\xea\x01E"}, - {"encoding/asn1", "\x03j\x03\x87\x01\x01&\x0f\x02\x01\x0f\x03\x01"}, - {"encoding/base32", "\xea\x01C\x02"}, - {"encoding/base64", "\x99\x01QC\x02"}, - {"encoding/binary", "m}\r'\x0f\x05"}, - {"encoding/csv", "\x02\x01j\x03zF\x11\x02"}, - {"encoding/gob", "\x02_\x05\a\x03`\x1a\f\x01\x02\x1d\b\x14\x01\x0e\x02"}, - {"encoding/hex", "m\x03zC\x03"}, - {"encoding/json", "\x03\x01]\x04\b\x03z\r'\x0f\x02\x01\x02\x0f\x01\x01\x02"}, - {"encoding/pem", "\x03b\b}C\x03"}, - {"encoding/xml", "\x02\x01^\f\x03z4\x05\f\x01\x02\x0f\x02"}, - 
{"errors", "\xc9\x01|"}, - {"expvar", "jK9\t\n\x15\r\n\x02\x03\x01\x10"}, - {"flag", "a\f\x03z,\b\x05\n\x02\x01\x0f"}, - {"fmt", "mE8\r\x1f\b\x0f\x02\x03\x11"}, - {"go/ast", "\x03\x01l\x0f\x01j\x03)\b\x0f\x02\x01"}, - {"go/ast/internal/tests", ""}, - {"go/build", "\x02\x01j\x03\x01\x03\x02\a\x02\x01\x17\x1e\x04\x02\t\x14\x12\x01+\x01\x04\x01\a\n\x02\x01\x11\x02\x02"}, - {"go/build/constraint", "m\xc2\x01\x01\x11\x02"}, - {"go/constant", "p\x10w\x01\x016\x01\x02\x11"}, - {"go/doc", "\x04l\x01\x06\t=-1\x12\x02\x01\x11\x02"}, - {"go/doc/comment", "\x03m\xbd\x01\x01\x01\x01\x11\x02"}, - {"go/format", "\x03m\x01\f\x01\x02jF"}, - {"go/importer", "s\a\x01\x01\x04\x01i9"}, - {"go/internal/gccgoimporter", "\x02\x01W\x13\x03\x05\v\x01g\x02,\x01\x05\x13\x01\v\b"}, - {"go/internal/gcimporter", "\x02n\x10\x01/\x05\x0e',\x17\x03\x02"}, - {"go/internal/srcimporter", "p\x01\x02\n\x03\x01i,\x01\x05\x14\x02\x13"}, - {"go/parser", "\x03j\x03\x01\x03\v\x01j\x01+\x06\x14"}, - {"go/printer", "p\x01\x03\x03\tj\r\x1f\x17\x02\x01\x02\n\x05\x02"}, - {"go/scanner", "\x03m\x10j2\x12\x01\x12\x02"}, - {"go/token", "\x04l\xbd\x01\x02\x03\x01\x0e\x02"}, - {"go/types", "\x03\x01\x06c\x03\x01\x04\b\x03\x02\x15\x1e\x06+\x04\x03\n%\a\n\x01\x01\x01\x02\x01\x0e\x02\x02"}, - {"go/version", "\xba\x01v"}, - {"hash", "\xea\x01"}, - {"hash/adler32", "m\x16\x16"}, - {"hash/crc32", "m\x16\x16\x14\x85\x01\x01\x12"}, - {"hash/crc64", "m\x16\x16\x99\x01"}, - {"hash/fnv", "m\x16\x16`"}, - {"hash/maphash", "\x94\x01\x05\x1b\x03@N"}, - {"html", "\xb0\x02\x02\x11"}, - {"html/template", "\x03g\x06\x19,5\x01\v \x05\x01\x02\x03\x0e\x01\x02\v\x01\x03\x02"}, - {"image", "\x02k\x1f^\x0f6\x03\x01"}, + {"encoding/ascii85", "\xf1\x01C"}, + {"encoding/asn1", "\x03k\x03\x8c\x01\x01'\r\x02\x01\x10\x03\x01"}, + {"encoding/base32", "\xf1\x01A\x02"}, + {"encoding/base64", "\x99\x01XA\x02"}, + {"encoding/binary", "n\x83\x01\f(\r\x05"}, + {"encoding/csv", "\x02\x01k\x03\x80\x01D\x12\x02"}, + {"encoding/gob", "\x02`\x05\a\x03e\x1b\v\x01\x03\x1d\b\x12\x01\x0f\x02"}, + {"encoding/hex", "n\x03\x80\x01A\x03"}, + {"encoding/json", "\x03\x01^\x04\b\x03\x80\x01\f(\r\x02\x01\x02\x10\x01\x01\x02"}, + {"encoding/pem", "\x03c\b\x83\x01A\x03"}, + {"encoding/xml", "\x02\x01_\f\x03\x80\x014\x05\n\x01\x02\x10\x02"}, + {"errors", "\xca\x01\x81\x01"}, + {"expvar", "kK?\b\v\x15\r\b\x02\x03\x01\x11"}, + {"flag", "b\f\x03\x80\x01,\b\x05\b\x02\x01\x10"}, + {"fmt", "nE>\f \b\r\x02\x03\x12"}, + {"go/ast", "\x03\x01m\x0e\x01q\x03)\b\r\x02\x01"}, + {"go/build", "\x02\x01k\x03\x01\x02\x02\a\x02\x01\x17\x1f\x04\x02\t\x19\x13\x01+\x01\x04\x01\a\b\x02\x01\x12\x02\x02"}, + {"go/build/constraint", "n\xc6\x01\x01\x12\x02"}, + {"go/constant", "q\x0f}\x01\x024\x01\x02\x12"}, + {"go/doc", "\x04m\x01\x05\t>31\x10\x02\x01\x12\x02"}, + {"go/doc/comment", "\x03n\xc1\x01\x01\x01\x01\x12\x02"}, + {"go/format", "\x03n\x01\v\x01\x02qD"}, + {"go/importer", "s\a\x01\x01\x04\x01p9"}, + {"go/internal/gccgoimporter", "\x02\x01X\x13\x03\x04\v\x01n\x02,\x01\x05\x11\x01\f\b"}, + {"go/internal/gcimporter", "\x02o\x0f\x010\x05\x0e-,\x15\x03\x02"}, + {"go/internal/srcimporter", "q\x01\x01\n\x03\x01p,\x01\x05\x12\x02\x14"}, + {"go/parser", "\x03k\x03\x01\x02\v\x01q\x01+\x06\x12"}, + {"go/printer", "q\x01\x02\x03\tq\f \x15\x02\x01\x02\v\x05\x02"}, + {"go/scanner", "\x03n\x0fq2\x10\x01\x13\x02"}, + {"go/token", "\x04m\x83\x01>\x02\x03\x01\x0f\x02"}, + {"go/types", "\x03\x01\x06d\x03\x01\x03\b\x03\x02\x15\x1f\x061\x04\x03\t \x06\a\b\x01\x01\x01\x02\x01\x0f\x02\x02"}, + {"go/version", "\xbb\x01z"}, + {"hash", 
"\xf1\x01"}, + {"hash/adler32", "n\x15\x16"}, + {"hash/crc32", "n\x15\x16\x15\x89\x01\x01\x13"}, + {"hash/crc64", "n\x15\x16\x9e\x01"}, + {"hash/fnv", "n\x15\x16g"}, + {"hash/maphash", "\x83\x01\x11!\x03\x93\x01"}, + {"html", "\xb5\x02\x02\x12"}, + {"html/template", "\x03h\x06\x18-;\x01\n!\x05\x01\x02\x03\f\x01\x02\f\x01\x03\x02"}, + {"image", "\x02l\x1ee\x0f4\x03\x01"}, {"image/color", ""}, {"image/color/palette", "\x8c\x01"}, {"image/draw", "\x8b\x01\x01\x04"}, - {"image/gif", "\x02\x01\x05e\x03\x1b\x01\x01\x01\vQ"}, + {"image/gif", "\x02\x01\x05f\x03\x1a\x01\x01\x01\vX"}, {"image/internal/imageutil", "\x8b\x01"}, - {"image/jpeg", "\x02k\x1e\x01\x04Z"}, - {"image/png", "\x02\a]\n\x13\x02\x06\x01^E"}, - {"index/suffixarray", "\x03c\a}\r*\f\x01"}, - {"internal/abi", "\xb4\x01\x91\x01"}, - {"internal/asan", "\xc5\x02"}, - {"internal/bisect", "\xa3\x02\x0f\x01"}, - {"internal/buildcfg", "pG_\x06\x02\x05\f\x01"}, - {"internal/bytealg", "\xad\x01\x98\x01"}, + {"image/jpeg", "\x02l\x1d\x01\x04a"}, + {"image/png", "\x02\a^\n\x12\x02\x06\x01eC"}, + {"index/suffixarray", "\x03d\a\x83\x01\f+\n\x01"}, + {"internal/abi", "\xb5\x01\x96\x01"}, + {"internal/asan", "\xcb\x02"}, + {"internal/bisect", "\xaa\x02\r\x01"}, + {"internal/buildcfg", "qGe\x06\x02\x05\n\x01"}, + {"internal/bytealg", "\xae\x01\x9d\x01"}, {"internal/byteorder", ""}, {"internal/cfg", ""}, - {"internal/chacha8rand", "\x99\x01\x1b\x91\x01"}, + {"internal/cgrouptest", "q[Q\x06\x0f\x02\x01\x04\x01"}, + {"internal/chacha8rand", "\x99\x01\x15\a\x96\x01"}, {"internal/copyright", ""}, {"internal/coverage", ""}, {"internal/coverage/calloc", ""}, - {"internal/coverage/cfile", "j\x06\x17\x16\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x01\x1f,\x06\a\f\x01\x03\f\x06"}, - {"internal/coverage/cformat", "\x04l-\x04I\f7\x01\x02\f"}, - {"internal/coverage/cmerge", "p-Z"}, - {"internal/coverage/decodecounter", "f\n-\v\x02@,\x19\x16"}, - {"internal/coverage/decodemeta", "\x02d\n\x17\x16\v\x02@,"}, - {"internal/coverage/encodecounter", "\x02d\n-\f\x01\x02>\f \x17"}, - {"internal/coverage/encodemeta", "\x02\x01c\n\x13\x04\x16\r\x02>,/"}, - {"internal/coverage/pods", "\x04l-y\x06\x05\f\x02\x01"}, - {"internal/coverage/rtcov", "\xc5\x02"}, - {"internal/coverage/slicereader", "f\nz["}, - {"internal/coverage/slicewriter", "pz"}, - {"internal/coverage/stringtab", "p8\x04>"}, + {"internal/coverage/cfile", "k\x06\x16\x17\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x02$,\x06\a\n\x01\x03\r\x06"}, + {"internal/coverage/cformat", "\x04m-\x04O\v6\x01\x02\r"}, + {"internal/coverage/cmerge", "q-_"}, + {"internal/coverage/decodecounter", "g\n-\v\x02F,\x17\x17"}, + {"internal/coverage/decodemeta", "\x02e\n\x16\x17\v\x02F,"}, + {"internal/coverage/encodecounter", "\x02e\n-\f\x01\x02D\v!\x15"}, + {"internal/coverage/encodemeta", "\x02\x01d\n\x12\x04\x17\r\x02D,."}, + {"internal/coverage/pods", "\x04m-\x7f\x06\x05\n\x02\x01"}, + {"internal/coverage/rtcov", "\xcb\x02"}, + {"internal/coverage/slicereader", "g\n\x80\x01Z"}, + {"internal/coverage/slicewriter", "q\x80\x01"}, + {"internal/coverage/stringtab", "q8\x04D"}, {"internal/coverage/test", ""}, {"internal/coverage/uleb128", ""}, - {"internal/cpu", "\xc5\x02"}, - {"internal/dag", "\x04l\xbd\x01\x03"}, - {"internal/diff", "\x03m\xbe\x01\x02"}, - {"internal/exportdata", "\x02\x01j\x03\x03]\x1a,\x01\x05\x13\x01\x02"}, - {"internal/filepathlite", "m+:\x19B"}, - {"internal/fmtsort", "\x04\x9a\x02\x0f"}, - {"internal/fuzz", "\x03\nA\x18\x04\x03\x03\x01\f\x0355\r\x02\x1d\x01\x05\x02\x05\f\x01\x02\x01\x01\v\x04\x02"}, + {"internal/cpu", 
"\xcb\x02"}, + {"internal/dag", "\x04m\xc1\x01\x03"}, + {"internal/diff", "\x03n\xc2\x01\x02"}, + {"internal/exportdata", "\x02\x01k\x03\x02c\x1b,\x01\x05\x11\x01\x02"}, + {"internal/filepathlite", "n*@\x1a@"}, + {"internal/fmtsort", "\x04\xa1\x02\r"}, + {"internal/fuzz", "\x03\nB\x18\x04\x03\x03\x01\v\x036;\f\x03\x1d\x01\x05\x02\x05\n\x01\x02\x01\x01\f\x04\x02"}, {"internal/goarch", ""}, - {"internal/godebug", "\x96\x01 |\x01\x12"}, + {"internal/godebug", "\x96\x01!\x80\x01\x01\x13"}, {"internal/godebugs", ""}, {"internal/goexperiment", ""}, {"internal/goos", ""}, - {"internal/goroot", "\x96\x02\x01\x05\x14\x02"}, + {"internal/goroot", "\x9d\x02\x01\x05\x12\x02"}, {"internal/gover", "\x04"}, {"internal/goversion", ""}, {"internal/itoa", ""}, - {"internal/lazyregexp", "\x96\x02\v\x0f\x02"}, - {"internal/lazytemplate", "\xea\x01,\x1a\x02\v"}, - {"internal/msan", "\xc5\x02"}, + {"internal/lazyregexp", "\x9d\x02\v\r\x02"}, + {"internal/lazytemplate", "\xf1\x01,\x18\x02\f"}, + {"internal/msan", "\xcb\x02"}, {"internal/nettrace", ""}, - {"internal/obscuretestdata", "e\x85\x01,"}, - {"internal/oserror", "m"}, - {"internal/pkgbits", "\x03K\x18\a\x03\x05\vj\x0e\x1e\r\f\x01"}, + {"internal/obscuretestdata", "f\x8b\x01,"}, + {"internal/oserror", "n"}, + {"internal/pkgbits", "\x03L\x18\a\x03\x04\vq\r\x1f\r\n\x01"}, {"internal/platform", ""}, - {"internal/poll", "mO\x1a\x149\x0f\x01\x01\v\x06"}, - {"internal/profile", "\x03\x04f\x03z7\r\x01\x01\x0f"}, + {"internal/poll", "nO\x1f\x159\r\x01\x01\f\x06"}, + {"internal/profile", "\x03\x04g\x03\x80\x017\v\x01\x01\x10"}, {"internal/profilerecord", ""}, - {"internal/race", "\x94\x01\xb1\x01"}, - {"internal/reflectlite", "\x94\x01 3<\""}, - {"internal/runtime/atomic", "\xc5\x02"}, - {"internal/runtime/exithook", "\xca\x01{"}, - {"internal/runtime/maps", "\x94\x01\x01\x1f\v\t\x05\x01w"}, - {"internal/runtime/math", "\xb4\x01"}, - {"internal/runtime/sys", "\xb4\x01\x04"}, - {"internal/runtime/syscall", "\xc5\x02"}, - {"internal/saferio", "\xea\x01["}, - {"internal/singleflight", "\xb2\x02"}, - {"internal/stringslite", "\x98\x01\xad\x01"}, - {"internal/sync", "\x94\x01 \x14k\x12"}, - {"internal/synctest", "\xc5\x02"}, - {"internal/syscall/execenv", "\xb4\x02"}, - {"internal/syscall/unix", "\xa3\x02\x10\x01\x11"}, - {"internal/sysinfo", "\x02\x01\xaa\x01=,\x1a\x02"}, + {"internal/race", "\x94\x01\xb7\x01"}, + {"internal/reflectlite", "\x94\x01!9\b\x13\x01\a\x03E;\x01\x03\a\x01\x03\x02\x02\x01\x02\x06\x02\x01\x01\n\x01\x01\x05\x01\x02\x05\b\x01\x01\x01\x02\x01\r\x02\x02\x02\b\x01\x01\x01"}, + {"net/http/cgi", "\x02Q\x1b\x03\x80\x01\x04\a\v\x01\x13\x01\x01\x01\x04\x01\x05\x02\b\x02\x01\x10\x0e"}, + {"net/http/cookiejar", "\x04j\x03\x96\x01\x01\b\f\x16\x03\x02\x0e\x04"}, + {"net/http/fcgi", "\x02\x01\nZ\a\x03\x80\x01\x16\x01\x01\x14\x18\x02\x0e"}, + {"net/http/httptest", "\x02\x01\nF\x02\x1b\x01\x80\x01\x04\x12\x01\n\t\x02\x17\x01\x02\x0e\x0e"}, + {"net/http/httptrace", "\rFnF\x14\n "}, + {"net/http/httputil", "\x02\x01\na\x03\x80\x01\x04\x0f\x03\x01\x05\x02\x01\v\x01\x19\x02\x0e\x0e"}, + {"net/http/internal", "\x02\x01k\x03\x80\x01"}, + {"net/http/internal/ascii", "\xb5\x02\x12"}, + {"net/http/internal/httpcommon", "\ra\x03\x9c\x01\x0e\x01\x17\x01\x01\x02\x1c\x02"}, + {"net/http/internal/testcert", "\xb5\x02"}, + {"net/http/pprof", "\x02\x01\nd\x18-\x11*\x04\x13\x14\x01\r\x04\x03\x01\x02\x01\x10"}, {"net/internal/cgotest", ""}, - {"net/internal/socktest", "p\xc2\x01\x02"}, - {"net/mail", "\x02k\x03z\x04\x0f\x03\x14\x1c\x02\r\x04"}, - {"net/netip", 
"\x04i+\x01#;\x026\x15"}, - {"net/rpc", "\x02f\x05\x03\x10\n`\x04\x12\x01\x1d\x0f\x03\x02"}, - {"net/rpc/jsonrpc", "j\x03\x03z\x16\x11!"}, - {"net/smtp", "\x19.\v\x13\b\x03z\x16\x14\x1c"}, - {"net/textproto", "\x02\x01j\x03z\r\t/\x01\x02\x13"}, - {"net/url", "m\x03\x86\x01%\x12\x02\x01\x15"}, - {"os", "m+\x01\x18\x03\b\t\r\x03\x01\x04\x10\x018\n\x05\x01\x01\v\x06"}, - {"os/exec", "\x03\n`H \x01\x14\x01+\x06\a\f\x01\x04\v"}, - {"os/exec/internal/fdtest", "\xb4\x02"}, - {"os/signal", "\r\x89\x02\x17\x05\x02"}, - {"os/user", "\x02\x01j\x03z,\r\f\x01\x02"}, - {"path", "m+\xab\x01"}, - {"path/filepath", "m+\x19:+\r\n\x03\x04\x0f"}, - {"plugin", "m"}, - {"reflect", "m'\x04\x1c\b\f\x04\x02\x19\x10,\f\x03\x0f\x02\x02"}, + {"net/internal/socktest", "q\xc6\x01\x02"}, + {"net/mail", "\x02l\x03\x80\x01\x04\x0f\x03\x14\x1a\x02\x0e\x04"}, + {"net/netip", "\x04j*\x01$@\x034\x16"}, + {"net/rpc", "\x02g\x05\x03\x0f\ng\x04\x12\x01\x1d\r\x03\x02"}, + {"net/rpc/jsonrpc", "k\x03\x03\x80\x01\x16\x11\x1f"}, + {"net/smtp", "\x19/\v\x13\b\x03\x80\x01\x16\x14\x1a"}, + {"net/textproto", "\x02\x01k\x03\x80\x01\f\n-\x01\x02\x14"}, + {"net/url", "n\x03\x8b\x01&\x10\x02\x01\x16"}, + {"os", "n*\x01\x19\x03\b\t\x12\x03\x01\x05\x10\x018\b\x05\x01\x01\f\x06"}, + {"os/exec", "\x03\naH%\x01\x15\x01+\x06\a\n\x01\x04\f"}, + {"os/exec/internal/fdtest", "\xb9\x02"}, + {"os/signal", "\r\x90\x02\x15\x05\x02"}, + {"os/user", "\x02\x01k\x03\x80\x01,\r\n\x01\x02"}, + {"path", "n*\xb1\x01"}, + {"path/filepath", "n*\x1a@+\r\b\x03\x04\x10"}, + {"plugin", "n"}, + {"reflect", "n&\x04\x1d\b\f\x06\x04\x1b\x06\t-\n\x03\x10\x02\x02"}, {"reflect/internal/example1", ""}, {"reflect/internal/example2", ""}, - {"regexp", "\x03\xe7\x018\v\x02\x01\x02\x0f\x02"}, - {"regexp/syntax", "\xad\x02\x01\x01\x01\x11\x02"}, - {"runtime", "\x94\x01\x04\x01\x02\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x03\x0fd"}, - {"runtime/coverage", "\x9f\x01K"}, - {"runtime/debug", "pUQ\r\n\x02\x01\x0f\x06"}, - {"runtime/internal/startlinetest", ""}, - {"runtime/internal/wasitest", ""}, - {"runtime/metrics", "\xb6\x01A,\""}, - {"runtime/pprof", "\x02\x01\x01\x03\x06Y\a\x03$3#\r\x1f\r\n\x01\x01\x01\x02\x02\b\x03\x06"}, - {"runtime/race", "\xab\x02"}, + {"regexp", "\x03\xee\x018\t\x02\x01\x02\x10\x02"}, + {"regexp/syntax", "\xb2\x02\x01\x01\x01\x02\x10\x02"}, + {"runtime", "\x94\x01\x04\x01\x03\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x02\x01\x01\x04\x10c"}, + {"runtime/coverage", "\xa0\x01Q"}, + {"runtime/debug", "qUW\r\b\x02\x01\x10\x06"}, + {"runtime/metrics", "\xb7\x01F-!"}, + {"runtime/pprof", "\x02\x01\x01\x03\x06Z\a\x03#4)\f \r\b\x01\x01\x01\x02\x02\t\x03\x06"}, + {"runtime/race", "\xb0\x02"}, {"runtime/race/internal/amd64v1", ""}, - {"runtime/trace", "\rcz9\x0f\x01\x12"}, - {"slices", "\x04\xe9\x01\fL"}, - {"sort", "\xc9\x0104"}, - {"strconv", "m+:%\x02J"}, - {"strings", "m'\x04:\x18\x03\f9\x0f\x02\x02"}, + {"runtime/trace", "\ra\x03w\t9\b\x05\x01\r\x06"}, + {"slices", "\x04\xf0\x01\fK"}, + {"sort", "\xca\x0162"}, + {"strconv", "n*@%\x03I"}, + {"strings", "n&\x04@\x19\x03\f7\x10\x02\x02"}, {"structs", ""}, - {"sync", "\xc8\x01\vP\x10\x12"}, - {"sync/atomic", "\xc5\x02"}, - {"syscall", "m(\x03\x01\x1b\b\x03\x03\x06\aT\n\x05\x01\x12"}, - {"testing", "\x03\n`\x02\x01X\x0f\x13\r\x04\x1b\x06\x02\x05\x02\a\x01\x02\x01\x02\x01\f\x02\x02\x02"}, - {"testing/fstest", "m\x03z\x01\v%\x12\x03\b\a"}, - {"testing/internal/testdeps", "\x02\v\xa6\x01'\x10,\x03\x05\x03\b\a\x02\r"}, - {"testing/iotest", "\x03j\x03z\x04"}, - {"testing/quick", "o\x01\x87\x01\x04#\x12\x0f"}, - 
{"testing/slogtest", "\r`\x03\x80\x01.\x05\x12\n"}, - {"text/scanner", "\x03mz,+\x02"}, - {"text/tabwriter", "pzY"}, - {"text/template", "m\x03B8\x01\v\x1f\x01\x05\x01\x02\x05\r\x02\f\x03\x02"}, - {"text/template/parse", "\x03m\xb3\x01\f\x01\x11\x02"}, - {"time", "m+\x1d\x1d'*\x0f\x02\x11"}, - {"time/tzdata", "m\xc7\x01\x11"}, + {"sync", "\xc9\x01\x10\x01P\x0e\x13"}, + {"sync/atomic", "\xcb\x02"}, + {"syscall", "n'\x03\x01\x1c\b\x03\x03\x06\vV\b\x05\x01\x13"}, + {"testing", "\x03\na\x02\x01X\x14\x14\f\x05\x1b\x06\x02\x05\x02\x05\x01\x02\x01\x02\x01\r\x02\x02\x02"}, + {"testing/fstest", "n\x03\x80\x01\x01\n&\x10\x03\b\b"}, + {"testing/internal/testdeps", "\x02\v\xa7\x01-\x10,\x03\x05\x03\x06\a\x02\x0e"}, + {"testing/iotest", "\x03k\x03\x80\x01\x04"}, + {"testing/quick", "p\x01\x8c\x01\x05#\x10\x10"}, + {"testing/slogtest", "\ra\x03\x86\x01.\x05\x10\v"}, + {"testing/synctest", "\xda\x01`\x11"}, + {"text/scanner", "\x03n\x80\x01,*\x02"}, + {"text/tabwriter", "q\x80\x01X"}, + {"text/template", "n\x03B>\x01\n \x01\x05\x01\x02\x05\v\x02\r\x03\x02"}, + {"text/template/parse", "\x03n\xb9\x01\n\x01\x12\x02"}, + {"time", "n*\x1e\"(*\r\x02\x12"}, + {"time/tzdata", "n\xcb\x01\x12"}, {"unicode", ""}, {"unicode/utf16", ""}, {"unicode/utf8", ""}, - {"unique", "\x94\x01>\x01P\x0f\x13\x12"}, + {"unique", "\x94\x01!#\x01Q\r\x01\x13\x12"}, {"unsafe", ""}, - {"vendor/golang.org/x/crypto/chacha20", "\x10V\a\x8c\x01*'"}, - {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10V\a\xd9\x01\x04\x01\a"}, - {"vendor/golang.org/x/crypto/cryptobyte", "c\n\x03\x88\x01&!\n"}, + {"vendor/golang.org/x/crypto/chacha20", "\x10W\a\x92\x01*&"}, + {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10W\a\xde\x01\x04\x01\a"}, + {"vendor/golang.org/x/crypto/cryptobyte", "d\n\x03\x8d\x01' \n"}, {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""}, - {"vendor/golang.org/x/crypto/internal/alias", "\xc5\x02"}, - {"vendor/golang.org/x/crypto/internal/poly1305", "Q\x15\x93\x01"}, - {"vendor/golang.org/x/net/dns/dnsmessage", "m"}, - {"vendor/golang.org/x/net/http/httpguts", "\x80\x02\x14\x1c\x13\r"}, - {"vendor/golang.org/x/net/http/httpproxy", "m\x03\x90\x01\x15\x01\x1a\x13\r"}, - {"vendor/golang.org/x/net/http2/hpack", "\x03j\x03zH"}, - {"vendor/golang.org/x/net/idna", "p\x87\x019\x13\x10\x02\x01"}, - {"vendor/golang.org/x/net/nettest", "\x03c\a\x03z\x11\x05\x16\x01\f\f\x01\x02\x02\x01\n"}, - {"vendor/golang.org/x/sys/cpu", "\x96\x02\r\f\x01\x15"}, - {"vendor/golang.org/x/text/secure/bidirule", "m\xd6\x01\x11\x01"}, - {"vendor/golang.org/x/text/transform", "\x03j}Y"}, - {"vendor/golang.org/x/text/unicode/bidi", "\x03\be~@\x15"}, - {"vendor/golang.org/x/text/unicode/norm", "f\nzH\x11\x11"}, - {"weak", "\x94\x01\x8f\x01\""}, + {"vendor/golang.org/x/crypto/internal/alias", "\xcb\x02"}, + {"vendor/golang.org/x/crypto/internal/poly1305", "R\x15\x99\x01"}, + {"vendor/golang.org/x/net/dns/dnsmessage", "n"}, + {"vendor/golang.org/x/net/http/httpguts", "\x87\x02\x14\x1a\x14\r"}, + {"vendor/golang.org/x/net/http/httpproxy", "n\x03\x96\x01\x10\x05\x01\x18\x14\r"}, + {"vendor/golang.org/x/net/http2/hpack", "\x03k\x03\x80\x01F"}, + {"vendor/golang.org/x/net/idna", "q\x8c\x018\x14\x10\x02\x01"}, + {"vendor/golang.org/x/net/nettest", "\x03d\a\x03\x80\x01\x11\x05\x16\x01\f\n\x01\x02\x02\x01\v"}, + {"vendor/golang.org/x/sys/cpu", "\x9d\x02\r\n\x01\x16"}, + {"vendor/golang.org/x/text/secure/bidirule", "n\xdb\x01\x11\x01"}, + {"vendor/golang.org/x/text/transform", "\x03k\x83\x01X"}, + {"vendor/golang.org/x/text/unicode/bidi", "\x03\bf\x84\x01>\x16"}, + 
{"vendor/golang.org/x/text/unicode/norm", "g\n\x80\x01F\x12\x11"}, + {"weak", "\x94\x01\x96\x01!"}, } diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index 64f0326b64..c1faa50d36 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -502,6 +502,7 @@ var PackageSymbols = map[string][]Symbol{ {"MD4", Const, 0, ""}, {"MD5", Const, 0, ""}, {"MD5SHA1", Const, 0, ""}, + {"MessageSigner", Type, 25, ""}, {"PrivateKey", Type, 0, ""}, {"PublicKey", Type, 2, ""}, {"RIPEMD160", Const, 0, ""}, @@ -517,6 +518,7 @@ var PackageSymbols = map[string][]Symbol{ {"SHA512", Const, 0, ""}, {"SHA512_224", Const, 5, ""}, {"SHA512_256", Const, 5, ""}, + {"SignMessage", Func, 25, "func(signer Signer, rand io.Reader, msg []byte, opts SignerOpts) (signature []byte, err error)"}, {"Signer", Type, 4, ""}, {"SignerOpts", Type, 4, ""}, }, @@ -600,10 +602,12 @@ var PackageSymbols = map[string][]Symbol{ {"X25519", Func, 20, "func() Curve"}, }, "crypto/ecdsa": { + {"(*PrivateKey).Bytes", Method, 25, ""}, {"(*PrivateKey).ECDH", Method, 20, ""}, {"(*PrivateKey).Equal", Method, 15, ""}, {"(*PrivateKey).Public", Method, 4, ""}, {"(*PrivateKey).Sign", Method, 4, ""}, + {"(*PublicKey).Bytes", Method, 25, ""}, {"(*PublicKey).ECDH", Method, 20, ""}, {"(*PublicKey).Equal", Method, 15, ""}, {"(PrivateKey).Add", Method, 0, ""}, @@ -619,6 +623,8 @@ var PackageSymbols = map[string][]Symbol{ {"(PublicKey).ScalarBaseMult", Method, 0, ""}, {"(PublicKey).ScalarMult", Method, 0, ""}, {"GenerateKey", Func, 0, "func(c elliptic.Curve, rand io.Reader) (*PrivateKey, error)"}, + {"ParseRawPrivateKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PrivateKey, error)"}, + {"ParseUncompressedPublicKey", Func, 25, "func(curve elliptic.Curve, data []byte) (*PublicKey, error)"}, {"PrivateKey", Type, 0, ""}, {"PrivateKey.D", Field, 0, ""}, {"PrivateKey.PublicKey", Field, 0, ""}, @@ -815,6 +821,7 @@ var PackageSymbols = map[string][]Symbol{ "crypto/sha3": { {"(*SHA3).AppendBinary", Method, 24, ""}, {"(*SHA3).BlockSize", Method, 24, ""}, + {"(*SHA3).Clone", Method, 25, ""}, {"(*SHA3).MarshalBinary", Method, 24, ""}, {"(*SHA3).Reset", Method, 24, ""}, {"(*SHA3).Size", Method, 24, ""}, @@ -967,6 +974,7 @@ var PackageSymbols = map[string][]Symbol{ {"Config.GetCertificate", Field, 4, ""}, {"Config.GetClientCertificate", Field, 8, ""}, {"Config.GetConfigForClient", Field, 8, ""}, + {"Config.GetEncryptedClientHelloKeys", Field, 25, ""}, {"Config.InsecureSkipVerify", Field, 0, ""}, {"Config.KeyLogWriter", Field, 8, ""}, {"Config.MaxVersion", Field, 2, ""}, @@ -5463,6 +5471,7 @@ var PackageSymbols = map[string][]Symbol{ {"ParenExpr.X", Field, 0, ""}, {"Pkg", Const, 0, ""}, {"Preorder", Func, 23, "func(root Node) iter.Seq[Node]"}, + {"PreorderStack", Func, 25, "func(root Node, stack []Node, f func(n Node, stack []Node) bool)"}, {"Print", Func, 0, "func(fset *token.FileSet, x any) error"}, {"RECV", Const, 0, ""}, {"RangeStmt", Type, 0, ""}, @@ -5933,6 +5942,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*File).SetLines", Method, 0, ""}, {"(*File).SetLinesForContent", Method, 0, ""}, {"(*File).Size", Method, 0, ""}, + {"(*FileSet).AddExistingFiles", Method, 25, ""}, {"(*FileSet).AddFile", Method, 0, ""}, {"(*FileSet).Base", Method, 0, ""}, {"(*FileSet).File", Method, 0, ""}, @@ -6382,7 +6392,7 @@ var PackageSymbols = map[string][]Symbol{ {"Label", Type, 5, ""}, {"LocalVar", Const, 25, ""}, 
{"LookupFieldOrMethod", Func, 5, "func(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool)"}, - {"LookupSelection", Func, 25, ""}, + {"LookupSelection", Func, 25, "func(T Type, addressable bool, pkg *Package, name string) (Selection, bool)"}, {"Map", Type, 5, ""}, {"MethodExpr", Const, 5, ""}, {"MethodSet", Type, 5, ""}, @@ -6490,9 +6500,11 @@ var PackageSymbols = map[string][]Symbol{ {"Lang", Func, 22, "func(x string) string"}, }, "hash": { + {"Cloner", Type, 25, ""}, {"Hash", Type, 0, ""}, {"Hash32", Type, 0, ""}, {"Hash64", Type, 0, ""}, + {"XOF", Type, 25, ""}, }, "hash/adler32": { {"Checksum", Func, 0, "func(data []byte) uint32"}, @@ -6533,6 +6545,7 @@ var PackageSymbols = map[string][]Symbol{ }, "hash/maphash": { {"(*Hash).BlockSize", Method, 14, ""}, + {"(*Hash).Clone", Method, 25, ""}, {"(*Hash).Reset", Method, 14, ""}, {"(*Hash).Seed", Method, 14, ""}, {"(*Hash).SetSeed", Method, 14, ""}, @@ -7133,7 +7146,7 @@ var PackageSymbols = map[string][]Symbol{ {"FormatFileInfo", Func, 21, "func(info FileInfo) string"}, {"Glob", Func, 16, "func(fsys FS, pattern string) (matches []string, err error)"}, {"GlobFS", Type, 16, ""}, - {"Lstat", Func, 25, ""}, + {"Lstat", Func, 25, "func(fsys FS, name string) (FileInfo, error)"}, {"ModeAppend", Const, 16, ""}, {"ModeCharDevice", Const, 16, ""}, {"ModeDevice", Const, 16, ""}, @@ -7158,7 +7171,7 @@ var PackageSymbols = map[string][]Symbol{ {"ReadDirFile", Type, 16, ""}, {"ReadFile", Func, 16, "func(fsys FS, name string) ([]byte, error)"}, {"ReadFileFS", Type, 16, ""}, - {"ReadLink", Func, 25, ""}, + {"ReadLink", Func, 25, "func(fsys FS, name string) (string, error)"}, {"ReadLinkFS", Type, 25, ""}, {"SkipAll", Var, 20, ""}, {"SkipDir", Var, 16, ""}, @@ -7275,6 +7288,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Record).Attrs", Method, 21, ""}, {"(Record).Clone", Method, 21, ""}, {"(Record).NumAttrs", Method, 21, ""}, + {"(Record).Source", Method, 25, ""}, {"(Value).Any", Method, 21, ""}, {"(Value).Bool", Method, 21, ""}, {"(Value).Duration", Method, 21, ""}, @@ -7306,6 +7320,7 @@ var PackageSymbols = map[string][]Symbol{ {"Float64", Func, 21, "func(key string, v float64) Attr"}, {"Float64Value", Func, 21, "func(v float64) Value"}, {"Group", Func, 21, "func(key string, args ...any) Attr"}, + {"GroupAttrs", Func, 25, "func(key string, attrs ...Attr) Attr"}, {"GroupValue", Func, 21, "func(as ...Attr) Value"}, {"Handler", Type, 21, ""}, {"HandlerOptions", Type, 21, ""}, @@ -7916,7 +7931,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Writer).WriteField", Method, 0, ""}, {"ErrMessageTooLarge", Var, 9, ""}, {"File", Type, 0, ""}, - {"FileContentDisposition", Func, 25, ""}, + {"FileContentDisposition", Func, 25, "func(fieldname string, filename string) string"}, {"FileHeader", Type, 0, ""}, {"FileHeader.Filename", Field, 0, ""}, {"FileHeader.Header", Field, 0, ""}, @@ -8294,6 +8309,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*Client).PostForm", Method, 0, ""}, {"(*Cookie).String", Method, 0, ""}, {"(*Cookie).Valid", Method, 18, ""}, + {"(*CrossOriginProtection).AddInsecureBypassPattern", Method, 25, ""}, + {"(*CrossOriginProtection).AddTrustedOrigin", Method, 25, ""}, + {"(*CrossOriginProtection).Check", Method, 25, ""}, + {"(*CrossOriginProtection).Handler", Method, 25, ""}, + {"(*CrossOriginProtection).SetDenyHandler", Method, 25, ""}, {"(*MaxBytesError).Error", Method, 19, ""}, {"(*ProtocolError).Error", Method, 0, ""}, {"(*ProtocolError).Is", Method, 21, ""}, @@ -8388,6 +8408,7 @@ var 
PackageSymbols = map[string][]Symbol{ {"Cookie.Unparsed", Field, 0, ""}, {"Cookie.Value", Field, 0, ""}, {"CookieJar", Type, 0, ""}, + {"CrossOriginProtection", Type, 25, ""}, {"DefaultClient", Var, 0, ""}, {"DefaultMaxHeaderBytes", Const, 0, ""}, {"DefaultMaxIdleConnsPerHost", Const, 0, ""}, @@ -8460,6 +8481,7 @@ var PackageSymbols = map[string][]Symbol{ {"MethodPost", Const, 6, ""}, {"MethodPut", Const, 6, ""}, {"MethodTrace", Const, 6, ""}, + {"NewCrossOriginProtection", Func, 25, "func() *CrossOriginProtection"}, {"NewFileTransport", Func, 0, "func(fs FileSystem) RoundTripper"}, {"NewFileTransportFS", Func, 22, "func(fsys fs.FS) RoundTripper"}, {"NewRequest", Func, 0, "func(method string, url string, body io.Reader) (*Request, error)"}, @@ -9174,15 +9196,19 @@ var PackageSymbols = map[string][]Symbol{ {"(*Root).Link", Method, 25, ""}, {"(*Root).Lstat", Method, 24, ""}, {"(*Root).Mkdir", Method, 24, ""}, + {"(*Root).MkdirAll", Method, 25, ""}, {"(*Root).Name", Method, 24, ""}, {"(*Root).Open", Method, 24, ""}, {"(*Root).OpenFile", Method, 24, ""}, {"(*Root).OpenRoot", Method, 24, ""}, + {"(*Root).ReadFile", Method, 25, ""}, {"(*Root).Readlink", Method, 25, ""}, {"(*Root).Remove", Method, 24, ""}, + {"(*Root).RemoveAll", Method, 25, ""}, {"(*Root).Rename", Method, 25, ""}, {"(*Root).Stat", Method, 24, ""}, {"(*Root).Symlink", Method, 25, ""}, + {"(*Root).WriteFile", Method, 25, ""}, {"(*SyscallError).Error", Method, 0, ""}, {"(*SyscallError).Timeout", Method, 10, ""}, {"(*SyscallError).Unwrap", Method, 13, ""}, @@ -9623,6 +9649,7 @@ var PackageSymbols = map[string][]Symbol{ {"StructTag", Type, 0, ""}, {"Swapper", Func, 8, "func(slice any) func(i int, j int)"}, {"Type", Type, 0, ""}, + {"TypeAssert", Func, 25, "func[T any](v Value) (T, bool)"}, {"TypeFor", Func, 22, "func[T any]() Type"}, {"TypeOf", Func, 0, "func(i any) Type"}, {"Uint", Const, 0, ""}, @@ -9909,6 +9936,7 @@ var PackageSymbols = map[string][]Symbol{ {"SetBlockProfileRate", Func, 1, "func(rate int)"}, {"SetCPUProfileRate", Func, 0, "func(hz int)"}, {"SetCgoTraceback", Func, 7, "func(version int, traceback unsafe.Pointer, context unsafe.Pointer, symbolizer unsafe.Pointer)"}, + {"SetDefaultGOMAXPROCS", Func, 25, "func()"}, {"SetFinalizer", Func, 0, "func(obj any, finalizer any)"}, {"SetMutexProfileFraction", Func, 8, "func(rate int) int"}, {"Stack", Func, 0, "func(buf []byte, all bool) int"}, @@ -10021,11 +10049,20 @@ var PackageSymbols = map[string][]Symbol{ {"WriteHeapProfile", Func, 0, "func(w io.Writer) error"}, }, "runtime/trace": { + {"(*FlightRecorder).Enabled", Method, 25, ""}, + {"(*FlightRecorder).Start", Method, 25, ""}, + {"(*FlightRecorder).Stop", Method, 25, ""}, + {"(*FlightRecorder).WriteTo", Method, 25, ""}, {"(*Region).End", Method, 11, ""}, {"(*Task).End", Method, 11, ""}, + {"FlightRecorder", Type, 25, ""}, + {"FlightRecorderConfig", Type, 25, ""}, + {"FlightRecorderConfig.MaxBytes", Field, 25, ""}, + {"FlightRecorderConfig.MinAge", Field, 25, ""}, {"IsEnabled", Func, 11, "func() bool"}, {"Log", Func, 11, "func(ctx context.Context, category string, message string)"}, {"Logf", Func, 11, "func(ctx context.Context, category string, format string, args ...any)"}, + {"NewFlightRecorder", Func, 25, "func(cfg FlightRecorderConfig) *FlightRecorder"}, {"NewTask", Func, 11, "func(pctx context.Context, taskType string) (ctx context.Context, task *Task)"}, {"Region", Type, 11, ""}, {"Start", Func, 5, "func(w io.Writer) error"}, @@ -16642,6 +16679,7 @@ var PackageSymbols = map[string][]Symbol{ {"ValueOf", Func, 0, ""}, 
}, "testing": { + {"(*B).Attr", Method, 25, ""}, {"(*B).Chdir", Method, 24, ""}, {"(*B).Cleanup", Method, 14, ""}, {"(*B).Context", Method, 24, ""}, @@ -16658,6 +16696,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).Logf", Method, 0, ""}, {"(*B).Loop", Method, 24, ""}, {"(*B).Name", Method, 8, ""}, + {"(*B).Output", Method, 25, ""}, {"(*B).ReportAllocs", Method, 1, ""}, {"(*B).ReportMetric", Method, 13, ""}, {"(*B).ResetTimer", Method, 0, ""}, @@ -16674,6 +16713,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).StopTimer", Method, 0, ""}, {"(*B).TempDir", Method, 15, ""}, {"(*F).Add", Method, 18, ""}, + {"(*F).Attr", Method, 25, ""}, {"(*F).Chdir", Method, 24, ""}, {"(*F).Cleanup", Method, 18, ""}, {"(*F).Context", Method, 24, ""}, @@ -16689,6 +16729,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*F).Log", Method, 18, ""}, {"(*F).Logf", Method, 18, ""}, {"(*F).Name", Method, 18, ""}, + {"(*F).Output", Method, 25, ""}, {"(*F).Setenv", Method, 18, ""}, {"(*F).Skip", Method, 18, ""}, {"(*F).SkipNow", Method, 18, ""}, @@ -16697,6 +16738,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*F).TempDir", Method, 18, ""}, {"(*M).Run", Method, 4, ""}, {"(*PB).Next", Method, 3, ""}, + {"(*T).Attr", Method, 25, ""}, {"(*T).Chdir", Method, 24, ""}, {"(*T).Cleanup", Method, 14, ""}, {"(*T).Context", Method, 24, ""}, @@ -16712,6 +16754,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*T).Log", Method, 0, ""}, {"(*T).Logf", Method, 0, ""}, {"(*T).Name", Method, 8, ""}, + {"(*T).Output", Method, 25, ""}, {"(*T).Parallel", Method, 0, ""}, {"(*T).Run", Method, 7, ""}, {"(*T).Setenv", Method, 17, ""}, @@ -16834,6 +16877,10 @@ var PackageSymbols = map[string][]Symbol{ {"Run", Func, 22, "func(t *testing.T, newHandler func(*testing.T) slog.Handler, result func(*testing.T) map[string]any)"}, {"TestHandler", Func, 21, "func(h slog.Handler, results func() []map[string]any) error"}, }, + "testing/synctest": { + {"Test", Func, 25, "func(t *testing.T, f func(*testing.T))"}, + {"Wait", Func, 25, "func()"}, + }, "text/scanner": { {"(*Position).IsValid", Method, 0, ""}, {"(*Scanner).Init", Method, 0, ""}, @@ -17347,6 +17394,7 @@ var PackageSymbols = map[string][]Symbol{ {"CaseRange.Lo", Field, 0, ""}, {"CaseRanges", Var, 0, ""}, {"Categories", Var, 0, ""}, + {"CategoryAliases", Var, 25, ""}, {"Caucasian_Albanian", Var, 4, ""}, {"Cc", Var, 0, ""}, {"Cf", Var, 0, ""}, @@ -17354,6 +17402,7 @@ var PackageSymbols = map[string][]Symbol{ {"Cham", Var, 0, ""}, {"Cherokee", Var, 0, ""}, {"Chorasmian", Var, 16, ""}, + {"Cn", Var, 25, ""}, {"Co", Var, 0, ""}, {"Common", Var, 0, ""}, {"Coptic", Var, 0, ""}, @@ -17432,6 +17481,7 @@ var PackageSymbols = map[string][]Symbol{ {"Khojki", Var, 4, ""}, {"Khudawadi", Var, 4, ""}, {"L", Var, 0, ""}, + {"LC", Var, 25, ""}, {"Lao", Var, 0, ""}, {"Latin", Var, 0, ""}, {"Lepcha", Var, 0, ""}, diff --git a/vendor/golang.org/x/tools/internal/typesinternal/fx.go b/vendor/golang.org/x/tools/internal/typesinternal/fx.go new file mode 100644 index 0000000000..93acff2170 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/fx.go @@ -0,0 +1,49 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/ast" + "go/token" + "go/types" +) + +// NoEffects reports whether the expression has no side effects, i.e., it +// does not modify the memory state. 
This function is conservative: it may +// return false even when the expression has no effect. +func NoEffects(info *types.Info, expr ast.Expr) bool { + noEffects := true + ast.Inspect(expr, func(n ast.Node) bool { + switch v := n.(type) { + case nil, *ast.Ident, *ast.BasicLit, *ast.BinaryExpr, *ast.ParenExpr, + *ast.SelectorExpr, *ast.IndexExpr, *ast.SliceExpr, *ast.TypeAssertExpr, + *ast.StarExpr, *ast.CompositeLit, *ast.ArrayType, *ast.StructType, + *ast.MapType, *ast.InterfaceType, *ast.KeyValueExpr: + // No effect + case *ast.UnaryExpr: + // Channel send <-ch has effects + if v.Op == token.ARROW { + noEffects = false + } + case *ast.CallExpr: + // Type conversion has no effects + if !info.Types[v.Fun].IsType() { + // TODO(adonovan): Add a case for built-in functions without side + // effects (by using callsPureBuiltin from tools/internal/refactor/inline) + + noEffects = false + } + case *ast.FuncLit: + // A FuncLit has no effects, but do not descend into it. + return false + default: + // All other expressions have effects + noEffects = false + } + + return noEffects + }) + return noEffects +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go new file mode 100644 index 0000000000..f2affec4fb --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/isnamed.go @@ -0,0 +1,71 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/types" + "slices" +) + +// IsTypeNamed reports whether t is (or is an alias for) a +// package-level defined type with the given package path and one of +// the given names. It returns false if t is nil. +// +// This function avoids allocating the concatenation of "pkg.Name", +// which is important for the performance of syntax matching. +func IsTypeNamed(t types.Type, pkgPath string, names ...string) bool { + if named, ok := types.Unalias(t).(*types.Named); ok { + tname := named.Obj() + return tname != nil && + IsPackageLevel(tname) && + tname.Pkg().Path() == pkgPath && + slices.Contains(names, tname.Name()) + } + return false +} + +// IsPointerToNamed reports whether t is (or is an alias for) a pointer to a +// package-level defined type with the given package path and one of the given +// names. It returns false if t is not a pointer type. +func IsPointerToNamed(t types.Type, pkgPath string, names ...string) bool { + r := Unpointer(t) + if r == t { + return false + } + return IsTypeNamed(r, pkgPath, names...) +} + +// IsFunctionNamed reports whether obj is a package-level function +// defined in the given package and has one of the given names. +// It returns false if obj is nil. +// +// This function avoids allocating the concatenation of "pkg.Name", +// which is important for the performance of syntax matching. +func IsFunctionNamed(obj types.Object, pkgPath string, names ...string) bool { + f, ok := obj.(*types.Func) + return ok && + IsPackageLevel(obj) && + f.Pkg().Path() == pkgPath && + f.Type().(*types.Signature).Recv() == nil && + slices.Contains(names, f.Name()) +} + +// IsMethodNamed reports whether obj is a method defined on a +// package-level type with the given package and type name, and has +// one of the given names. It returns false if obj is nil. +// +// This function avoids allocating the concatenation of "pkg.TypeName.Name", +// which is important for the performance of syntax matching. 
+func IsMethodNamed(obj types.Object, pkgPath string, typeName string, names ...string) bool {
+	if fn, ok := obj.(*types.Func); ok {
+		if recv := fn.Type().(*types.Signature).Recv(); recv != nil {
+			_, T := ReceiverNamed(recv)
+			return T != nil &&
+				IsTypeNamed(T, pkgPath, typeName) &&
+				slices.Contains(names, fn.Name())
+		}
+	}
+	return false
+}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
index b64f714eb3..64f47919f0 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
@@ -15,6 +15,14 @@ import (
 // file.
 // If the same package is imported multiple times, the last appearance is
 // recorded.
+//
+// TODO(adonovan): this function ignores the effect of shadowing. It
+// should accept a [token.Pos] and a [types.Info] and compute only the
+// set of imports that are not shadowed at that point, analogous to
+// [analysisinternal.AddImport]. It could also compute (as a side
+// effect) the set of additional imports required to ensure that there
+// is an accessible import for each necessary package, making it
+// converge even more closely with AddImport.
 func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
 	// Construct mapping of import paths to their defined names.
 	// It is only necessary to look at renaming imports.
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index a5cd7e8dbf..fef74a7856 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -2,8 +2,20 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
-// Package typesinternal provides access to internal go/types APIs that are not
-// yet exported.
+// Package typesinternal provides helpful operators for dealing with
+// go/types:
+//
+//   - operators for querying typed syntax trees (e.g. [Imports], [IsFunctionNamed]);
+//   - functions for converting types to strings or syntax (e.g. [TypeExpr], [FileQualifier]);
+//   - helpers for working with the [go/types] API (e.g. [NewTypesInfo]);
+//   - access to internal go/types APIs that are not yet
+//     exported (e.g. [SetUsesCgo], [ErrorCodeStartEnd], [VarKind]); and
+//   - common algorithms related to types (e.g. [TooNewStdSymbols]).
+//
+// See also:
+//   - [golang.org/x/tools/internal/astutil], for operations on untyped syntax;
+//   - [golang.org/x/tools/internal/analysisinternal], for helpers for analyzers;
+//   - [golang.org/x/tools/internal/refactor], for operators to compute text edits.
 package typesinternal
 
 import (
@@ -13,6 +25,7 @@ import (
 	"reflect"
 	"unsafe"
 
+	"golang.org/x/tools/go/ast/inspector"
 	"golang.org/x/tools/internal/aliases"
 )
 
@@ -60,6 +73,9 @@ func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, o
 // which is often excessive.)
 //
 // If pkg is nil, it is equivalent to [*types.Package.Name].
+//
+// TODO(adonovan): all uses of this with TypeString should be
+// eliminated when https://go.dev/issues/75604 is resolved.
 func NameRelativeTo(pkg *types.Package) types.Qualifier {
 	return func(other *types.Package) string {
 		if pkg != nil && pkg == other {
@@ -153,3 +169,31 @@ func NewTypesInfo() *types.Info {
 		FileVersions: map[*ast.File]string{},
 	}
 }
+
+// EnclosingScope returns the innermost block logically enclosing the cursor.
+func EnclosingScope(info *types.Info, cur inspector.Cursor) *types.Scope { + for cur := range cur.Enclosing() { + n := cur.Node() + // A function's Scope is associated with its FuncType. + switch f := n.(type) { + case *ast.FuncDecl: + n = f.Type + case *ast.FuncLit: + n = f.Type + } + if b := info.Scopes[n]; b != nil { + return b + } + } + panic("no Scope for *ast.File") +} + +// Imports reports whether path is imported by pkg. +func Imports(pkg *types.Package, path string) bool { + for _, imp := range pkg.Imports() { + if imp.Path() == path { + return true + } + } + return false +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go index d272949c17..453bba2ad5 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go @@ -204,23 +204,12 @@ func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) { } } -// IsZeroExpr uses simple syntactic heuristics to report whether expr -// is a obvious zero value, such as 0, "", nil, or false. -// It cannot do better without type information. -func IsZeroExpr(expr ast.Expr) bool { - switch e := expr.(type) { - case *ast.BasicLit: - return e.Value == "0" || e.Value == `""` - case *ast.Ident: - return e.Name == "nil" || e.Name == "false" - default: - return false - } -} - // TypeExpr returns syntax for the specified type. References to named types // are qualified by an appropriate (optional) qualifier function. // It may panic for types such as Tuple or Union. +// +// See also https://go.dev/issues/75604, which will provide a robust +// Type-to-valid-Go-syntax formatter. func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { switch t := t.(type) { case *types.Basic: diff --git a/vendor/modules.txt b/vendor/modules.txt index 5adf43b5f2..c27eca9dc0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -605,17 +605,17 @@ go.opentelemetry.io/otel/trace/noop go.uber.org/mock/gomock go.uber.org/mock/mockgen go.uber.org/mock/mockgen/model -# golang.org/x/crypto v0.41.0 -## explicit; go 1.23.0 +# golang.org/x/crypto v0.45.0 +## explicit; go 1.24.0 golang.org/x/crypto/curve25519 -# golang.org/x/mod v0.27.0 -## explicit; go 1.23.0 +# golang.org/x/mod v0.29.0 +## explicit; go 1.24.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/modfile golang.org/x/mod/module golang.org/x/mod/semver -# golang.org/x/net v0.43.0 -## explicit; go 1.23.0 +# golang.org/x/net v0.47.0 +## explicit; go 1.24.0 golang.org/x/net/bpf golang.org/x/net/http/httpguts golang.org/x/net/http2 @@ -625,8 +625,8 @@ golang.org/x/net/internal/httpcommon golang.org/x/net/internal/timeseries golang.org/x/net/netutil golang.org/x/net/trace -# golang.org/x/sync v0.16.0 -## explicit; go 1.23.0 +# golang.org/x/sync v0.18.0 +## explicit; go 1.24.0 golang.org/x/sync/errgroup # golang.org/x/sys v0.39.0 ## explicit; go 1.24.0 @@ -637,15 +637,17 @@ golang.org/x/sys/windows/registry golang.org/x/sys/windows/svc golang.org/x/sys/windows/svc/debug golang.org/x/sys/windows/svc/mgr -# golang.org/x/text v0.28.0 -## explicit; go 1.23.0 +# golang.org/x/text v0.31.0 +## explicit; go 1.24.0 golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/tools v0.36.0 -## explicit; go 1.23.0 +# golang.org/x/tools v0.38.0 +## explicit; go 1.24.0 golang.org/x/tools/go/ast/astutil +golang.org/x/tools/go/ast/edge 
+golang.org/x/tools/go/ast/inspector golang.org/x/tools/go/gcexportdata golang.org/x/tools/go/packages golang.org/x/tools/go/types/objectpath