+//
+// Examples:
+//
+// math.isFinite(0.0/0.0) // returns false
+// math.isFinite(1.2) // returns true
+func Math(options ...MathOption) cel.EnvOption {
+ m := &mathLib{version: math.MaxUint32}
+ for _, o := range options {
+ m = o(m)
+ }
+ return cel.Lib(m)
}
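// Illustrative sketch (hypothetical helper, not part of this file): how an
// application might enable the versioned math extension declared above and
// evaluate one of the version-1 functions. Assumes the standard imports
// "github.com/google/cel-go/cel" and "github.com/google/cel-go/ext".
func exampleMathCeil() (any, error) {
	env, err := cel.NewEnv(ext.Math(ext.MathVersion(1)))
	if err != nil {
		return nil, err
	}
	ast, iss := env.Compile(`math.ceil(1.2)`)
	if iss.Err() != nil {
		return nil, iss.Err()
	}
	prg, err := env.Program(ast)
	if err != nil {
		return nil, err
	}
	out, _, err := prg.Eval(cel.NoVars()) // expected to yield 2.0 as a double
	if err != nil {
		return nil, err
	}
	return out.Value(), nil
}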
const (
mathNamespace = "math"
leastMacro = "least"
greatestMacro = "greatest"
- minFunc = "math.@min"
- maxFunc = "math.@max"
+
+ // Min-max functions
+ minFunc = "math.@min"
+ maxFunc = "math.@max"
+
+ // Rounding functions
+ ceilFunc = "math.ceil"
+ floorFunc = "math.floor"
+ roundFunc = "math.round"
+ truncFunc = "math.trunc"
+
+ // Floating point helper functions
+ isInfFunc = "math.isInf"
+ isNanFunc = "math.isNaN"
+ isFiniteFunc = "math.isFinite"
+
+ // Signedness functions
+ absFunc = "math.abs"
+ signFunc = "math.sign"
+
+ // Bitwise functions
+ bitAndFunc = "math.bitAnd"
+ bitOrFunc = "math.bitOr"
+ bitXorFunc = "math.bitXor"
+ bitNotFunc = "math.bitNot"
+ bitShiftLeftFunc = "math.bitShiftLeft"
+ bitShiftRightFunc = "math.bitShiftRight"
)
-type mathLib struct{}
+var (
+ errIntOverflow = types.NewErr("integer overflow")
+)
+
+// MathOption declares a functional operator for configuring math extensions.
+type MathOption func(*mathLib) *mathLib
+
+// MathVersion sets the library version for math extensions.
+func MathVersion(version uint32) MathOption {
+ return func(lib *mathLib) *mathLib {
+ lib.version = version
+ return lib
+ }
+}
+
+type mathLib struct {
+ version uint32
+}
// LibraryName implements the SingletonLibrary interface method.
-func (mathLib) LibraryName() string {
+func (*mathLib) LibraryName() string {
return "cel.lib.ext.math"
}
// CompileOptions implements the Library interface method.
-func (mathLib) CompileOptions() []cel.EnvOption {
- return []cel.EnvOption{
+func (lib *mathLib) CompileOptions() []cel.EnvOption {
+ opts := []cel.EnvOption{
cel.Macros(
// math.least(num, ...)
cel.ReceiverVarArgMacro(leastMacro, mathLeast),
@@ -179,10 +464,95 @@ func (mathLib) CompileOptions() []cel.EnvOption {
cel.UnaryBinding(maxList)),
),
}
+ if lib.version >= 1 {
+ opts = append(opts,
+ // Rounding function declarations
+ cel.Function(ceilFunc,
+ cel.Overload("math_ceil_double", []*cel.Type{cel.DoubleType}, cel.DoubleType,
+ cel.UnaryBinding(ceil))),
+ cel.Function(floorFunc,
+ cel.Overload("math_floor_double", []*cel.Type{cel.DoubleType}, cel.DoubleType,
+ cel.UnaryBinding(floor))),
+ cel.Function(roundFunc,
+ cel.Overload("math_round_double", []*cel.Type{cel.DoubleType}, cel.DoubleType,
+ cel.UnaryBinding(round))),
+ cel.Function(truncFunc,
+ cel.Overload("math_trunc_double", []*cel.Type{cel.DoubleType}, cel.DoubleType,
+ cel.UnaryBinding(trunc))),
+
+ // Floating point helpers
+ cel.Function(isInfFunc,
+ cel.Overload("math_isInf_double", []*cel.Type{cel.DoubleType}, cel.BoolType,
+ cel.UnaryBinding(isInf))),
+ cel.Function(isNanFunc,
+ cel.Overload("math_isNaN_double", []*cel.Type{cel.DoubleType}, cel.BoolType,
+ cel.UnaryBinding(isNaN))),
+ cel.Function(isFiniteFunc,
+ cel.Overload("math_isFinite_double", []*cel.Type{cel.DoubleType}, cel.BoolType,
+ cel.UnaryBinding(isFinite))),
+
+ // Signedness functions
+ cel.Function(absFunc,
+ cel.Overload("math_abs_double", []*cel.Type{cel.DoubleType}, cel.DoubleType,
+ cel.UnaryBinding(absDouble)),
+ cel.Overload("math_abs_int", []*cel.Type{cel.IntType}, cel.IntType,
+ cel.UnaryBinding(absInt)),
+ cel.Overload("math_abs_uint", []*cel.Type{cel.UintType}, cel.UintType,
+ cel.UnaryBinding(identity)),
+ ),
+ cel.Function(signFunc,
+ cel.Overload("math_sign_double", []*cel.Type{cel.DoubleType}, cel.DoubleType,
+ cel.UnaryBinding(sign)),
+ cel.Overload("math_sign_int", []*cel.Type{cel.IntType}, cel.IntType,
+ cel.UnaryBinding(sign)),
+ cel.Overload("math_sign_uint", []*cel.Type{cel.UintType}, cel.UintType,
+ cel.UnaryBinding(sign)),
+ ),
+
+ // Bitwise operator declarations
+ cel.Function(bitAndFunc,
+ cel.Overload("math_bitAnd_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType,
+ cel.BinaryBinding(bitAndPairInt)),
+ cel.Overload("math_bitAnd_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType,
+ cel.BinaryBinding(bitAndPairUint)),
+ ),
+ cel.Function(bitOrFunc,
+ cel.Overload("math_bitOr_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType,
+ cel.BinaryBinding(bitOrPairInt)),
+ cel.Overload("math_bitOr_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType,
+ cel.BinaryBinding(bitOrPairUint)),
+ ),
+ cel.Function(bitXorFunc,
+ cel.Overload("math_bitXor_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType,
+ cel.BinaryBinding(bitXorPairInt)),
+ cel.Overload("math_bitXor_uint_uint", []*cel.Type{cel.UintType, cel.UintType}, cel.UintType,
+ cel.BinaryBinding(bitXorPairUint)),
+ ),
+ cel.Function(bitNotFunc,
+ cel.Overload("math_bitNot_int_int", []*cel.Type{cel.IntType}, cel.IntType,
+ cel.UnaryBinding(bitNotInt)),
+ cel.Overload("math_bitNot_uint_uint", []*cel.Type{cel.UintType}, cel.UintType,
+ cel.UnaryBinding(bitNotUint)),
+ ),
+ cel.Function(bitShiftLeftFunc,
+ cel.Overload("math_bitShiftLeft_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType,
+ cel.BinaryBinding(bitShiftLeftIntInt)),
+ cel.Overload("math_bitShiftLeft_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.UintType,
+ cel.BinaryBinding(bitShiftLeftUintInt)),
+ ),
+ cel.Function(bitShiftRightFunc,
+ cel.Overload("math_bitShiftRight_int_int", []*cel.Type{cel.IntType, cel.IntType}, cel.IntType,
+ cel.BinaryBinding(bitShiftRightIntInt)),
+ cel.Overload("math_bitShiftRight_uint_int", []*cel.Type{cel.UintType, cel.IntType}, cel.UintType,
+ cel.BinaryBinding(bitShiftRightUintInt)),
+ ),
+ )
+ }
+ return opts
}
// ProgramOptions implements the Library interface method.
-func (mathLib) ProgramOptions() []cel.ProgramOption {
+func (*mathLib) ProgramOptions() []cel.ProgramOption {
return []cel.ProgramOption{}
}
@@ -194,7 +564,7 @@ func mathLeast(meh cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (ast.
case 0:
return nil, meh.NewError(target.ID(), "math.least() requires at least one argument")
case 1:
- if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) {
+ if isListLiteralWithNumericArgs(args[0]) || isNumericArgType(args[0]) {
return meh.NewCall(minFunc, args[0]), nil
}
return nil, meh.NewError(args[0].ID(), "math.least() invalid single argument value")
@@ -221,7 +591,7 @@ func mathGreatest(mef cel.MacroExprFactory, target ast.Expr, args []ast.Expr) (a
case 0:
return nil, mef.NewError(target.ID(), "math.greatest() requires at least one argument")
case 1:
- if isListLiteralWithValidArgs(args[0]) || isValidArgType(args[0]) {
+ if isListLiteralWithNumericArgs(args[0]) || isNumericArgType(args[0]) {
return mef.NewCall(maxFunc, args[0]), nil
}
return nil, mef.NewError(args[0].ID(), "math.greatest() invalid single argument value")
@@ -244,6 +614,165 @@ func identity(val ref.Val) ref.Val {
return val
}
+func ceil(val ref.Val) ref.Val {
+ v := val.(types.Double)
+ return types.Double(math.Ceil(float64(v)))
+}
+
+func floor(val ref.Val) ref.Val {
+ v := val.(types.Double)
+ return types.Double(math.Floor(float64(v)))
+}
+
+func round(val ref.Val) ref.Val {
+ v := val.(types.Double)
+ return types.Double(math.Round(float64(v)))
+}
+
+func trunc(val ref.Val) ref.Val {
+ v := val.(types.Double)
+ return types.Double(math.Trunc(float64(v)))
+}
+
+func isInf(val ref.Val) ref.Val {
+ v := val.(types.Double)
+ return types.Bool(math.IsInf(float64(v), 0))
+}
+
+func isFinite(val ref.Val) ref.Val {
+ v := float64(val.(types.Double))
+ return types.Bool(!math.IsInf(v, 0) && !math.IsNaN(v))
+}
+
+func isNaN(val ref.Val) ref.Val {
+ v := val.(types.Double)
+ return types.Bool(math.IsNaN(float64(v)))
+}
+
+func absDouble(val ref.Val) ref.Val {
+ v := float64(val.(types.Double))
+ return types.Double(math.Abs(v))
+}
+
+func absInt(val ref.Val) ref.Val {
+ v := int64(val.(types.Int))
+ if v == math.MinInt64 {
+ return errIntOverflow
+ }
+ if v >= 0 {
+ return val
+ }
+ return -types.Int(v)
+}
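// Why absInt guards math.MinInt64 above: negating the minimum two's-complement
// int64 cannot be represented and wraps back to the same value, so math.abs
// reports an integer overflow instead. A minimal plain-Go illustration
// (hypothetical helper; assumes "fmt" and "math" are imported):
func demoMinInt64Negation() {
	v := int64(math.MinInt64)
	fmt.Println(-v == v) // true: -(-9223372036854775808) overflows and wraps
}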
+
+func sign(val ref.Val) ref.Val {
+ switch v := val.(type) {
+ case types.Double:
+ if isNaN(v) == types.True {
+ return v
+ }
+ zero := types.Double(0)
+ if v > zero {
+ return types.Double(1)
+ }
+ if v < zero {
+ return types.Double(-1)
+ }
+ return zero
+ case types.Int:
+ return v.Compare(types.IntZero)
+ case types.Uint:
+ if v == types.Uint(0) {
+ return types.Uint(0)
+ }
+ return types.Uint(1)
+ default:
+ return maybeSuffixError(val, "math.sign")
+ }
+}
+
+func bitAndPairInt(first, second ref.Val) ref.Val {
+ l := first.(types.Int)
+ r := second.(types.Int)
+ return l & r
+}
+
+func bitAndPairUint(first, second ref.Val) ref.Val {
+ l := first.(types.Uint)
+ r := second.(types.Uint)
+ return l & r
+}
+
+func bitOrPairInt(first, second ref.Val) ref.Val {
+ l := first.(types.Int)
+ r := second.(types.Int)
+ return l | r
+}
+
+func bitOrPairUint(first, second ref.Val) ref.Val {
+ l := first.(types.Uint)
+ r := second.(types.Uint)
+ return l | r
+}
+
+func bitXorPairInt(first, second ref.Val) ref.Val {
+ l := first.(types.Int)
+ r := second.(types.Int)
+ return l ^ r
+}
+
+func bitXorPairUint(first, second ref.Val) ref.Val {
+ l := first.(types.Uint)
+ r := second.(types.Uint)
+ return l ^ r
+}
+
+func bitNotInt(value ref.Val) ref.Val {
+ v := value.(types.Int)
+ return ^v
+}
+
+func bitNotUint(value ref.Val) ref.Val {
+ v := value.(types.Uint)
+ return ^v
+}
+
+func bitShiftLeftIntInt(value, bits ref.Val) ref.Val {
+ v := value.(types.Int)
+ bs := bits.(types.Int)
+ if bs < types.IntZero {
+ return types.NewErr("math.bitShiftLeft() negative offset: %d", bs)
+ }
+ return v << bs
+}
+
+func bitShiftLeftUintInt(value, bits ref.Val) ref.Val {
+ v := value.(types.Uint)
+ bs := bits.(types.Int)
+ if bs < types.IntZero {
+ return types.NewErr("math.bitShiftLeft() negative offset: %d", bs)
+ }
+ return v << bs
+}
+
+func bitShiftRightIntInt(value, bits ref.Val) ref.Val {
+ v := value.(types.Int)
+ bs := bits.(types.Int)
+ if bs < types.IntZero {
+ return types.NewErr("math.bitShiftRight() negative offset: %d", bs)
+ }
+ return types.Int(types.Uint(v) >> bs)
+}
+
+func bitShiftRightUintInt(value, bits ref.Val) ref.Val {
+ v := value.(types.Uint)
+ bs := bits.(types.Int)
+ if bs < types.IntZero {
+ return types.NewErr("math.bitShiftRight() negative offset: %d", bs)
+ }
+ return v >> bs
+}
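// Note on the signed overload of math.bitShiftRight above: routing the value
// through types.Uint makes the shift logical (zero-fill) rather than Go's
// default arithmetic, sign-extending shift. A plain-Go sketch of the
// difference for a negative operand (hypothetical helper; assumes "fmt" is
// imported):
func demoLogicalShiftRight() {
	v := int64(-8)
	arithmetic := v >> 1             // -4: Go sign-extends shifts of signed integers
	logical := int64(uint64(v) >> 1) // 9223372036854775804: zero-fill, matching bitShiftRightIntInt
	fmt.Println(arithmetic, logical)
}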
+
func minPair(first, second ref.Val) ref.Val {
cmp, ok := first.(traits.Comparer)
if !ok {
@@ -321,13 +850,13 @@ func checkInvalidArgs(meh cel.MacroExprFactory, funcName string, args []ast.Expr
}
func checkInvalidArgLiteral(funcName string, arg ast.Expr) error {
- if !isValidArgType(arg) {
+ if !isNumericArgType(arg) {
return fmt.Errorf("%s simple literal arguments must be numeric", funcName)
}
return nil
}
-func isValidArgType(arg ast.Expr) bool {
+func isNumericArgType(arg ast.Expr) bool {
switch arg.Kind() {
case ast.LiteralKind:
c := ref.Val(arg.AsLiteral())
@@ -344,7 +873,7 @@ func isValidArgType(arg ast.Expr) bool {
}
}
-func isListLiteralWithValidArgs(arg ast.Expr) bool {
+func isListLiteralWithNumericArgs(arg ast.Expr) bool {
switch arg.Kind() {
case ast.ListKind:
list := arg.AsList()
@@ -352,7 +881,7 @@ func isListLiteralWithValidArgs(arg ast.Expr) bool {
return false
}
for _, e := range list.Elements() {
- if !isValidArgType(e) {
+ if !isNumericArgType(e) {
return false
}
}
diff --git a/tools/vendor/github.com/google/cel-go/ext/native.go b/tools/vendor/github.com/google/cel-go/ext/native.go
index d1b787775..36ab4a7ae 100644
--- a/tools/vendor/github.com/google/cel-go/ext/native.go
+++ b/tools/vendor/github.com/google/cel-go/ext/native.go
@@ -15,6 +15,7 @@
package ext
import (
+ "errors"
"fmt"
"reflect"
"strings"
@@ -77,12 +78,45 @@ var (
// same advice holds if you are using custom type adapters and type providers. The native type
// provider composes over whichever type adapter and provider is configured in the cel.Env at
// the time that it is invoked.
-func NativeTypes(refTypes ...any) cel.EnvOption {
+//
+// It is also possible to rename the fields of native structs by setting the `cel` tag
+// for fields you want to override. To enable this feature, pass in the `ParseStructTags(true)`
+// option. Here is an example to see it in action:
+//
+// ```go
+// package identity
+//
+// type Account struct {
+// ID int
+// OwnerName string `cel:"owner"`
+// }
+//
+// ```
+//
+// The `OwnerName` field is now accessible in CEL via `owner`, e.g. `identity.Account{owner: 'bob'}`.
+// In case there are duplicated field names in the struct, an error will be returned.
+func NativeTypes(args ...any) cel.EnvOption {
return func(env *cel.Env) (*cel.Env, error) {
- tp, err := newNativeTypeProvider(env.CELTypeAdapter(), env.CELTypeProvider(), refTypes...)
+ nativeTypes := make([]any, 0, len(args))
+ tpOptions := nativeTypeOptions{}
+
+ for _, v := range args {
+ switch v := v.(type) {
+ case NativeTypesOption:
+ err := v(&tpOptions)
+ if err != nil {
+ return nil, err
+ }
+ default:
+ nativeTypes = append(nativeTypes, v)
+ }
+ }
+
+ tp, err := newNativeTypeProvider(tpOptions, env.CELTypeAdapter(), env.CELTypeProvider(), nativeTypes...)
if err != nil {
return nil, err
}
+
env, err = cel.CustomTypeAdapter(tp)(env)
if err != nil {
return nil, err
@@ -91,12 +125,79 @@ func NativeTypes(refTypes ...any) cel.EnvOption {
}
}
-func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTypes ...any) (*nativeTypeProvider, error) {
+// NativeTypesOption is a functional interface for configuring handling of native types.
+type NativeTypesOption func(*nativeTypeOptions) error
+
+// NativeTypesFieldNameHandler is a handler for mapping a reflect.StructField to a CEL field name.
+// This can be used to override the default Go struct field to CEL field name mapping.
+type NativeTypesFieldNameHandler = func(field reflect.StructField) string
+
+func fieldNameByTag(structTagToParse string) func(field reflect.StructField) string {
+ return func(field reflect.StructField) string {
+ tag, found := field.Tag.Lookup(structTagToParse)
+ if found {
+ splits := strings.Split(tag, ",")
+ if len(splits) > 0 {
+ // We make the assumption that the leftmost entry in the tag is the name.
+ // This seems to be true for most tags that have the concept of a name/key, such as:
+ // https://pkg.go.dev/encoding/xml#Marshal
+ // https://pkg.go.dev/encoding/json#Marshal
+ // https://pkg.go.dev/go.mongodb.org/mongo-driver/bson#hdr-Structs
+ // https://pkg.go.dev/gopkg.in/yaml.v2#Marshal
+ name := splits[0]
+ return name
+ }
+ }
+
+ return field.Name
+ }
+}
+
+type nativeTypeOptions struct {
+ // fieldNameHandler controls how CEL should perform struct field renames.
+ // This is most commonly used for switching to parsing based off the struct field tag,
+ // such as "cel" or "json".
+ fieldNameHandler NativeTypesFieldNameHandler
+}
+
+// ParseStructTags configures whether native type field names may be overridden by CEL struct tags.
+// This is equivalent to ParseStructTag("cel").
+func ParseStructTags(enabled bool) NativeTypesOption {
+ return func(ntp *nativeTypeOptions) error {
+ if enabled {
+ ntp.fieldNameHandler = fieldNameByTag("cel")
+ } else {
+ ntp.fieldNameHandler = nil
+ }
+ return nil
+ }
+}
+
+// ParseStructTag configures the struct tag to parse. The 0th item in the tag is used as the name of the CEL field.
+// For example:
+// If the tag to parse is "cel" and the struct field has tag cel:"foo", the CEL struct field will be "foo".
+// If the tag to parse is "json" and the struct field has tag json:"foo,omitempty", the CEL struct field will be "foo".
+func ParseStructTag(tag string) NativeTypesOption {
+ return func(ntp *nativeTypeOptions) error {
+ ntp.fieldNameHandler = fieldNameByTag(tag)
+ return nil
+ }
+}
+
+// ParseStructField configures how to parse Go struct fields. It can be used to customize struct field parsing.
+func ParseStructField(handler NativeTypesFieldNameHandler) NativeTypesOption {
+ return func(ntp *nativeTypeOptions) error {
+ ntp.fieldNameHandler = handler
+ return nil
+ }
+}
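// Illustrative sketch (hypothetical struct and helper): registering a native Go
// struct whose CEL field names come from its json tags, using the NativeTypes
// and ParseStructTag options declared above. The fields become addressable in
// CEL as `display_name` and `age`.
type exampleUser struct {
	DisplayName string `json:"display_name,omitempty"`
	Age         int    `json:"age"`
}

func exampleNativeTypesWithJSONTags() (*cel.Env, error) {
	return cel.NewEnv(
		NativeTypes(
			reflect.TypeOf(exampleUser{}),
			ParseStructTag("json"),
		),
	)
}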
+
+func newNativeTypeProvider(tpOptions nativeTypeOptions, adapter types.Adapter, provider types.Provider, refTypes ...any) (*nativeTypeProvider, error) {
nativeTypes := make(map[string]*nativeType, len(refTypes))
for _, refType := range refTypes {
switch rt := refType.(type) {
case reflect.Type:
- result, err := newNativeTypes(rt)
+ result, err := newNativeTypes(tpOptions.fieldNameHandler, rt)
if err != nil {
return nil, err
}
@@ -104,7 +205,7 @@ func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTy
nativeTypes[result[idx].TypeName()] = result[idx]
}
case reflect.Value:
- result, err := newNativeTypes(rt.Type())
+ result, err := newNativeTypes(tpOptions.fieldNameHandler, rt.Type())
if err != nil {
return nil, err
}
@@ -119,6 +220,7 @@ func newNativeTypeProvider(adapter types.Adapter, provider types.Provider, refTy
nativeTypes: nativeTypes,
baseAdapter: adapter,
baseProvider: provider,
+ options: tpOptions,
}, nil
}
@@ -126,6 +228,7 @@ type nativeTypeProvider struct {
nativeTypes map[string]*nativeType
baseAdapter types.Adapter
baseProvider types.Provider
+ options nativeTypeOptions
}
// EnumValue proxies to the types.Provider configured at the times the NativeTypes
@@ -155,6 +258,14 @@ func (tp *nativeTypeProvider) FindStructType(typeName string) (*types.Type, bool
return tp.baseProvider.FindStructType(typeName)
}
+func toFieldName(fieldNameHandler NativeTypesFieldNameHandler, f reflect.StructField) string {
+ if fieldNameHandler == nil {
+ return f.Name
+ }
+
+ return fieldNameHandler(f)
+}
+
// FindStructFieldNames looks up the type definition first from the native types, then from
// the backing provider type set. If found, a set of field names corresponding to the type
// will be returned.
@@ -163,7 +274,7 @@ func (tp *nativeTypeProvider) FindStructFieldNames(typeName string) ([]string, b
fieldCount := t.refType.NumField()
fields := make([]string, fieldCount)
for i := 0; i < fieldCount; i++ {
- fields[i] = t.refType.Field(i).Name
+ fields[i] = toFieldName(tp.options.fieldNameHandler, t.refType.Field(i))
}
return fields, true
}
@@ -192,13 +303,13 @@ func (tp *nativeTypeProvider) FindStructFieldType(typeName, fieldName string) (*
Type: celType,
IsSet: func(obj any) bool {
refVal := reflect.Indirect(reflect.ValueOf(obj))
- refField := refVal.FieldByName(fieldName)
+ refField := refVal.FieldByName(refField.Name)
return !refField.IsZero()
},
GetFrom: func(obj any) (any, error) {
refVal := reflect.Indirect(reflect.ValueOf(obj))
- refField := refVal.FieldByName(fieldName)
- return getFieldValue(tp, refField), nil
+ refField := refVal.FieldByName(refField.Name)
+ return getFieldValue(refField), nil
},
}, true
}
@@ -249,6 +360,9 @@ func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val {
case []byte:
return tp.baseAdapter.NativeToValue(val)
default:
+ if refVal.Type().Elem() == reflect.TypeOf(byte(0)) {
+ return tp.baseAdapter.NativeToValue(val)
+ }
return types.NewDynamicList(tp, val)
}
case reflect.Map:
@@ -259,7 +373,7 @@ func (tp *nativeTypeProvider) NativeToValue(val any) ref.Val {
time.Time:
return tp.baseAdapter.NativeToValue(val)
default:
- return newNativeObject(tp, val, rawVal)
+ return tp.newNativeObject(val, rawVal)
}
default:
return tp.baseAdapter.NativeToValue(val)
@@ -319,13 +433,13 @@ func convertToCelType(refType reflect.Type) (*cel.Type, bool) {
return nil, false
}
-func newNativeObject(adapter types.Adapter, val any, refValue reflect.Value) ref.Val {
- valType, err := newNativeType(refValue.Type())
+func (tp *nativeTypeProvider) newNativeObject(val any, refValue reflect.Value) ref.Val {
+ valType, err := newNativeType(tp.options.fieldNameHandler, refValue.Type())
if err != nil {
return types.NewErr(err.Error())
}
return &nativeObj{
- Adapter: adapter,
+ Adapter: tp,
val: val,
valType: valType,
refValue: refValue,
@@ -372,12 +486,13 @@ func (o *nativeObj) ConvertToNative(typeDesc reflect.Type) (any, error) {
if !fieldValue.IsValid() || fieldValue.IsZero() {
continue
}
+ fieldName := toFieldName(o.valType.fieldNameHandler, fieldType)
fieldCELVal := o.NativeToValue(fieldValue.Interface())
fieldJSONVal, err := fieldCELVal.ConvertToNative(jsonValueType)
if err != nil {
return nil, err
}
- fields[fieldType.Name] = fieldJSONVal.(*structpb.Value)
+ fields[fieldName] = fieldJSONVal.(*structpb.Value)
}
return &structpb.Struct{Fields: fields}, nil
}
@@ -469,8 +584,8 @@ func (o *nativeObj) Value() any {
return o.val
}
-func newNativeTypes(rawType reflect.Type) ([]*nativeType, error) {
- nt, err := newNativeType(rawType)
+func newNativeTypes(fieldNameHandler NativeTypesFieldNameHandler, rawType reflect.Type) ([]*nativeType, error) {
+ nt, err := newNativeType(fieldNameHandler, rawType)
if err != nil {
return nil, err
}
@@ -489,7 +604,7 @@ func newNativeTypes(rawType reflect.Type) ([]*nativeType, error) {
return
}
alreadySeen[t.String()] = struct{}{}
- nt, ntErr := newNativeType(t)
+ nt, ntErr := newNativeType(fieldNameHandler, t)
if ntErr != nil {
err = ntErr
return
@@ -505,7 +620,11 @@ func newNativeTypes(rawType reflect.Type) ([]*nativeType, error) {
return result, err
}
-func newNativeType(rawType reflect.Type) (*nativeType, error) {
+var (
+ errDuplicatedFieldName = errors.New("field name already exists in struct")
+)
+
+func newNativeType(fieldNameHandler NativeTypesFieldNameHandler, rawType reflect.Type) (*nativeType, error) {
refType := rawType
if refType.Kind() == reflect.Pointer {
refType = refType.Elem()
@@ -513,15 +632,34 @@ func newNativeType(rawType reflect.Type) (*nativeType, error) {
if !isValidObjectType(refType) {
return nil, fmt.Errorf("unsupported reflect.Type %v, must be reflect.Struct", rawType)
}
+
+ // Since naming collisions can only happen with struct tag parsing, we only check for them if it is enabled.
+ if fieldNameHandler != nil {
+ fieldNames := make(map[string]struct{})
+
+ for idx := 0; idx < refType.NumField(); idx++ {
+ field := refType.Field(idx)
+ fieldName := toFieldName(fieldNameHandler, field)
+
+ if _, found := fieldNames[fieldName]; found {
+ return nil, fmt.Errorf("invalid field name `%s` in struct `%s`: %w", fieldName, refType.Name(), errDuplicatedFieldName)
+ } else {
+ fieldNames[fieldName] = struct{}{}
+ }
+ }
+ }
+
return &nativeType{
- typeName: fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()),
- refType: refType,
+ typeName: fmt.Sprintf("%s.%s", simplePkgAlias(refType.PkgPath()), refType.Name()),
+ refType: refType,
+ fieldNameHandler: fieldNameHandler,
}, nil
}
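// Hypothetical struct illustrating the duplicate-name check above: when struct
// tag parsing is enabled, two Go fields that map to the same CEL name are
// rejected with an error wrapping errDuplicatedFieldName.
type collidingFields struct {
	First  string `cel:"name"`
	Second string `cel:"name"`
}

func exampleDuplicateFieldName() error {
	_, err := cel.NewEnv(NativeTypes(reflect.TypeOf(collidingFields{}), ParseStructTags(true)))
	return err // non-nil: the CEL field name `name` appears twice
}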
type nativeType struct {
- typeName string
- refType reflect.Type
+ typeName string
+ refType reflect.Type
+ fieldNameHandler NativeTypesFieldNameHandler
}
// ConvertToNative implements ref.Val.ConvertToNative.
@@ -569,9 +707,26 @@ func (t *nativeType) Value() any {
return t.typeName
}
+// fieldByName returns the corresponding reflect.StructField for the given name, either by matching
+// the field tag or the field name.
+func (t *nativeType) fieldByName(fieldName string) (reflect.StructField, bool) {
+ if t.fieldNameHandler == nil {
+ return t.refType.FieldByName(fieldName)
+ }
+
+ for i := 0; i < t.refType.NumField(); i++ {
+ f := t.refType.Field(i)
+ if toFieldName(t.fieldNameHandler, f) == fieldName {
+ return f, true
+ }
+ }
+
+ return reflect.StructField{}, false
+}
+
// hasField returns whether a field name has a corresponding Golang reflect.StructField
func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) {
- f, found := t.refType.FieldByName(fieldName)
+ f, found := t.fieldByName(fieldName)
if !found || !f.IsExported() || !isSupportedType(f.Type) {
return reflect.StructField{}, false
}
@@ -579,21 +734,16 @@ func (t *nativeType) hasField(fieldName string) (reflect.StructField, bool) {
}
func adaptFieldValue(adapter types.Adapter, refField reflect.Value) ref.Val {
- return adapter.NativeToValue(getFieldValue(adapter, refField))
+ return adapter.NativeToValue(getFieldValue(refField))
}
-func getFieldValue(adapter types.Adapter, refField reflect.Value) any {
+func getFieldValue(refField reflect.Value) any {
if refField.IsZero() {
switch refField.Kind() {
- case reflect.Array, reflect.Slice:
- return types.NewDynamicList(adapter, []ref.Val{})
- case reflect.Map:
- return types.NewDynamicMap(adapter, map[ref.Val]ref.Val{})
case reflect.Struct:
if refField.Type() == timestampType {
- return types.Timestamp{Time: time.Unix(0, 0)}
+ return time.Unix(0, 0)
}
- return reflect.New(refField.Type()).Elem().Interface()
case reflect.Pointer:
return reflect.New(refField.Type().Elem()).Interface()
}
diff --git a/tools/vendor/github.com/google/cel-go/ext/strings.go b/tools/vendor/github.com/google/cel-go/ext/strings.go
index 2e20f1e4c..2e590a4c5 100644
--- a/tools/vendor/github.com/google/cel-go/ext/strings.go
+++ b/tools/vendor/github.com/google/cel-go/ext/strings.go
@@ -119,7 +119,8 @@ const (
// 'hello mellow'.indexOf('jello') // returns -1
// 'hello mellow'.indexOf('', 2) // returns 2
// 'hello mellow'.indexOf('ello', 2) // returns 7
-// 'hello mellow'.indexOf('ello', 20) // error
+// 'hello mellow'.indexOf('ello', 20) // returns -1
+// 'hello mellow'.indexOf('ello', -1) // error
//
// # Join
//
@@ -155,6 +156,7 @@ const (
// 'hello mellow'.lastIndexOf('ello') // returns 7
// 'hello mellow'.lastIndexOf('jello') // returns -1
// 'hello mellow'.lastIndexOf('ello', 6) // returns 1
+// 'hello mellow'.lastIndexOf('ello', 20) // returns -1
// 'hello mellow'.lastIndexOf('ello', -1) // error
//
// # LowerAscii
@@ -520,7 +522,7 @@ func (lib *stringLib) CompileOptions() []cel.EnvOption {
if lib.version >= 3 {
opts = append(opts,
cel.Function("reverse",
- cel.MemberOverload("reverse", []*cel.Type{cel.StringType}, cel.StringType,
+ cel.MemberOverload("string_reverse", []*cel.Type{cel.StringType}, cel.StringType,
cel.UnaryBinding(func(str ref.Val) ref.Val {
s := str.(types.String)
return stringOrError(reverse(string(s)))
@@ -561,9 +563,13 @@ func indexOfOffset(str, substr string, offset int64) (int64, error) {
off := int(offset)
runes := []rune(str)
subrunes := []rune(substr)
- if off < 0 || off >= len(runes) {
+ if off < 0 {
return -1, fmt.Errorf("index out of range: %d", off)
}
+ // If the offset exceeds the length, return -1 rather than error.
+ if off >= len(runes) {
+ return -1, nil
+ }
for i := off; i < len(runes)-(len(subrunes)-1); i++ {
found := true
for j := 0; j < len(subrunes); j++ {
@@ -594,9 +600,13 @@ func lastIndexOfOffset(str, substr string, offset int64) (int64, error) {
off := int(offset)
runes := []rune(str)
subrunes := []rune(substr)
- if off < 0 || off >= len(runes) {
+ if off < 0 {
return -1, fmt.Errorf("index out of range: %d", off)
}
+ // If the offset is far greater than the length return -1
+ if off >= len(runes) {
+ return -1, nil
+ }
if off > len(runes)-len(subrunes) {
off = len(runes) - len(subrunes)
}
diff --git a/tools/vendor/github.com/google/cel-go/interpreter/activation.go b/tools/vendor/github.com/google/cel-go/interpreter/activation.go
index a80264451..1577f3590 100644
--- a/tools/vendor/github.com/google/cel-go/interpreter/activation.go
+++ b/tools/vendor/github.com/google/cel-go/interpreter/activation.go
@@ -17,7 +17,6 @@ package interpreter
import (
"errors"
"fmt"
- "sync"
"github.com/google/cel-go/common/types/ref"
)
@@ -167,35 +166,3 @@ type partActivation struct {
func (a *partActivation) UnknownAttributePatterns() []*AttributePattern {
return a.unknowns
}
-
-// varActivation represents a single mutable variable binding.
-//
-// This activation type should only be used within folds as the fold loop controls the object
-// life-cycle.
-type varActivation struct {
- parent Activation
- name string
- val ref.Val
-}
-
-// Parent implements the Activation interface method.
-func (v *varActivation) Parent() Activation {
- return v.parent
-}
-
-// ResolveName implements the Activation interface method.
-func (v *varActivation) ResolveName(name string) (any, bool) {
- if name == v.name {
- return v.val, true
- }
- return v.parent.ResolveName(name)
-}
-
-var (
- // pool of var activations to reduce allocations during folds.
- varActivationPool = &sync.Pool{
- New: func() any {
- return &varActivation{}
- },
- }
-)
diff --git a/tools/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go b/tools/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
index 1fbaaf17e..8f19bde7e 100644
--- a/tools/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
+++ b/tools/vendor/github.com/google/cel-go/interpreter/attribute_patterns.go
@@ -178,10 +178,8 @@ func numericValueEquals(value any, celValue ref.Val) bool {
// NewPartialAttributeFactory returns an AttributeFactory implementation capable of performing
// AttributePattern matches with PartialActivation inputs.
-func NewPartialAttributeFactory(container *containers.Container,
- adapter types.Adapter,
- provider types.Provider) AttributeFactory {
- fac := NewAttributeFactory(container, adapter, provider)
+func NewPartialAttributeFactory(container *containers.Container, adapter types.Adapter, provider types.Provider, opts ...AttrFactoryOption) AttributeFactory {
+ fac := NewAttributeFactory(container, adapter, provider, opts...)
return &partialAttributeFactory{
AttributeFactory: fac,
container: container,
diff --git a/tools/vendor/github.com/google/cel-go/interpreter/attributes.go b/tools/vendor/github.com/google/cel-go/interpreter/attributes.go
index 0098750dd..b1b3aacc8 100644
--- a/tools/vendor/github.com/google/cel-go/interpreter/attributes.go
+++ b/tools/vendor/github.com/google/cel-go/interpreter/attributes.go
@@ -126,21 +126,39 @@ type NamespacedAttribute interface {
Qualifiers() []Qualifier
}
+// AttrFactoryOption specifies a functional option for configuring an attribute factory.
+type AttrFactoryOption func(*attrFactory) *attrFactory
+
+// EnableErrorOnBadPresenceTest enables error generation when a presence test or optional field
+// selection is performed on a primitive type.
+func EnableErrorOnBadPresenceTest(value bool) AttrFactoryOption {
+ return func(fac *attrFactory) *attrFactory {
+ fac.errorOnBadPresenceTest = value
+ return fac
+ }
+}
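// Illustrative sketch (hypothetical helper): constructing an attribute factory
// with strict presence-test errors enabled. A *types.Registry satisfies both
// the types.Adapter and types.Provider arguments, and containers.DefaultContainer
// supplies the default name resolution container.
func exampleStrictPresenceFactory() (AttributeFactory, error) {
	reg, err := types.NewRegistry()
	if err != nil {
		return nil, err
	}
	fac := NewAttributeFactory(
		containers.DefaultContainer,
		reg, // types.Adapter
		reg, // types.Provider
		EnableErrorOnBadPresenceTest(true),
	)
	return fac, nil
}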
+
// NewAttributeFactory returns a default AttributeFactory which produces Attribute values
// capable of resolving types by simple names and qualifying the values using the supported
// qualifier types: bool, int, string, and uint.
-func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider) AttributeFactory {
- return &attrFactory{
+func NewAttributeFactory(cont *containers.Container, a types.Adapter, p types.Provider, opts ...AttrFactoryOption) AttributeFactory {
+ fac := &attrFactory{
container: cont,
adapter: a,
provider: p,
}
+ for _, o := range opts {
+ fac = o(fac)
+ }
+ return fac
}
type attrFactory struct {
container *containers.Container
adapter types.Adapter
provider types.Provider
+
+ errorOnBadPresenceTest bool
}
// AbsoluteAttribute refers to a variable value and an optional qualifier path.
@@ -149,12 +167,13 @@ type attrFactory struct {
// resolution rules.
func (r *attrFactory) AbsoluteAttribute(id int64, names ...string) NamespacedAttribute {
return &absoluteAttribute{
- id: id,
- namespaceNames: names,
- qualifiers: []Qualifier{},
- adapter: r.adapter,
- provider: r.provider,
- fac: r,
+ id: id,
+ namespaceNames: names,
+ qualifiers: []Qualifier{},
+ adapter: r.adapter,
+ provider: r.provider,
+ fac: r,
+ errorOnBadPresenceTest: r.errorOnBadPresenceTest,
}
}
@@ -188,11 +207,12 @@ func (r *attrFactory) MaybeAttribute(id int64, name string) Attribute {
// RelativeAttribute refers to an expression and an optional qualifier path.
func (r *attrFactory) RelativeAttribute(id int64, operand Interpretable) Attribute {
return &relativeAttribute{
- id: id,
- operand: operand,
- qualifiers: []Qualifier{},
- adapter: r.adapter,
- fac: r,
+ id: id,
+ operand: operand,
+ qualifiers: []Qualifier{},
+ adapter: r.adapter,
+ fac: r,
+ errorOnBadPresenceTest: r.errorOnBadPresenceTest,
}
}
@@ -214,7 +234,7 @@ func (r *attrFactory) NewQualifier(objType *types.Type, qualID int64, val any, o
}, nil
}
}
- return newQualifier(r.adapter, qualID, val, opt)
+ return newQualifier(r.adapter, qualID, val, opt, r.errorOnBadPresenceTest)
}
type absoluteAttribute struct {
@@ -226,6 +246,8 @@ type absoluteAttribute struct {
adapter types.Adapter
provider types.Provider
fac AttributeFactory
+
+ errorOnBadPresenceTest bool
}
// ID implements the Attribute interface method.
@@ -514,6 +536,8 @@ type relativeAttribute struct {
qualifiers []Qualifier
adapter types.Adapter
fac AttributeFactory
+
+ errorOnBadPresenceTest bool
}
// ID is an implementation of the Attribute interface method.
@@ -577,7 +601,7 @@ func (a *relativeAttribute) String() string {
return fmt.Sprintf("id: %v, operand: %v", a.id, a.operand)
}
-func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier, error) {
+func newQualifier(adapter types.Adapter, id int64, v any, opt, errorOnBadPresenceTest bool) (Qualifier, error) {
var qual Qualifier
switch val := v.(type) {
case Attribute:
@@ -592,71 +616,138 @@ func newQualifier(adapter types.Adapter, id int64, v any, opt bool) (Qualifier,
}, nil
case string:
qual = &stringQualifier{
- id: id,
- value: val,
- celValue: types.String(val),
- adapter: adapter,
- optional: opt,
+ id: id,
+ value: val,
+ celValue: types.String(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case int:
qual = &intQualifier{
- id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt,
+ id: id,
+ value: int64(val),
+ celValue: types.Int(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case int32:
qual = &intQualifier{
- id: id, value: int64(val), celValue: types.Int(val), adapter: adapter, optional: opt,
+ id: id,
+ value: int64(val),
+ celValue: types.Int(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case int64:
qual = &intQualifier{
- id: id, value: val, celValue: types.Int(val), adapter: adapter, optional: opt,
+ id: id,
+ value: val,
+ celValue: types.Int(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case uint:
qual = &uintQualifier{
- id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt,
+ id: id,
+ value: uint64(val),
+ celValue: types.Uint(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case uint32:
qual = &uintQualifier{
- id: id, value: uint64(val), celValue: types.Uint(val), adapter: adapter, optional: opt,
+ id: id,
+ value: uint64(val),
+ celValue: types.Uint(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case uint64:
qual = &uintQualifier{
- id: id, value: val, celValue: types.Uint(val), adapter: adapter, optional: opt,
+ id: id,
+ value: val,
+ celValue: types.Uint(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case bool:
qual = &boolQualifier{
- id: id, value: val, celValue: types.Bool(val), adapter: adapter, optional: opt,
+ id: id,
+ value: val,
+ celValue: types.Bool(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case float32:
qual = &doubleQualifier{
- id: id,
- value: float64(val),
- celValue: types.Double(val),
- adapter: adapter,
- optional: opt,
+ id: id,
+ value: float64(val),
+ celValue: types.Double(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case float64:
qual = &doubleQualifier{
- id: id, value: val, celValue: types.Double(val), adapter: adapter, optional: opt,
+ id: id,
+ value: val,
+ celValue: types.Double(val),
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case types.String:
qual = &stringQualifier{
- id: id, value: string(val), celValue: val, adapter: adapter, optional: opt,
+ id: id,
+ value: string(val),
+ celValue: val,
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case types.Int:
qual = &intQualifier{
- id: id, value: int64(val), celValue: val, adapter: adapter, optional: opt,
+ id: id,
+ value: int64(val),
+ celValue: val,
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case types.Uint:
qual = &uintQualifier{
- id: id, value: uint64(val), celValue: val, adapter: adapter, optional: opt,
+ id: id,
+ value: uint64(val),
+ celValue: val,
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case types.Bool:
qual = &boolQualifier{
- id: id, value: bool(val), celValue: val, adapter: adapter, optional: opt,
+ id: id,
+ value: bool(val),
+ celValue: val,
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case types.Double:
qual = &doubleQualifier{
- id: id, value: float64(val), celValue: val, adapter: adapter, optional: opt,
+ id: id,
+ value: float64(val),
+ celValue: val,
+ adapter: adapter,
+ optional: opt,
+ errorOnBadPresenceTest: errorOnBadPresenceTest,
}
case *types.Unknown:
qual = &unknownQualifier{id: id, value: val}
@@ -687,11 +778,12 @@ func (q *attrQualifier) IsOptional() bool {
}
type stringQualifier struct {
- id int64
- value string
- celValue ref.Val
- adapter types.Adapter
- optional bool
+ id int64
+ value string
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+ errorOnBadPresenceTest bool
}
// ID is an implementation of the Qualifier interface method.
@@ -774,7 +866,7 @@ func (q *stringQualifier) qualifyInternal(vars Activation, obj any, presenceTest
return obj, true, nil
}
default:
- return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest)
}
if presenceTest {
return nil, false, nil
@@ -788,11 +880,12 @@ func (q *stringQualifier) Value() ref.Val {
}
type intQualifier struct {
- id int64
- value int64
- celValue ref.Val
- adapter types.Adapter
- optional bool
+ id int64
+ value int64
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+ errorOnBadPresenceTest bool
}
// ID is an implementation of the Qualifier interface method.
@@ -898,7 +991,7 @@ func (q *intQualifier) qualifyInternal(vars Activation, obj any, presenceTest, p
return o[i], true, nil
}
default:
- return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest)
}
if presenceTest {
return nil, false, nil
@@ -915,11 +1008,12 @@ func (q *intQualifier) Value() ref.Val {
}
type uintQualifier struct {
- id int64
- value uint64
- celValue ref.Val
- adapter types.Adapter
- optional bool
+ id int64
+ value uint64
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+ errorOnBadPresenceTest bool
}
// ID is an implementation of the Qualifier interface method.
@@ -966,7 +1060,7 @@ func (q *uintQualifier) qualifyInternal(vars Activation, obj any, presenceTest,
return obj, true, nil
}
default:
- return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest)
}
if presenceTest {
return nil, false, nil
@@ -980,11 +1074,12 @@ func (q *uintQualifier) Value() ref.Val {
}
type boolQualifier struct {
- id int64
- value bool
- celValue ref.Val
- adapter types.Adapter
- optional bool
+ id int64
+ value bool
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+ errorOnBadPresenceTest bool
}
// ID is an implementation of the Qualifier interface method.
@@ -1017,7 +1112,7 @@ func (q *boolQualifier) qualifyInternal(vars Activation, obj any, presenceTest,
return obj, true, nil
}
default:
- return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest)
}
if presenceTest {
return nil, false, nil
@@ -1092,11 +1187,12 @@ func (q *fieldQualifier) Value() ref.Val {
// type may not be known ahead of time and may not conform to the standard types supported as valid
// protobuf map key types.
type doubleQualifier struct {
- id int64
- value float64
- celValue ref.Val
- adapter types.Adapter
- optional bool
+ id int64
+ value float64
+ celValue ref.Val
+ adapter types.Adapter
+ optional bool
+ errorOnBadPresenceTest bool
}
// ID is an implementation of the Qualifier interface method.
@@ -1120,7 +1216,7 @@ func (q *doubleQualifier) QualifyIfPresent(vars Activation, obj any, presenceOnl
}
func (q *doubleQualifier) qualifyInternal(vars Activation, obj any, presenceTest, presenceOnly bool) (any, bool, error) {
- return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly)
+ return refQualify(q.adapter, obj, q.celValue, presenceTest, presenceOnly, q.errorOnBadPresenceTest)
}
// Value implements the ConstantQualifier interface
@@ -1226,7 +1322,7 @@ func attrQualifyIfPresent(fac AttributeFactory, vars Activation, obj any, qualAt
// refQualify attempts to convert the value to a CEL value and then uses reflection methods to try and
// apply the qualifier with the option to presence test field accesses before retrieving field values.
-func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly bool) (ref.Val, bool, error) {
+func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, presenceOnly, errorOnBadPresenceTest bool) (ref.Val, bool, error) {
celVal := adapter.NativeToValue(obj)
switch v := celVal.(type) {
case *types.Unknown:
@@ -1283,7 +1379,7 @@ func refQualify(adapter types.Adapter, obj any, idx ref.Val, presenceTest, prese
}
return val, true, nil
default:
- if presenceTest {
+ if presenceTest && !errorOnBadPresenceTest {
return nil, false, nil
}
return nil, false, missingKey(idx)
diff --git a/tools/vendor/github.com/google/cel-go/interpreter/interpretable.go b/tools/vendor/github.com/google/cel-go/interpreter/interpretable.go
index 561238407..ebc432e9d 100644
--- a/tools/vendor/github.com/google/cel-go/interpreter/interpretable.go
+++ b/tools/vendor/github.com/google/cel-go/interpreter/interpretable.go
@@ -16,6 +16,7 @@ package interpreter
import (
"fmt"
+ "sync"
"github.com/google/cel-go/common/functions"
"github.com/google/cel-go/common/operators"
@@ -96,7 +97,7 @@ type InterpretableCall interface {
Args() []Interpretable
}
-// InterpretableConstructor interface for inspecting Interpretable instructions that initialize a list, map
+// InterpretableConstructor interface for inspecting Interpretable instructions that initialize a list, map
// or struct.
type InterpretableConstructor interface {
Interpretable
@@ -720,24 +721,31 @@ func (o *evalObj) Eval(ctx Activation) ref.Val {
return types.LabelErrNode(o.id, o.provider.NewValue(o.typeName, fieldVals))
}
+// InitVals implements the InterpretableConstructor interface method.
func (o *evalObj) InitVals() []Interpretable {
return o.vals
}
+// Type implements the InterpretableConstructor interface method.
func (o *evalObj) Type() ref.Type {
- return types.NewObjectTypeValue(o.typeName)
+ return types.NewObjectType(o.typeName)
}
type evalFold struct {
- id int64
- accuVar string
- iterVar string
- iterRange Interpretable
- accu Interpretable
- cond Interpretable
- step Interpretable
- result Interpretable
- adapter types.Adapter
+ id int64
+ accuVar string
+ iterVar string
+ iterVar2 string
+ iterRange Interpretable
+ accu Interpretable
+ cond Interpretable
+ step Interpretable
+ result Interpretable
+ adapter types.Adapter
+
+	// Note: an exhaustive fold will ensure that all branches are evaluated; when using mutable
+	// values, these branches will mutate the final result rather than make a throw-away computation.
exhaustive bool
interruptable bool
}
@@ -749,64 +757,30 @@ func (fold *evalFold) ID() int64 {
// Eval implements the Interpretable interface method.
func (fold *evalFold) Eval(ctx Activation) ref.Val {
- foldRange := fold.iterRange.Eval(ctx)
- if !foldRange.Type().HasTrait(traits.IterableType) {
- return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange)
- }
- // Configure the fold activation with the accumulator initial value.
- accuCtx := varActivationPool.Get().(*varActivation)
- accuCtx.parent = ctx
- accuCtx.name = fold.accuVar
- accuCtx.val = fold.accu.Eval(ctx)
- // If the accumulator starts as an empty list, then the comprehension will build a list
- // so create a mutable list to optimize the cost of the inner loop.
- l, ok := accuCtx.val.(traits.Lister)
- buildingList := false
- if !fold.exhaustive && ok && l.Size() == types.IntZero {
- buildingList = true
- accuCtx.val = types.NewMutableList(fold.adapter)
- }
- iterCtx := varActivationPool.Get().(*varActivation)
- iterCtx.parent = accuCtx
- iterCtx.name = fold.iterVar
-
- interrupted := false
- it := foldRange.(traits.Iterable).Iterator()
- for it.HasNext() == types.True {
- // Modify the iter var in the fold activation.
- iterCtx.val = it.Next()
+ // Initialize the folder interface
+ f := newFolder(fold, ctx)
+ defer releaseFolder(f)
- // Evaluate the condition, terminate the loop if false.
- cond := fold.cond.Eval(iterCtx)
- condBool, ok := cond.(types.Bool)
- if !fold.exhaustive && ok && condBool != types.True {
- break
- }
- // Evaluate the evaluation step into accu var.
- accuCtx.val = fold.step.Eval(iterCtx)
- if fold.interruptable {
- if stop, found := ctx.ResolveName("#interrupted"); found && stop == true {
- interrupted = true
- break
- }
+ foldRange := fold.iterRange.Eval(ctx)
+ if fold.iterVar2 != "" {
+ var foldable traits.Foldable
+ switch r := foldRange.(type) {
+ case traits.Mapper:
+ foldable = types.ToFoldableMap(r)
+ case traits.Lister:
+ foldable = types.ToFoldableList(r)
+ default:
+ return types.NewErrWithNodeID(fold.ID(), "unsupported comprehension range type: %T", foldRange)
}
- }
- varActivationPool.Put(iterCtx)
- if interrupted {
- varActivationPool.Put(accuCtx)
- return types.NewErr("operation interrupted")
+ foldable.Fold(f)
+ return f.evalResult()
}
- // Compute the result.
- res := fold.result.Eval(accuCtx)
- varActivationPool.Put(accuCtx)
- // Convert a mutable list to an immutable one, if the comprehension has generated a list as a result.
- if !types.IsUnknownOrError(res) && buildingList {
- if _, ok := res.(traits.MutableLister); ok {
- res = res.(traits.MutableLister).ToImmutableList()
- }
+ if !foldRange.Type().HasTrait(traits.IterableType) {
+ return types.ValOrErr(foldRange, "got '%T', expected iterable type", foldRange)
}
- return res
+ iterable := foldRange.(traits.Iterable)
+ return f.foldIterable(iterable)
}
// Optional Interpretable implementations that specialize, subsume, or extend the core evaluation
@@ -1262,3 +1236,172 @@ func invalidOptionalEntryInit(field any, value ref.Val) ref.Val {
func invalidOptionalElementInit(value ref.Val) ref.Val {
return types.NewErr("cannot initialize optional list element from non-optional value %v", value)
}
+
+// newFolder creates or initializes a pooled folder instance.
+func newFolder(eval *evalFold, ctx Activation) *folder {
+ f := folderPool.Get().(*folder)
+ f.evalFold = eval
+ f.Activation = ctx
+ return f
+}
+
+// releaseFolder resets and releases a pooled folder instance.
+func releaseFolder(f *folder) {
+ f.reset()
+ folderPool.Put(f)
+}
+
+// folder tracks the state associated with folding a list or map with a comprehension v2 style macro.
+//
+// The folder embeds an interpreter.Activation and Interpretable evalFold value as well as implements
+// the traits.Folder interface methods.
+//
+// Instances of a folder are intended to be pooled to minimize the allocation overhead of this temporary
+// bookkeeping object. It supports lazy evaluation of the accumulator init expression, which helps
+// preserve evaluation order semantics that might otherwise be disrupted through the use of
+// cel.bind or cel.@block.
+type folder struct {
+ *evalFold
+ Activation
+
+ // fold state objects.
+ accuVal ref.Val
+ iterVar1Val any
+ iterVar2Val any
+
+ // bookkeeping flags to modify Activation and fold behaviors.
+ initialized bool
+ mutableValue bool
+ interrupted bool
+ computeResult bool
+}
+
+func (f *folder) foldIterable(iterable traits.Iterable) ref.Val {
+ it := iterable.Iterator()
+ for it.HasNext() == types.True {
+ f.iterVar1Val = it.Next()
+
+ cond := f.cond.Eval(f)
+ condBool, ok := cond.(types.Bool)
+ if f.interrupted || (!f.exhaustive && ok && condBool != types.True) {
+ return f.evalResult()
+ }
+
+		// Update the accumulation value and check for eval interruption.
+ f.accuVal = f.step.Eval(f)
+ f.initialized = true
+ if f.interruptable && checkInterrupt(f.Activation) {
+ f.interrupted = true
+ return f.evalResult()
+ }
+ }
+ return f.evalResult()
+}
+
+// FoldEntry will either fold comprehension v1 style macros if iterVar2 is unset, or comprehension v2 style
+// macros if both the iterVar and iterVar2 are set to non-empty strings.
+func (f *folder) FoldEntry(key, val any) bool {
+ // Default to referencing both values.
+ f.iterVar1Val = key
+ f.iterVar2Val = val
+
+ // Terminate evaluation if evaluation is interrupted or the condition is not true and exhaustive
+ // eval is not enabled.
+ cond := f.cond.Eval(f)
+ condBool, ok := cond.(types.Bool)
+ if f.interrupted || (!f.exhaustive && ok && condBool != types.True) {
+ return false
+ }
+
+	// Update the accumulation value and check for eval interruption.
+ f.accuVal = f.step.Eval(f)
+ f.initialized = true
+ if f.interruptable && checkInterrupt(f.Activation) {
+ f.interrupted = true
+ return false
+ }
+ return true
+}
+
+// ResolveName overrides the default Activation lookup to perform lazy initialization of the accumulator
+// and specialized lookups of iteration values with consideration for whether the final result is being
+// computed and the iteration variables should be ignored.
+func (f *folder) ResolveName(name string) (any, bool) {
+ if name == f.accuVar {
+ if !f.initialized {
+ f.initialized = true
+ initVal := f.accu.Eval(f.Activation)
+ if !f.exhaustive {
+ if l, isList := initVal.(traits.Lister); isList && l.Size() == types.IntZero {
+ initVal = types.NewMutableList(f.adapter)
+ f.mutableValue = true
+ }
+ if m, isMap := initVal.(traits.Mapper); isMap && m.Size() == types.IntZero {
+ initVal = types.NewMutableMap(f.adapter, map[ref.Val]ref.Val{})
+ f.mutableValue = true
+ }
+ }
+ f.accuVal = initVal
+ }
+ return f.accuVal, true
+ }
+ if !f.computeResult {
+ if name == f.iterVar {
+ f.iterVar1Val = f.adapter.NativeToValue(f.iterVar1Val)
+ return f.iterVar1Val, true
+ }
+ if name == f.iterVar2 {
+ f.iterVar2Val = f.adapter.NativeToValue(f.iterVar2Val)
+ return f.iterVar2Val, true
+ }
+ }
+ return f.Activation.ResolveName(name)
+}
+
+// evalResult computes the final result of the fold after all entries have been folded and accumulated.
+func (f *folder) evalResult() ref.Val {
+ f.computeResult = true
+ if f.interrupted {
+ return types.NewErr("operation interrupted")
+ }
+ res := f.result.Eval(f)
+ // Convert a mutable list or map to an immutable one if the comprehension has generated a list or
+ // map as a result.
+ if !types.IsUnknownOrError(res) && f.mutableValue {
+ if _, ok := res.(traits.MutableLister); ok {
+ res = res.(traits.MutableLister).ToImmutableList()
+ }
+ if _, ok := res.(traits.MutableMapper); ok {
+ res = res.(traits.MutableMapper).ToImmutableMap()
+ }
+ }
+ return res
+}
+
+// reset clears any state associated with folder evaluation.
+func (f *folder) reset() {
+ f.evalFold = nil
+ f.Activation = nil
+ f.accuVal = nil
+ f.iterVar1Val = nil
+ f.iterVar2Val = nil
+
+ f.initialized = false
+ f.mutableValue = false
+ f.interrupted = false
+ f.computeResult = false
+}
+
+func checkInterrupt(a Activation) bool {
+ stop, found := a.ResolveName("#interrupted")
+ return found && stop == true
+}
+
+var (
+ // pool of var folders to reduce allocations during folds.
+ folderPool = &sync.Pool{
+ New: func() any {
+ return &folder{}
+ },
+ }
+)
diff --git a/tools/vendor/github.com/google/cel-go/interpreter/planner.go b/tools/vendor/github.com/google/cel-go/interpreter/planner.go
index cf371f95d..3d918ce87 100644
--- a/tools/vendor/github.com/google/cel-go/interpreter/planner.go
+++ b/tools/vendor/github.com/google/cel-go/interpreter/planner.go
@@ -603,6 +603,7 @@ func (p *planner) planComprehension(expr ast.Expr) (Interpretable, error) {
accuVar: fold.AccuVar(),
accu: accu,
iterVar: fold.IterVar(),
+ iterVar2: fold.IterVar2(),
iterRange: iterRange,
cond: cond,
step: step,
diff --git a/tools/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel b/tools/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
index e70433483..3efed87b7 100644
--- a/tools/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
+++ b/tools/vendor/github.com/google/cel-go/parser/gen/BUILD.bazel
@@ -1,7 +1,7 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
package(
- default_visibility = ["//parser:__subpackages__"],
+ default_visibility = ["//:__subpackages__"],
licenses = ["notice"], # Apache 2.0
)
diff --git a/tools/vendor/github.com/google/cel-go/parser/helper.go b/tools/vendor/github.com/google/cel-go/parser/helper.go
index 182ff034c..9f09ead0e 100644
--- a/tools/vendor/github.com/google/cel-go/parser/helper.go
+++ b/tools/vendor/github.com/google/cel-go/parser/helper.go
@@ -115,7 +115,7 @@ func (p *parserHelper) newObjectField(fieldID int64, field string, value ast.Exp
func (p *parserHelper) newComprehension(ctx any,
iterRange ast.Expr,
- iterVar string,
+ iterVar,
accuVar string,
accuInit ast.Expr,
condition ast.Expr,
@@ -125,6 +125,18 @@ func (p *parserHelper) newComprehension(ctx any,
p.newID(ctx), iterRange, iterVar, accuVar, accuInit, condition, step, result)
}
+func (p *parserHelper) newComprehensionTwoVar(ctx any,
+ iterRange ast.Expr,
+ iterVar, iterVar2,
+ accuVar string,
+ accuInit ast.Expr,
+ condition ast.Expr,
+ step ast.Expr,
+ result ast.Expr) ast.Expr {
+ return p.exprFactory.NewComprehensionTwoVar(
+ p.newID(ctx), iterRange, iterVar, iterVar2, accuVar, accuInit, condition, step, result)
+}
+
func (p *parserHelper) newID(ctx any) int64 {
if id, isID := ctx.(int64); isID {
return id
@@ -140,15 +152,12 @@ func (p *parserHelper) id(ctx any) int64 {
var offset ast.OffsetRange
switch c := ctx.(type) {
case antlr.ParserRuleContext:
- start, stop := c.GetStart(), c.GetStop()
- if stop == nil {
- stop = start
- }
+ start := c.GetStart()
offset.Start = p.sourceInfo.ComputeOffset(int32(start.GetLine()), int32(start.GetColumn()))
- offset.Stop = p.sourceInfo.ComputeOffset(int32(stop.GetLine()), int32(stop.GetColumn()))
+ offset.Stop = offset.Start + int32(len(c.GetText()))
case antlr.Token:
offset.Start = p.sourceInfo.ComputeOffset(int32(c.GetLine()), int32(c.GetColumn()))
- offset.Stop = offset.Start
+ offset.Stop = offset.Start + int32(len(c.GetText()))
case common.Location:
offset.Start = p.sourceInfo.ComputeOffset(int32(c.Line()), int32(c.Column()))
offset.Stop = offset.Start
@@ -164,10 +173,21 @@ func (p *parserHelper) id(ctx any) int64 {
return id
}
+func (p *parserHelper) deleteID(id int64) {
+ p.sourceInfo.ClearOffsetRange(id)
+ if id == p.nextID-1 {
+ p.nextID--
+ }
+}
+
func (p *parserHelper) getLocation(id int64) common.Location {
return p.sourceInfo.GetStartLocation(id)
}
+func (p *parserHelper) getLocationByOffset(offset int32) common.Location {
+ return p.getSourceInfo().GetLocationByOffset(offset)
+}
+
// buildMacroCallArg iterates the expression and returns a new expression
// where all macros have been replaced by their IDs in MacroCalls
func (p *parserHelper) buildMacroCallArg(expr ast.Expr) ast.Expr {
@@ -375,8 +395,10 @@ func (e *exprHelper) Copy(expr ast.Expr) ast.Expr {
cond := e.Copy(compre.LoopCondition())
step := e.Copy(compre.LoopStep())
result := e.Copy(compre.Result())
- return e.exprFactory.NewComprehension(copyID,
- iterRange, compre.IterVar(), compre.AccuVar(), accuInit, cond, step, result)
+ // All comprehensions can be represented by the two-variable comprehension since the
+ // differentiation between one and two-variable is whether the iterVar2 value is non-empty.
+ return e.exprFactory.NewComprehensionTwoVar(copyID,
+ iterRange, compre.IterVar(), compre.IterVar2(), compre.AccuVar(), accuInit, cond, step, result)
}
return e.exprFactory.NewUnspecifiedExpr(copyID)
}
@@ -424,6 +446,20 @@ func (e *exprHelper) NewComprehension(
e.nextMacroID(), iterRange, iterVar, accuVar, accuInit, condition, step, result)
}
+// NewComprehensionTwoVar implements the ExprHelper interface method.
+func (e *exprHelper) NewComprehensionTwoVar(
+ iterRange ast.Expr,
+ iterVar,
+ iterVar2,
+ accuVar string,
+ accuInit,
+ condition,
+ step,
+ result ast.Expr) ast.Expr {
+ return e.exprFactory.NewComprehensionTwoVar(
+ e.nextMacroID(), iterRange, iterVar, iterVar2, accuVar, accuInit, condition, step, result)
+}
+
// NewIdent implements the ExprHelper interface method.
func (e *exprHelper) NewIdent(name string) ast.Expr {
return e.exprFactory.NewIdent(e.nextMacroID(), name)
diff --git a/tools/vendor/github.com/google/cel-go/parser/macro.go b/tools/vendor/github.com/google/cel-go/parser/macro.go
index 1f4c847e0..dc47b4203 100644
--- a/tools/vendor/github.com/google/cel-go/parser/macro.go
+++ b/tools/vendor/github.com/google/cel-go/parser/macro.go
@@ -170,11 +170,12 @@ type ExprHelper interface {
// NewStructField creates a new struct field initializer from the field name and value.
NewStructField(field string, init ast.Expr, optional bool) ast.EntryExpr
- // NewComprehension creates a new comprehension instruction.
+ // NewComprehension creates a new one-variable comprehension instruction.
//
// - iterRange represents the expression that resolves to a list or map where the elements or
// keys (respectively) will be iterated over.
- // - iterVar is the iteration variable name.
+ // - iterVar is the variable name for the list element value, or the map key, depending on the
+ // range type.
// - accuVar is the accumulation variable name, typically parser.AccumulatorName.
// - accuInit is the initial expression whose value will be set for the accuVar prior to
// folding.
@@ -186,11 +187,36 @@ type ExprHelper interface {
// environment in the step and condition expressions. Presently, the name __result__ is commonly
// used by built-in macros but this may change in the future.
NewComprehension(iterRange ast.Expr,
- iterVar string,
+ iterVar,
accuVar string,
- accuInit ast.Expr,
- condition ast.Expr,
- step ast.Expr,
+ accuInit,
+ condition,
+ step,
+ result ast.Expr) ast.Expr
+
+ // NewComprehensionTwoVar creates a new two-variable comprehension instruction.
+ //
+ // - iterRange represents the expression that resolves to a list or map where the elements or
+ // keys (respectively) will be iterated over.
+ // - iterVar is the iteration variable assigned to the list index or the map key.
+ // - iterVar2 is the iteration variable assigned to the list element value or the map key value.
+ // - accuVar is the accumulation variable name, typically parser.AccumulatorName.
+ // - accuInit is the initial expression whose value will be set for the accuVar prior to
+ // folding.
+ // - condition is the expression to test to determine whether to continue folding.
+ // - step is the expression to evaluate at the conclusion of a single fold iteration.
+ // - result is the computation to evaluate at the conclusion of the fold.
+ //
+ // The accuVar should not shadow variable names that you would like to reference within the
+ // environment in the step and condition expressions. Presently, the name __result__ is commonly
+ // used by built-in macros but this may change in the future.
+ NewComprehensionTwoVar(iterRange ast.Expr,
+ iterVar,
+ iterVar2,
+ accuVar string,
+ accuInit,
+ condition,
+ step,
result ast.Expr) ast.Expr
// NewIdent creates an identifier Expr value.
@@ -382,13 +408,11 @@ func makeQuantifier(kind quantifierKind, eh ExprHelper, target ast.Expr, args []
step = eh.NewCall(operators.LogicalOr, eh.NewAccuIdent(), args[1])
result = eh.NewAccuIdent()
case quantifierExistsOne:
- zeroExpr := eh.NewLiteral(types.Int(0))
- oneExpr := eh.NewLiteral(types.Int(1))
- init = zeroExpr
+ init = eh.NewLiteral(types.Int(0))
condition = eh.NewLiteral(types.True)
step = eh.NewCall(operators.Conditional, args[1],
- eh.NewCall(operators.Add, eh.NewAccuIdent(), oneExpr), eh.NewAccuIdent())
- result = eh.NewCall(operators.Equals, eh.NewAccuIdent(), oneExpr)
+ eh.NewCall(operators.Add, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1))), eh.NewAccuIdent())
+ result = eh.NewCall(operators.Equals, eh.NewAccuIdent(), eh.NewLiteral(types.Int(1)))
default:
return nil, eh.NewError(args[0].ID(), fmt.Sprintf("unrecognized quantifier '%v'", kind))
}
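The two-variable comprehension documented above is the building block for macros that need both the key (or index) and the value of the iteration range. Below is a minimal sketch of a custom expander that uses it; the macro name `allPairs`, its argument layout, and its simplified short-circuit semantics are assumptions for illustration only, while the ExprHelper methods, operators, and types are the ones shown in this hunk.

package sketch

import (
	"github.com/google/cel-go/common"
	"github.com/google/cel-go/common/ast"
	"github.com/google/cel-go/common/operators"
	"github.com/google/cel-go/common/types"
	"github.com/google/cel-go/parser"
)

// expandAllPairs sketches a hypothetical `m.allPairs(k, v, pred)` macro that
// folds a map with the new two-variable comprehension. Argument validation is
// omitted for brevity.
func expandAllPairs(eh parser.ExprHelper, target ast.Expr, args []ast.Expr) (ast.Expr, *common.Error) {
	iterVar := args[0].AsIdent()  // map key variable, e.g. "k"
	iterVar2 := args[1].AsIdent() // map value variable, e.g. "v"
	pred := args[2]               // predicate over k and v
	return eh.NewComprehensionTwoVar(
		target,                    // iterRange: the map receiver
		iterVar, iterVar2,         // key and value iteration variables
		parser.AccumulatorName,    // accuVar
		eh.NewLiteral(types.True), // accuInit: assume all pairs match
		eh.NewAccuIdent(),         // condition: keep folding while still true (simplified)
		eh.NewCall(operators.LogicalAnd, eh.NewAccuIdent(), pred), // step
		eh.NewAccuIdent(),         // result
	), nil
}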
diff --git a/tools/vendor/github.com/google/cel-go/parser/parser.go b/tools/vendor/github.com/google/cel-go/parser/parser.go
index cb753df73..5cbb17672 100644
--- a/tools/vendor/github.com/google/cel-go/parser/parser.go
+++ b/tools/vendor/github.com/google/cel-go/parser/parser.go
@@ -856,7 +856,8 @@ func (p *parser) reportError(ctx any, format string, args ...any) ast.Expr {
// ANTLR Parse listener implementations
func (p *parser) SyntaxError(recognizer antlr.Recognizer, offendingSymbol any, line, column int, msg string, e antlr.RecognitionException) {
- l := p.helper.source.NewLocation(line, column)
+ offset := p.helper.sourceInfo.ComputeOffset(int32(line), int32(column))
+ l := p.helper.getLocationByOffset(offset)
// Hack to keep existing error messages consistent with previous versions of CEL when a reserved word
// is used as an identifier. This behavior needs to be overhauled to provide consistent, normalized error
// messages out of ANTLR to prevent future breaking changes related to error message content.
@@ -916,10 +917,12 @@ func (p *parser) expandMacro(exprID int64, function string, target ast.Expr, arg
expr, err := macro.Expander()(eh, target, args)
// An error indicates that the macro was matched, but the arguments were not well-formed.
if err != nil {
- if err.Location != nil {
- return p.reportError(err.Location, err.Message), true
+ loc := err.Location
+ if loc == nil {
+ loc = p.helper.getLocation(exprID)
}
- return p.reportError(p.helper.getLocation(exprID), err.Message), true
+ p.helper.deleteID(exprID)
+ return p.reportError(loc, err.Message), true
}
// A nil value from the macro indicates that the macro implementation decided that
// an expansion should not be performed.
@@ -929,6 +932,7 @@ func (p *parser) expandMacro(exprID int64, function string, target ast.Expr, arg
if p.populateMacroCalls {
p.helper.addMacroCall(expr.ID(), function, target, args...)
}
+ p.helper.deleteID(exprID)
return expr, true
}
diff --git a/tools/vendor/github.com/moby/spdystream/connection.go b/tools/vendor/github.com/moby/spdystream/connection.go
index d649eccc8..1394d0ad4 100644
--- a/tools/vendor/github.com/moby/spdystream/connection.go
+++ b/tools/vendor/github.com/moby/spdystream/connection.go
@@ -712,7 +712,9 @@ func (s *Connection) shutdown(closeTimeout time.Duration) {
var timeout <-chan time.Time
if closeTimeout > time.Duration(0) {
- timeout = time.After(closeTimeout)
+ timer := time.NewTimer(closeTimeout)
+ defer timer.Stop()
+ timeout = timer.C
}
streamsClosed := make(chan bool)
@@ -739,7 +741,15 @@ func (s *Connection) shutdown(closeTimeout time.Duration) {
}
if err != nil {
- duration := 10 * time.Minute
+ // default to 1 second
+ duration := time.Second
+ // if a closeTimeout was given, use that, clipped to 1s-10m
+ if closeTimeout > time.Second {
+ duration = closeTimeout
+ }
+ if duration > 10*time.Minute {
+ duration = 10 * time.Minute
+ }
timer := time.NewTimer(duration)
defer timer.Stop()
select {
@@ -806,7 +816,9 @@ func (s *Connection) CloseWait() error {
func (s *Connection) Wait(waitTimeout time.Duration) error {
var timeout <-chan time.Time
if waitTimeout > time.Duration(0) {
- timeout = time.After(waitTimeout)
+ timer := time.NewTimer(waitTimeout)
+ defer timer.Stop()
+ timeout = timer.C
}
select {
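Both hunks above replace time.After with an explicit timer so the timer is released as soon as the surrounding call returns; the timer behind time.After is only reclaimed when it fires. A minimal sketch of the same pattern, with hypothetical names:

package sketch

import (
	"errors"
	"time"
)

// waitWithTimeout mirrors the pattern adopted above: a nil timeout channel
// blocks forever in the select, and the timer is stopped on early return.
func waitWithTimeout(done <-chan struct{}, d time.Duration) error {
	var timeout <-chan time.Time
	if d > 0 {
		timer := time.NewTimer(d)
		defer timer.Stop() // release the timer promptly if done fires first
		timeout = timer.C
	}
	select {
	case <-done:
		return nil
	case <-timeout:
		return errors.New("timeout waiting for completion")
	}
}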
diff --git a/tools/vendor/github.com/onsi/gomega/types/types.go b/tools/vendor/github.com/onsi/gomega/types/types.go
index 7c7adb941..30f2beed3 100644
--- a/tools/vendor/github.com/onsi/gomega/types/types.go
+++ b/tools/vendor/github.com/onsi/gomega/types/types.go
@@ -29,6 +29,8 @@ type Gomega interface {
SetDefaultEventuallyPollingInterval(time.Duration)
SetDefaultConsistentlyDuration(time.Duration)
SetDefaultConsistentlyPollingInterval(time.Duration)
+ EnforceDefaultTimeoutsWhenUsingContexts()
+ DisableDefaultTimeoutsWhenUsingContext()
}
// All Gomega matchers must implement the GomegaMatcher interface
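The two methods added to the Gomega interface toggle whether configured default timeouts still apply when an assertion is driven by a context. A sketch of how they might be used on a per-test Gomega instance follows; NewWithT and SetDefaultEventuallyTimeout are existing gomega APIs, and the test body is illustrative.

package sketch

import (
	"testing"
	"time"

	"github.com/onsi/gomega"
)

func TestContextTimeouts(t *testing.T) {
	g := gomega.NewWithT(t)
	g.SetDefaultEventuallyTimeout(2 * time.Second)

	// Apply the default timeout even when Eventually is given a context.
	g.EnforceDefaultTimeoutsWhenUsingContexts()

	// ... assertions using g.Eventually(ctx, ...) ...

	// Restore the previous behavior: the context alone bounds polling.
	g.DisableDefaultTimeoutsWhenUsingContext()
}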
diff --git a/tools/vendor/go.etcd.io/bbolt/.go-version b/tools/vendor/go.etcd.io/bbolt/.go-version
index f124bfa15..013173af5 100644
--- a/tools/vendor/go.etcd.io/bbolt/.go-version
+++ b/tools/vendor/go.etcd.io/bbolt/.go-version
@@ -1 +1 @@
-1.21.9
+1.22.6
diff --git a/tools/vendor/go.etcd.io/bbolt/Makefile b/tools/vendor/go.etcd.io/bbolt/Makefile
index 18154c638..214077974 100644
--- a/tools/vendor/go.etcd.io/bbolt/Makefile
+++ b/tools/vendor/go.etcd.io/bbolt/Makefile
@@ -41,6 +41,15 @@ coverage:
TEST_FREELIST_TYPE=array go test -v -timeout 30m \
-coverprofile cover-freelist-array.out -covermode atomic
+BOLT_CMD=bbolt
+
+build:
+ go build -o bin/${BOLT_CMD} ./cmd/${BOLT_CMD}
+
+.PHONY: clean
+clean: # Clean binaries
+ rm -f ./bin/${BOLT_CMD}
+
.PHONY: gofail-enable
gofail-enable: install-gofail
gofail enable .
@@ -61,3 +70,7 @@ test-failpoint:
@echo "[failpoint] array freelist test"
TEST_FREELIST_TYPE=array go test -v ${TESTFLAGS} -timeout 30m ./tests/failpoint
+.PHONY: test-robustness # Running robustness tests requires root permission
+test-robustness:
+ go test -v ${TESTFLAGS} ./tests/dmflakey -test.root
+ go test -v ${TESTFLAGS} ./tests/robustness -test.root
diff --git a/tools/vendor/go.etcd.io/bbolt/db.go b/tools/vendor/go.etcd.io/bbolt/db.go
index 4175bdf3d..822798e41 100644
--- a/tools/vendor/go.etcd.io/bbolt/db.go
+++ b/tools/vendor/go.etcd.io/bbolt/db.go
@@ -524,7 +524,7 @@ func (db *DB) munmap() error {
// gofail: var unmapError string
// return errors.New(unmapError)
if err := munmap(db); err != nil {
- return fmt.Errorf("unmap error: " + err.Error())
+ return fmt.Errorf("unmap error: %v", err.Error())
}
return nil
@@ -571,7 +571,7 @@ func (db *DB) munlock(fileSize int) error {
// gofail: var munlockError string
// return errors.New(munlockError)
if err := munlock(db, fileSize); err != nil {
- return fmt.Errorf("munlock error: " + err.Error())
+ return fmt.Errorf("munlock error: %v", err.Error())
}
return nil
}
@@ -580,7 +580,7 @@ func (db *DB) mlock(fileSize int) error {
// gofail: var mlockError string
// return errors.New(mlockError)
if err := mlock(db, fileSize); err != nil {
- return fmt.Errorf("mlock error: " + err.Error())
+ return fmt.Errorf("mlock error: %v", err.Error())
}
return nil
}
@@ -1159,6 +1159,8 @@ func (db *DB) grow(sz int) error {
// https://github.com/boltdb/bolt/issues/284
if !db.NoGrowSync && !db.readOnly {
if runtime.GOOS != "windows" {
+ // gofail: var resizeFileError string
+ // return errors.New(resizeFileError)
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("file resize error: %s", err)
}
diff --git a/tools/vendor/go.etcd.io/bbolt/freelist.go b/tools/vendor/go.etcd.io/bbolt/freelist.go
index 61d43f81b..dffc7bc74 100644
--- a/tools/vendor/go.etcd.io/bbolt/freelist.go
+++ b/tools/vendor/go.etcd.io/bbolt/freelist.go
@@ -252,6 +252,14 @@ func (f *freelist) rollback(txid txid) {
}
// Remove pages from pending list and mark as free if allocated by txid.
delete(f.pending, txid)
+
+ // Remove pgids which are allocated by this txid
+ for pgid, tid := range f.allocs {
+ if tid == txid {
+ delete(f.allocs, pgid)
+ }
+ }
+
f.mergeSpans(m)
}
diff --git a/tools/vendor/go.etcd.io/bbolt/tx.go b/tools/vendor/go.etcd.io/bbolt/tx.go
index 2fac8c0a7..766395de3 100644
--- a/tools/vendor/go.etcd.io/bbolt/tx.go
+++ b/tools/vendor/go.etcd.io/bbolt/tx.go
@@ -1,6 +1,7 @@
package bbolt
import (
+ "errors"
"fmt"
"io"
"os"
@@ -185,6 +186,10 @@ func (tx *Tx) Commit() error {
// If the high water mark has moved up then attempt to grow the database.
if tx.meta.pgid > opgid {
+ _ = errors.New("")
+ // gofail: var lackOfDiskSpace string
+ // tx.rollback()
+ // return errors.New(lackOfDiskSpace)
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
tx.rollback()
return err
@@ -470,6 +475,7 @@ func (tx *Tx) write() error {
// Ignore file sync if flag is set on DB.
if !tx.db.NoSync || IgnoreNoSync {
+ // gofail: var beforeSyncDataPages struct{}
if err := fdatasync(tx.db); err != nil {
return err
}
@@ -507,6 +513,7 @@ func (tx *Tx) writeMeta() error {
return err
}
if !tx.db.NoSync || IgnoreNoSync {
+ // gofail: var beforeSyncMetaPage struct{}
if err := fdatasync(tx.db); err != nil {
return err
}
diff --git a/tools/vendor/go.uber.org/zap/.golangci.yml b/tools/vendor/go.uber.org/zap/.golangci.yml
index fbc6df790..2346df135 100644
--- a/tools/vendor/go.uber.org/zap/.golangci.yml
+++ b/tools/vendor/go.uber.org/zap/.golangci.yml
@@ -17,7 +17,7 @@ linters:
- unused
# Our own extras:
- - gofmt
+ - gofumpt
- nolintlint # lints nolint directives
- revive
diff --git a/tools/vendor/go.uber.org/zap/.readme.tmpl b/tools/vendor/go.uber.org/zap/.readme.tmpl
index 92aa65d66..4fea3027a 100644
--- a/tools/vendor/go.uber.org/zap/.readme.tmpl
+++ b/tools/vendor/go.uber.org/zap/.readme.tmpl
@@ -1,7 +1,15 @@
# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+
Blazing fast, structured, leveled logging in Go.
+![Zap logo](assets/logo.png)
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+
+
## Installation
`go get -u go.uber.org/zap`
@@ -92,7 +100,7 @@ standard.
-Released under the [MIT License](LICENSE.txt).
+Released under the [MIT License](LICENSE).
In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
diff --git a/tools/vendor/go.uber.org/zap/CHANGELOG.md b/tools/vendor/go.uber.org/zap/CHANGELOG.md
index 11b465976..6d6cd5f4d 100644
--- a/tools/vendor/go.uber.org/zap/CHANGELOG.md
+++ b/tools/vendor/go.uber.org/zap/CHANGELOG.md
@@ -3,14 +3,30 @@ All notable changes to this project will be documented in this file.
This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+## 1.27.0 (20 Feb 2024)
+Enhancements:
+* [#1378][]: Add `WithLazy` method for `SugaredLogger`.
+* [#1399][]: zaptest: Add `NewTestingWriter` for customizing TestingWriter with more flexibility than `NewLogger`.
+* [#1406][]: Add `Log`, `Logw`, `Logln` methods for `SugaredLogger`.
+* [#1416][]: Add `WithPanicHook` option for testing panic logs.
+
+Thanks to @defval, @dimmo, @arxeiss, and @MKrupauskas for their contributions to this release.
+
+[#1378]: https://github.com/uber-go/zap/pull/1378
+[#1399]: https://github.com/uber-go/zap/pull/1399
+[#1406]: https://github.com/uber-go/zap/pull/1406
+[#1416]: https://github.com/uber-go/zap/pull/1416
+
## 1.26.0 (14 Sep 2023)
Enhancements:
+* [#1297][]: Add Dict as a Field.
* [#1319][]: Add `WithLazy` method to `Logger` which lazily evaluates the structured
context.
* [#1350][]: String encoding is much (~50%) faster now.
-Thanks to @jquirke, @cdvr1993 for their contributions to this release.
+Thanks to @hhk7734, @jquirke, and @cdvr1993 for their contributions to this release.
+[#1297]: https://github.com/uber-go/zap/pull/1297
[#1319]: https://github.com/uber-go/zap/pull/1319
[#1350]: https://github.com/uber-go/zap/pull/1350
@@ -25,7 +41,7 @@ Enhancements:
* [#1273][]: Add `Name` to `Logger` which returns the Logger's name if one is set.
* [#1281][]: Add `zap/exp/expfield` package which contains helper methods
`Str` and `Strs` for constructing String-like zap.Fields.
-* [#1310][]: Reduce stack size on `Any`.
+* [#1310][]: Reduce stack size on `Any`.
Thanks to @knight42, @dzakaammar, @bcspragu, and @rexywork for their contributions
to this release.
@@ -352,7 +368,7 @@ to this release.
[#675]: https://github.com/uber-go/zap/pull/675
[#704]: https://github.com/uber-go/zap/pull/704
-## v1.9.1 (06 Aug 2018)
+## 1.9.1 (06 Aug 2018)
Bugfixes:
@@ -360,7 +376,7 @@ Bugfixes:
[#614]: https://github.com/uber-go/zap/pull/614
-## v1.9.0 (19 Jul 2018)
+## 1.9.0 (19 Jul 2018)
Enhancements:
* [#602][]: Reduce number of allocations when logging with reflection.
@@ -373,7 +389,7 @@ Thanks to @nfarah86, @AlekSi, @JeanMertz, @philippgille, @etsangsplk, and
[#572]: https://github.com/uber-go/zap/pull/572
[#606]: https://github.com/uber-go/zap/pull/606
-## v1.8.0 (13 Apr 2018)
+## 1.8.0 (13 Apr 2018)
Enhancements:
* [#508][]: Make log level configurable when redirecting the standard
@@ -391,14 +407,14 @@ Thanks to @DiSiqueira and @djui for their contributions to this release.
[#577]: https://github.com/uber-go/zap/pull/577
[#574]: https://github.com/uber-go/zap/pull/574
-## v1.7.1 (25 Sep 2017)
+## 1.7.1 (25 Sep 2017)
Bugfixes:
* [#504][]: Store strings when using AddByteString with the map encoder.
[#504]: https://github.com/uber-go/zap/pull/504
-## v1.7.0 (21 Sep 2017)
+## 1.7.0 (21 Sep 2017)
Enhancements:
@@ -407,7 +423,7 @@ Enhancements:
[#487]: https://github.com/uber-go/zap/pull/487
-## v1.6.0 (30 Aug 2017)
+## 1.6.0 (30 Aug 2017)
Enhancements:
@@ -418,7 +434,7 @@ Enhancements:
[#490]: https://github.com/uber-go/zap/pull/490
[#491]: https://github.com/uber-go/zap/pull/491
-## v1.5.0 (22 Jul 2017)
+## 1.5.0 (22 Jul 2017)
Enhancements:
@@ -436,7 +452,7 @@ Thanks to @richard-tunein and @pavius for their contributions to this release.
[#460]: https://github.com/uber-go/zap/pull/460
[#470]: https://github.com/uber-go/zap/pull/470
-## v1.4.1 (08 Jun 2017)
+## 1.4.1 (08 Jun 2017)
This release fixes two bugs.
@@ -448,7 +464,7 @@ Bugfixes:
[#435]: https://github.com/uber-go/zap/pull/435
[#444]: https://github.com/uber-go/zap/pull/444
-## v1.4.0 (12 May 2017)
+## 1.4.0 (12 May 2017)
This release adds a few small features and is fully backward-compatible.
@@ -464,7 +480,7 @@ Enhancements:
[#425]: https://github.com/uber-go/zap/pull/425
[#431]: https://github.com/uber-go/zap/pull/431
-## v1.3.0 (25 Apr 2017)
+## 1.3.0 (25 Apr 2017)
This release adds an enhancement to zap's testing helpers as well as the
ability to marshal an AtomicLevel. It is fully backward-compatible.
@@ -478,7 +494,7 @@ Enhancements:
[#415]: https://github.com/uber-go/zap/pull/415
[#416]: https://github.com/uber-go/zap/pull/416
-## v1.2.0 (13 Apr 2017)
+## 1.2.0 (13 Apr 2017)
This release adds a gRPC compatibility wrapper. It is fully backward-compatible.
@@ -489,7 +505,7 @@ Enhancements:
[#402]: https://github.com/uber-go/zap/pull/402
-## v1.1.0 (31 Mar 2017)
+## 1.1.0 (31 Mar 2017)
This release fixes two bugs and adds some enhancements to zap's testing helpers.
It is fully backward-compatible.
@@ -510,7 +526,7 @@ Thanks to @moitias for contributing to this release.
[#396]: https://github.com/uber-go/zap/pull/396
[#386]: https://github.com/uber-go/zap/pull/386
-## v1.0.0 (14 Mar 2017)
+## 1.0.0 (14 Mar 2017)
This is zap's first stable release. All exported APIs are now final, and no
further breaking changes will be made in the 1.x release series. Anyone using a
@@ -569,7 +585,7 @@ contributions to this release.
[#365]: https://github.com/uber-go/zap/pull/365
[#372]: https://github.com/uber-go/zap/pull/372
-## v1.0.0-rc.3 (7 Mar 2017)
+## 1.0.0-rc.3 (7 Mar 2017)
This is the third release candidate for zap's stable release. There are no
breaking changes.
@@ -595,7 +611,7 @@ Thanks to @ansel1 and @suyash for their contributions to this release.
[#353]: https://github.com/uber-go/zap/pull/353
[#311]: https://github.com/uber-go/zap/pull/311
-## v1.0.0-rc.2 (21 Feb 2017)
+## 1.0.0-rc.2 (21 Feb 2017)
This is the second release candidate for zap's stable release. It includes two
breaking changes.
@@ -641,7 +657,7 @@ Thanks to @skipor and @chapsuk for their contributions to this release.
[#326]: https://github.com/uber-go/zap/pull/326
[#300]: https://github.com/uber-go/zap/pull/300
-## v1.0.0-rc.1 (14 Feb 2017)
+## 1.0.0-rc.1 (14 Feb 2017)
This is the first release candidate for zap's stable release. There are multiple
breaking changes and improvements from the pre-release version. Most notably:
@@ -661,7 +677,7 @@ breaking changes and improvements from the pre-release version. Most notably:
* Sampling is more accurate, and doesn't depend on the standard library's shared
timer heap.
-## v0.1.0-beta.1 (6 Feb 2017)
+## 0.1.0-beta.1 (6 Feb 2017)
This is a minor version, tagged to allow users to pin to the pre-1.0 APIs and
upgrade at their leisure. Since this is the first tagged release, there are no
diff --git a/tools/vendor/go.uber.org/zap/LICENSE.txt b/tools/vendor/go.uber.org/zap/LICENSE
similarity index 100%
rename from tools/vendor/go.uber.org/zap/LICENSE.txt
rename to tools/vendor/go.uber.org/zap/LICENSE
diff --git a/tools/vendor/go.uber.org/zap/README.md b/tools/vendor/go.uber.org/zap/README.md
index 9de08927b..a17035cb6 100644
--- a/tools/vendor/go.uber.org/zap/README.md
+++ b/tools/vendor/go.uber.org/zap/README.md
@@ -1,7 +1,16 @@
-# :zap: zap [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+# :zap: zap
+
+
+
Blazing fast, structured, leveled logging in Go.
+![Zap logo](assets/logo.png)
+
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
+
+
+
## Installation
`go get -u go.uber.org/zap`
@@ -66,41 +75,44 @@ Log a message and 10 fields:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 1744 ns/op | +0% | 5 allocs/op
-| :zap: zap (sugared) | 2483 ns/op | +42% | 10 allocs/op
-| zerolog | 918 ns/op | -47% | 1 allocs/op
-| go-kit | 5590 ns/op | +221% | 57 allocs/op
-| slog | 5640 ns/op | +223% | 40 allocs/op
-| apex/log | 21184 ns/op | +1115% | 63 allocs/op
-| logrus | 24338 ns/op | +1296% | 79 allocs/op
-| log15 | 26054 ns/op | +1394% | 74 allocs/op
+| :zap: zap | 656 ns/op | +0% | 5 allocs/op
+| :zap: zap (sugared) | 935 ns/op | +43% | 10 allocs/op
+| zerolog | 380 ns/op | -42% | 1 allocs/op
+| go-kit | 2249 ns/op | +243% | 57 allocs/op
+| slog (LogAttrs) | 2479 ns/op | +278% | 40 allocs/op
+| slog | 2481 ns/op | +278% | 42 allocs/op
+| apex/log | 9591 ns/op | +1362% | 63 allocs/op
+| log15 | 11393 ns/op | +1637% | 75 allocs/op
+| logrus | 11654 ns/op | +1677% | 79 allocs/op
Log a message with a logger that already has 10 fields of context:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 193 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 227 ns/op | +18% | 1 allocs/op
-| zerolog | 81 ns/op | -58% | 0 allocs/op
-| slog | 322 ns/op | +67% | 0 allocs/op
-| go-kit | 5377 ns/op | +2686% | 56 allocs/op
-| apex/log | 19518 ns/op | +10013% | 53 allocs/op
-| log15 | 19812 ns/op | +10165% | 70 allocs/op
-| logrus | 21997 ns/op | +11297% | 68 allocs/op
+| :zap: zap | 67 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 84 ns/op | +25% | 1 allocs/op
+| zerolog | 35 ns/op | -48% | 0 allocs/op
+| slog | 193 ns/op | +188% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +199% | 0 allocs/op
+| go-kit | 2460 ns/op | +3572% | 56 allocs/op
+| log15 | 9038 ns/op | +13390% | 70 allocs/op
+| apex/log | 9068 ns/op | +13434% | 53 allocs/op
+| logrus | 10521 ns/op | +15603% | 68 allocs/op
Log a static string, without any context or `printf`-style templating:
| Package | Time | Time % to zap | Objects Allocated |
| :------ | :--: | :-----------: | :---------------: |
-| :zap: zap | 165 ns/op | +0% | 0 allocs/op
-| :zap: zap (sugared) | 212 ns/op | +28% | 1 allocs/op
-| zerolog | 95 ns/op | -42% | 0 allocs/op
-| slog | 296 ns/op | +79% | 0 allocs/op
-| go-kit | 415 ns/op | +152% | 9 allocs/op
-| standard library | 422 ns/op | +156% | 2 allocs/op
-| apex/log | 1601 ns/op | +870% | 5 allocs/op
-| logrus | 3017 ns/op | +1728% | 23 allocs/op
-| log15 | 3469 ns/op | +2002% | 20 allocs/op
+| :zap: zap | 63 ns/op | +0% | 0 allocs/op
+| :zap: zap (sugared) | 81 ns/op | +29% | 1 allocs/op
+| zerolog | 32 ns/op | -49% | 0 allocs/op
+| standard library | 124 ns/op | +97% | 1 allocs/op
+| slog | 196 ns/op | +211% | 0 allocs/op
+| slog (LogAttrs) | 200 ns/op | +217% | 0 allocs/op
+| go-kit | 213 ns/op | +238% | 9 allocs/op
+| apex/log | 771 ns/op | +1124% | 5 allocs/op
+| logrus | 1439 ns/op | +2184% | 23 allocs/op
+| log15 | 2069 ns/op | +3184% | 20 allocs/op
## Development Status: Stable
@@ -120,7 +132,7 @@ standard.
-Released under the [MIT License](LICENSE.txt).
+Released under the [MIT License](LICENSE).
In particular, keep in mind that we may be
benchmarking against slightly older versions of other packages. Versions are
diff --git a/tools/vendor/go.uber.org/zap/buffer/buffer.go b/tools/vendor/go.uber.org/zap/buffer/buffer.go
index 27fb5cd5d..0b8540c21 100644
--- a/tools/vendor/go.uber.org/zap/buffer/buffer.go
+++ b/tools/vendor/go.uber.org/zap/buffer/buffer.go
@@ -42,7 +42,7 @@ func (b *Buffer) AppendByte(v byte) {
b.bs = append(b.bs, v)
}
-// AppendBytes writes a single byte to the Buffer.
+// AppendBytes writes the given slice of bytes to the Buffer.
func (b *Buffer) AppendBytes(v []byte) {
b.bs = append(b.bs, v...)
}
diff --git a/tools/vendor/go.uber.org/zap/field.go b/tools/vendor/go.uber.org/zap/field.go
index c8dd3358a..6743930b8 100644
--- a/tools/vendor/go.uber.org/zap/field.go
+++ b/tools/vendor/go.uber.org/zap/field.go
@@ -460,6 +460,8 @@ func (d dictObject) MarshalLogObject(enc zapcore.ObjectEncoder) error {
// - https://github.com/uber-go/zap/pull/1304
// - https://github.com/uber-go/zap/pull/1305
// - https://github.com/uber-go/zap/pull/1308
+//
+// See https://github.com/golang/go/issues/62077 for upstream issue.
type anyFieldC[T any] func(string, T) Field
func (f anyFieldC[T]) Any(key string, val any) Field {
diff --git a/tools/vendor/go.uber.org/zap/logger.go b/tools/vendor/go.uber.org/zap/logger.go
index 6205fe48a..c4d300323 100644
--- a/tools/vendor/go.uber.org/zap/logger.go
+++ b/tools/vendor/go.uber.org/zap/logger.go
@@ -43,6 +43,7 @@ type Logger struct {
development bool
addCaller bool
+ onPanic zapcore.CheckWriteHook // default is WriteThenPanic
onFatal zapcore.CheckWriteHook // default is WriteThenFatal
name string
@@ -345,27 +346,12 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
// Set up any required terminal behavior.
switch ent.Level {
case zapcore.PanicLevel:
- ce = ce.After(ent, zapcore.WriteThenPanic)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
case zapcore.FatalLevel:
- onFatal := log.onFatal
- // nil or WriteThenNoop will lead to continued execution after
- // a Fatal log entry, which is unexpected. For example,
- //
- // f, err := os.Open(..)
- // if err != nil {
- // log.Fatal("cannot open", zap.Error(err))
- // }
- // fmt.Println(f.Name())
- //
- // The f.Name() will panic if we continue execution after the
- // log.Fatal.
- if onFatal == nil || onFatal == zapcore.WriteThenNoop {
- onFatal = zapcore.WriteThenFatal
- }
- ce = ce.After(ent, onFatal)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenFatal, log.onFatal))
case zapcore.DPanicLevel:
if log.development {
- ce = ce.After(ent, zapcore.WriteThenPanic)
+ ce = ce.After(ent, terminalHookOverride(zapcore.WriteThenPanic, log.onPanic))
}
}
@@ -430,3 +416,20 @@ func (log *Logger) check(lvl zapcore.Level, msg string) *zapcore.CheckedEntry {
return ce
}
+
+func terminalHookOverride(defaultHook, override zapcore.CheckWriteHook) zapcore.CheckWriteHook {
+ // A nil or WriteThenNoop hook will lead to continued execution after
+ // a Panic or Fatal log entry, which is unexpected. For example,
+ //
+ // f, err := os.Open(..)
+ // if err != nil {
+ // log.Fatal("cannot open", zap.Error(err))
+ // }
+ // fmt.Println(f.Name())
+ //
+ // The f.Name() will panic if we continue execution after the log.Fatal.
+ if override == nil || override == zapcore.WriteThenNoop {
+ return defaultHook
+ }
+ return override
+}
diff --git a/tools/vendor/go.uber.org/zap/options.go b/tools/vendor/go.uber.org/zap/options.go
index c4f3bca3d..43d357ac9 100644
--- a/tools/vendor/go.uber.org/zap/options.go
+++ b/tools/vendor/go.uber.org/zap/options.go
@@ -132,6 +132,21 @@ func IncreaseLevel(lvl zapcore.LevelEnabler) Option {
})
}
+// WithPanicHook sets a CheckWriteHook to run on Panic/DPanic logs.
+// Zap will call this hook after writing a log statement with a Panic/DPanic level.
+//
+// For example, the following builds a logger that will exit the current
+// goroutine after writing a Panic/DPanic log message, but it will not start a panic.
+//
+// zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit))
+//
+// This is useful for testing Panic/DPanic log output.
+func WithPanicHook(hook zapcore.CheckWriteHook) Option {
+ return optionFunc(func(log *Logger) {
+ log.onPanic = hook
+ })
+}
+
// OnFatal sets the action to take on fatal logs.
//
// Deprecated: Use [WithFatalHook] instead.
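A sketch of the testing pattern WithPanicHook enables: capture a Panic-level entry without unwinding the stack, by exiting the goroutine instead. The observer package is zap's existing zaptest/observer helper; the test itself is illustrative.

package sketch

import (
	"testing"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
	"go.uber.org/zap/zaptest/observer"
)

func TestPanicLogOutput(t *testing.T) {
	core, logs := observer.New(zapcore.DebugLevel)
	logger := zap.New(core, zap.WithPanicHook(zapcore.WriteThenGoexit))

	done := make(chan struct{})
	go func() {
		defer close(done)
		logger.Panic("boom") // writes the entry, then runtime.Goexit()
	}()
	<-done

	if logs.Len() != 1 {
		t.Fatalf("expected 1 logged entry, got %d", logs.Len())
	}
}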
diff --git a/tools/vendor/go.uber.org/zap/sugar.go b/tools/vendor/go.uber.org/zap/sugar.go
index 00ac5fe3a..8904cd087 100644
--- a/tools/vendor/go.uber.org/zap/sugar.go
+++ b/tools/vendor/go.uber.org/zap/sugar.go
@@ -115,6 +115,21 @@ func (s *SugaredLogger) With(args ...interface{}) *SugaredLogger {
return &SugaredLogger{base: s.base.With(s.sweetenFields(args)...)}
}
+// WithLazy adds a variadic number of fields to the logging context lazily.
+// The fields are evaluated only if the logger is further chained with [With]
+// or is written to with any of the log level methods.
+// Until that occurs, the logger may retain references to objects inside the fields,
+// and logging will reflect the state of an object at the time of logging,
+// not the time of WithLazy().
+//
+// Similar to [With], fields added to the child don't affect the parent,
+// and vice versa. Also, the keys in key-value pairs should be strings. In development,
+// passing a non-string key panics, while in production it logs an error and skips the pair.
+// Passing an orphaned key has the same behavior.
+func (s *SugaredLogger) WithLazy(args ...interface{}) *SugaredLogger {
+ return &SugaredLogger{base: s.base.WithLazy(s.sweetenFields(args)...)}
+}
+
// Level reports the minimum enabled level for this logger.
//
// For NopLoggers, this is [zapcore.InvalidLevel].
@@ -122,6 +137,12 @@ func (s *SugaredLogger) Level() zapcore.Level {
return zapcore.LevelOf(s.base.core)
}
+// Log logs the provided arguments at provided level.
+// Spaces are added between arguments when neither is a string.
+func (s *SugaredLogger) Log(lvl zapcore.Level, args ...interface{}) {
+ s.log(lvl, "", args, nil)
+}
+
// Debug logs the provided arguments at [DebugLevel].
// Spaces are added between arguments when neither is a string.
func (s *SugaredLogger) Debug(args ...interface{}) {
@@ -165,6 +186,12 @@ func (s *SugaredLogger) Fatal(args ...interface{}) {
s.log(FatalLevel, "", args, nil)
}
+// Logf formats the message according to the format specifier
+// and logs it at provided level.
+func (s *SugaredLogger) Logf(lvl zapcore.Level, template string, args ...interface{}) {
+ s.log(lvl, template, args, nil)
+}
+
// Debugf formats the message according to the format specifier
// and logs it at [DebugLevel].
func (s *SugaredLogger) Debugf(template string, args ...interface{}) {
@@ -208,6 +235,12 @@ func (s *SugaredLogger) Fatalf(template string, args ...interface{}) {
s.log(FatalLevel, template, args, nil)
}
+// Logw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (s *SugaredLogger) Logw(lvl zapcore.Level, msg string, keysAndValues ...interface{}) {
+ s.log(lvl, msg, nil, keysAndValues)
+}
+
// Debugw logs a message with some additional context. The variadic key-value
// pairs are treated as they are in With.
//
@@ -255,6 +288,12 @@ func (s *SugaredLogger) Fatalw(msg string, keysAndValues ...interface{}) {
s.log(FatalLevel, msg, nil, keysAndValues)
}
+// Logln logs a message at provided level.
+// Spaces are always added between arguments.
+func (s *SugaredLogger) Logln(lvl zapcore.Level, args ...interface{}) {
+ s.logln(lvl, args, nil)
+}
+
// Debugln logs a message at [DebugLevel].
// Spaces are always added between arguments.
func (s *SugaredLogger) Debugln(args ...interface{}) {
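The new Log, Logf, Logw, and Logln variants take the level as an argument, which is handy when the level is only decided at runtime. A small illustrative helper, assuming nothing beyond the methods added above:

package sketch

import (
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// logAttempt picks the level at runtime instead of branching between
// Infow and Errorw; the function and field names are illustrative only.
func logAttempt(sugar *zap.SugaredLogger, attempt int, err error) {
	lvl := zapcore.InfoLevel
	if err != nil {
		lvl = zapcore.ErrorLevel
	}
	sugar.Logw(lvl, "request finished",
		"attempt", attempt,
		"error", err,
	)
}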
diff --git a/tools/vendor/go.uber.org/zap/zapcore/console_encoder.go b/tools/vendor/go.uber.org/zap/zapcore/console_encoder.go
index 8ca0bfaf5..cc2b4e07b 100644
--- a/tools/vendor/go.uber.org/zap/zapcore/console_encoder.go
+++ b/tools/vendor/go.uber.org/zap/zapcore/console_encoder.go
@@ -77,7 +77,7 @@ func (c consoleEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
// If this ever becomes a performance bottleneck, we can implement
// ArrayEncoder for our plain-text format.
arr := getSliceEncoder()
- if c.TimeKey != "" && c.EncodeTime != nil {
+ if c.TimeKey != "" && c.EncodeTime != nil && !ent.Time.IsZero() {
c.EncodeTime(ent.Time, arr)
}
if c.LevelKey != "" && c.EncodeLevel != nil {
diff --git a/tools/vendor/go.uber.org/zap/zapcore/encoder.go b/tools/vendor/go.uber.org/zap/zapcore/encoder.go
index 5769ff3e4..044625415 100644
--- a/tools/vendor/go.uber.org/zap/zapcore/encoder.go
+++ b/tools/vendor/go.uber.org/zap/zapcore/encoder.go
@@ -37,6 +37,9 @@ const DefaultLineEnding = "\n"
const OmitKey = ""
// A LevelEncoder serializes a Level to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type LevelEncoder func(Level, PrimitiveArrayEncoder)
// LowercaseLevelEncoder serializes a Level to a lowercase string. For example,
@@ -90,6 +93,9 @@ func (e *LevelEncoder) UnmarshalText(text []byte) error {
}
// A TimeEncoder serializes a time.Time to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type TimeEncoder func(time.Time, PrimitiveArrayEncoder)
// EpochTimeEncoder serializes a time.Time to a floating-point number of seconds
@@ -219,6 +225,9 @@ func (e *TimeEncoder) UnmarshalJSON(data []byte) error {
}
// A DurationEncoder serializes a time.Duration to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type DurationEncoder func(time.Duration, PrimitiveArrayEncoder)
// SecondsDurationEncoder serializes a time.Duration to a floating-point number of seconds elapsed.
@@ -262,6 +271,9 @@ func (e *DurationEncoder) UnmarshalText(text []byte) error {
}
// A CallerEncoder serializes an EntryCaller to a primitive type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type CallerEncoder func(EntryCaller, PrimitiveArrayEncoder)
// FullCallerEncoder serializes a caller in /full/path/to/package/file:line
@@ -292,6 +304,9 @@ func (e *CallerEncoder) UnmarshalText(text []byte) error {
// A NameEncoder serializes a period-separated logger name to a primitive
// type.
+//
+// This function must make exactly one call
+// to a PrimitiveArrayEncoder's Append* method.
type NameEncoder func(string, PrimitiveArrayEncoder)
// FullNameEncoder serializes the logger name as-is.
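The added doc comments pin down the contract: a custom encoder must call exactly one Append* method per invocation, otherwise array-encoded output (for example the console encoder's header fields) can get misaligned. A sketch of a conforming TimeEncoder, with an illustrative name:

package sketch

import (
	"time"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// shortUTCTimeEncoder makes exactly one Append call on every invocation,
// as the contract above requires.
func shortUTCTimeEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) {
	enc.AppendString(t.UTC().Format("15:04:05.000"))
}

func newEncoderConfig() zapcore.EncoderConfig {
	cfg := zap.NewProductionEncoderConfig()
	cfg.EncodeTime = shortUTCTimeEncoder
	return cfg
}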
diff --git a/tools/vendor/go.uber.org/zap/zapcore/field.go b/tools/vendor/go.uber.org/zap/zapcore/field.go
index 95bdb0a12..308c9781e 100644
--- a/tools/vendor/go.uber.org/zap/zapcore/field.go
+++ b/tools/vendor/go.uber.org/zap/zapcore/field.go
@@ -47,7 +47,7 @@ const (
ByteStringType
// Complex128Type indicates that the field carries a complex128.
Complex128Type
- // Complex64Type indicates that the field carries a complex128.
+ // Complex64Type indicates that the field carries a complex64.
Complex64Type
// DurationType indicates that the field carries a time.Duration.
DurationType
diff --git a/tools/vendor/go.uber.org/zap/zapcore/json_encoder.go b/tools/vendor/go.uber.org/zap/zapcore/json_encoder.go
index c8ab86979..9685169b2 100644
--- a/tools/vendor/go.uber.org/zap/zapcore/json_encoder.go
+++ b/tools/vendor/go.uber.org/zap/zapcore/json_encoder.go
@@ -372,7 +372,7 @@ func (enc *jsonEncoder) EncodeEntry(ent Entry, fields []Field) (*buffer.Buffer,
final.AppendString(ent.Level.String())
}
}
- if final.TimeKey != "" {
+ if final.TimeKey != "" && !ent.Time.IsZero() {
final.AddTime(final.TimeKey, ent.Time)
}
if ent.LoggerName != "" && final.NameKey != "" {
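With this check (and the matching one in the console encoder), an Entry whose Time is the zero value no longer emits the time key at all, which helps produce byte-stable output when encoding entries directly. A minimal sketch, assuming the production config's default "ts" key:

package sketch

import (
	"fmt"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func encodeWithoutTimestamp() {
	enc := zapcore.NewJSONEncoder(zap.NewProductionEncoderConfig())
	buf, err := enc.EncodeEntry(zapcore.Entry{
		Level:   zapcore.InfoLevel,
		Message: "hello",
		// Time is left as the zero value, so the "ts" key is omitted.
	}, nil)
	if err != nil {
		panic(err)
	}
	fmt.Print(buf.String())
}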
diff --git a/tools/vendor/golang.org/x/net/html/doc.go b/tools/vendor/golang.org/x/net/html/doc.go
index 3a7e5ab17..885c4c593 100644
--- a/tools/vendor/golang.org/x/net/html/doc.go
+++ b/tools/vendor/golang.org/x/net/html/doc.go
@@ -78,16 +78,11 @@ example, to process each anchor node in depth-first order:
if err != nil {
// ...
}
- var f func(*html.Node)
- f = func(n *html.Node) {
+ for n := range doc.Descendants() {
if n.Type == html.ElementNode && n.Data == "a" {
// Do something with n...
}
- for c := n.FirstChild; c != nil; c = c.NextSibling {
- f(c)
- }
}
- f(doc)
The relevant specifications include:
https://html.spec.whatwg.org/multipage/syntax.html and
diff --git a/tools/vendor/golang.org/x/net/html/doctype.go b/tools/vendor/golang.org/x/net/html/doctype.go
index c484e5a94..bca3ae9a0 100644
--- a/tools/vendor/golang.org/x/net/html/doctype.go
+++ b/tools/vendor/golang.org/x/net/html/doctype.go
@@ -87,7 +87,7 @@ func parseDoctype(s string) (n *Node, quirks bool) {
}
}
if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" &&
- strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" {
+ strings.EqualFold(lastAttr.Val, "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd") {
quirks = true
}
}
diff --git a/tools/vendor/golang.org/x/net/html/foreign.go b/tools/vendor/golang.org/x/net/html/foreign.go
index 9da9e9dc4..e8515d8e8 100644
--- a/tools/vendor/golang.org/x/net/html/foreign.go
+++ b/tools/vendor/golang.org/x/net/html/foreign.go
@@ -40,8 +40,7 @@ func htmlIntegrationPoint(n *Node) bool {
if n.Data == "annotation-xml" {
for _, a := range n.Attr {
if a.Key == "encoding" {
- val := strings.ToLower(a.Val)
- if val == "text/html" || val == "application/xhtml+xml" {
+ if strings.EqualFold(a.Val, "text/html") || strings.EqualFold(a.Val, "application/xhtml+xml") {
return true
}
}
diff --git a/tools/vendor/golang.org/x/net/html/iter.go b/tools/vendor/golang.org/x/net/html/iter.go
new file mode 100644
index 000000000..54be8fd30
--- /dev/null
+++ b/tools/vendor/golang.org/x/net/html/iter.go
@@ -0,0 +1,56 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.23
+
+package html
+
+import "iter"
+
+// Ancestors returns an iterator over the ancestors of n, starting with n.Parent.
+//
+// Mutating a Node or its parents while iterating may have unexpected results.
+func (n *Node) Ancestors() iter.Seq[*Node] {
+ _ = n.Parent // eager nil check
+
+ return func(yield func(*Node) bool) {
+ for p := n.Parent; p != nil && yield(p); p = p.Parent {
+ }
+ }
+}
+
+// ChildNodes returns an iterator over the immediate children of n,
+// starting with n.FirstChild.
+//
+// Mutating a Node or its children while iterating may have unexpected results.
+func (n *Node) ChildNodes() iter.Seq[*Node] {
+ _ = n.FirstChild // eager nil check
+
+ return func(yield func(*Node) bool) {
+ for c := n.FirstChild; c != nil && yield(c); c = c.NextSibling {
+ }
+ }
+
+}
+
+// Descendants returns an iterator over all nodes recursively beneath
+// n, excluding n itself. Nodes are visited in depth-first preorder.
+//
+// Mutating a Node or its descendants while iterating may have unexpected results.
+func (n *Node) Descendants() iter.Seq[*Node] {
+ _ = n.FirstChild // eager nil check
+
+ return func(yield func(*Node) bool) {
+ n.descendants(yield)
+ }
+}
+
+func (n *Node) descendants(yield func(*Node) bool) bool {
+ for c := range n.ChildNodes() {
+ if !yield(c) || !c.descendants(yield) {
+ return false
+ }
+ }
+ return true
+}
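A small sketch exercising the new iterators (go1.23+ is needed for range-over-func); the input HTML is arbitrary:

package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	doc, err := html.Parse(strings.NewReader(`<p><a href="/x">x</a></p>`))
	if err != nil {
		panic(err)
	}
	for n := range doc.Descendants() {
		if n.Type != html.ElementNode || n.Data != "a" {
			continue
		}
		fmt.Println("found anchor:", n.Data)
		// Walk back toward the root using the Ancestors iterator.
		for p := range n.Ancestors() {
			if p.Type == html.ElementNode {
				fmt.Println("  inside:", p.Data)
			}
		}
	}
}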
diff --git a/tools/vendor/golang.org/x/net/html/node.go b/tools/vendor/golang.org/x/net/html/node.go
index 1350eef22..77741a195 100644
--- a/tools/vendor/golang.org/x/net/html/node.go
+++ b/tools/vendor/golang.org/x/net/html/node.go
@@ -38,6 +38,10 @@ var scopeMarker = Node{Type: scopeMarkerNode}
// that it looks like "a&lt;b" rather than "a&amp;lt;b". For element nodes, DataAtom
diff --git a/tools/vendor/golang.org/x/net/http2/frame.go b/tools/vendor/golang.org/x/net/http2/frame.go
@@ func (s Setting) Valid() error {
case SettingMaxFrameSize:
if s.Val < 16384 || s.Val > 1<<24-1 {
return ConnectionError(ErrCodeProtocol)
}
+ case SettingEnableConnectProtocol:
+ if s.Val != 1 && s.Val != 0 {
+ return ConnectionError(ErrCodeProtocol)
+ }
}
return nil
}
@@ -150,21 +158,23 @@ func (s Setting) Valid() error {
type SettingID uint16
const (
- SettingHeaderTableSize SettingID = 0x1
- SettingEnablePush SettingID = 0x2
- SettingMaxConcurrentStreams SettingID = 0x3
- SettingInitialWindowSize SettingID = 0x4
- SettingMaxFrameSize SettingID = 0x5
- SettingMaxHeaderListSize SettingID = 0x6
+ SettingHeaderTableSize SettingID = 0x1
+ SettingEnablePush SettingID = 0x2
+ SettingMaxConcurrentStreams SettingID = 0x3
+ SettingInitialWindowSize SettingID = 0x4
+ SettingMaxFrameSize SettingID = 0x5
+ SettingMaxHeaderListSize SettingID = 0x6
+ SettingEnableConnectProtocol SettingID = 0x8
)
var settingName = map[SettingID]string{
- SettingHeaderTableSize: "HEADER_TABLE_SIZE",
- SettingEnablePush: "ENABLE_PUSH",
- SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
- SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
- SettingMaxFrameSize: "MAX_FRAME_SIZE",
- SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+ SettingHeaderTableSize: "HEADER_TABLE_SIZE",
+ SettingEnablePush: "ENABLE_PUSH",
+ SettingMaxConcurrentStreams: "MAX_CONCURRENT_STREAMS",
+ SettingInitialWindowSize: "INITIAL_WINDOW_SIZE",
+ SettingMaxFrameSize: "MAX_FRAME_SIZE",
+ SettingMaxHeaderListSize: "MAX_HEADER_LIST_SIZE",
+ SettingEnableConnectProtocol: "ENABLE_CONNECT_PROTOCOL",
}
func (s SettingID) String() string {
diff --git a/tools/vendor/golang.org/x/net/http2/server.go b/tools/vendor/golang.org/x/net/http2/server.go
index 617b4a476..b55547aec 100644
--- a/tools/vendor/golang.org/x/net/http2/server.go
+++ b/tools/vendor/golang.org/x/net/http2/server.go
@@ -306,7 +306,7 @@ func ConfigureServer(s *http.Server, conf *Server) error {
if s.TLSNextProto == nil {
s.TLSNextProto = map[string]func(*http.Server, *tls.Conn, http.Handler){}
}
- protoHandler := func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ protoHandler := func(hs *http.Server, c net.Conn, h http.Handler, sawClientPreface bool) {
if testHookOnConn != nil {
testHookOnConn()
}
@@ -323,12 +323,31 @@ func ConfigureServer(s *http.Server, conf *Server) error {
ctx = bc.BaseContext()
}
conf.ServeConn(c, &ServeConnOpts{
- Context: ctx,
- Handler: h,
- BaseConfig: hs,
+ Context: ctx,
+ Handler: h,
+ BaseConfig: hs,
+ SawClientPreface: sawClientPreface,
})
}
- s.TLSNextProto[NextProtoTLS] = protoHandler
+ s.TLSNextProto[NextProtoTLS] = func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ protoHandler(hs, c, h, false)
+ }
+ // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns.
+ //
+ // A connection passed in this method has already had the HTTP/2 preface read from it.
+ s.TLSNextProto[nextProtoUnencryptedHTTP2] = func(hs *http.Server, c *tls.Conn, h http.Handler) {
+ nc, err := unencryptedNetConnFromTLSConn(c)
+ if err != nil {
+ if lg := hs.ErrorLog; lg != nil {
+ lg.Print(err)
+ } else {
+ log.Print(err)
+ }
+ go c.Close()
+ return
+ }
+ protoHandler(hs, nc, h, true)
+ }
return nil
}
@@ -913,14 +932,18 @@ func (sc *serverConn) serve(conf http2Config) {
sc.vlogf("http2: server connection from %v on %p", sc.conn.RemoteAddr(), sc.hs)
}
+ settings := writeSettings{
+ {SettingMaxFrameSize, conf.MaxReadFrameSize},
+ {SettingMaxConcurrentStreams, sc.advMaxStreams},
+ {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
+ {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
+ {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
+ }
+ if !disableExtendedConnectProtocol {
+ settings = append(settings, Setting{SettingEnableConnectProtocol, 1})
+ }
sc.writeFrame(FrameWriteRequest{
- write: writeSettings{
- {SettingMaxFrameSize, conf.MaxReadFrameSize},
- {SettingMaxConcurrentStreams, sc.advMaxStreams},
- {SettingMaxHeaderListSize, sc.maxHeaderListSize()},
- {SettingHeaderTableSize, conf.MaxDecoderHeaderTableSize},
- {SettingInitialWindowSize, uint32(sc.initialStreamRecvWindowSize)},
- },
+ write: settings,
})
sc.unackedSettings++
@@ -1782,6 +1805,9 @@ func (sc *serverConn) processSetting(s Setting) error {
sc.maxFrameSize = int32(s.Val) // the maximum valid s.Val is < 2^31
case SettingMaxHeaderListSize:
sc.peerMaxHeaderListSize = s.Val
+ case SettingEnableConnectProtocol:
+ // Receipt of this parameter by a server does not
+ // have any impact
default:
// Unknown setting: "An endpoint that receives a SETTINGS
// frame with any unknown or unsupported identifier MUST
@@ -2212,11 +2238,17 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
scheme: f.PseudoValue("scheme"),
authority: f.PseudoValue("authority"),
path: f.PseudoValue("path"),
+ protocol: f.PseudoValue("protocol"),
+ }
+
+ // extended connect is disabled, so we should not see :protocol
+ if disableExtendedConnectProtocol && rp.protocol != "" {
+ return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
isConnect := rp.method == "CONNECT"
if isConnect {
- if rp.path != "" || rp.scheme != "" || rp.authority == "" {
+ if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") {
return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
@@ -2240,6 +2272,9 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
if rp.authority == "" {
rp.authority = rp.header.Get("Host")
}
+ if rp.protocol != "" {
+ rp.header.Set(":protocol", rp.protocol)
+ }
rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
if err != nil {
@@ -2266,6 +2301,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
type requestParam struct {
method string
scheme, authority, path string
+ protocol string
header http.Header
}
@@ -2307,7 +2343,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
var url_ *url.URL
var requestURI string
- if rp.method == "CONNECT" {
+ if rp.method == "CONNECT" && rp.protocol == "" {
url_ = &url.URL{Host: rp.authority}
requestURI = rp.authority // mimic HTTP/1 server behavior
} else {
@@ -2880,6 +2916,11 @@ func (w *responseWriter) SetWriteDeadline(deadline time.Time) error {
return nil
}
+func (w *responseWriter) EnableFullDuplex() error {
+ // We always support full duplex responses, so this is a no-op.
+ return nil
+}
+
func (w *responseWriter) Flush() {
w.FlushError()
}
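On the server side, an extended CONNECT request (RFC 8441) now reaches the handler with Method "CONNECT" and the negotiated protocol surfaced through the ":protocol" header set above. A hedged sketch of a handler that accepts such a request; the handler body and the "websocket" protocol value are illustrative:

package sketch

import "net/http"

func handleExtendedConnect(w http.ResponseWriter, r *http.Request) {
	// Per the server changes above, rp.protocol is exposed to the handler
	// via the ":protocol" header when extended CONNECT is in use.
	if r.Method == "CONNECT" && r.Header.Get(":protocol") == "websocket" {
		w.WriteHeader(http.StatusOK) // accept the tunnel
		// The stream now behaves as a bidirectional byte pipe: read the
		// peer's data from r.Body and write responses to w, flushing as needed.
		return
	}
	http.Error(w, "extended CONNECT expected", http.StatusBadRequest)
}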
diff --git a/tools/vendor/golang.org/x/net/http2/transport.go b/tools/vendor/golang.org/x/net/http2/transport.go
index 0c5f64aa8..090d0e1bd 100644
--- a/tools/vendor/golang.org/x/net/http2/transport.go
+++ b/tools/vendor/golang.org/x/net/http2/transport.go
@@ -202,6 +202,20 @@ func (t *Transport) markNewGoroutine() {
}
}
+func (t *Transport) now() time.Time {
+ if t != nil && t.transportTestHooks != nil {
+ return t.transportTestHooks.group.Now()
+ }
+ return time.Now()
+}
+
+func (t *Transport) timeSince(when time.Time) time.Duration {
+ if t != nil && t.transportTestHooks != nil {
+ return t.now().Sub(when)
+ }
+ return time.Since(when)
+}
+
// newTimer creates a new time.Timer, or a synthetic timer in tests.
func (t *Transport) newTimer(d time.Duration) timer {
if t.transportTestHooks != nil {
@@ -281,8 +295,8 @@ func configureTransports(t1 *http.Transport) (*Transport, error) {
if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") {
t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1")
}
- upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper {
- addr := authorityAddr("https", authority)
+ upgradeFn := func(scheme, authority string, c net.Conn) http.RoundTripper {
+ addr := authorityAddr(scheme, authority)
if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil {
go c.Close()
return erringRoundTripper{err}
@@ -293,18 +307,37 @@ func configureTransports(t1 *http.Transport) (*Transport, error) {
// was unknown)
go c.Close()
}
+ if scheme == "http" {
+ return (*unencryptedTransport)(t2)
+ }
return t2
}
- if m := t1.TLSNextProto; len(m) == 0 {
- t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{
- "h2": upgradeFn,
+ if t1.TLSNextProto == nil {
+ t1.TLSNextProto = make(map[string]func(string, *tls.Conn) http.RoundTripper)
+ }
+ t1.TLSNextProto[NextProtoTLS] = func(authority string, c *tls.Conn) http.RoundTripper {
+ return upgradeFn("https", authority, c)
+ }
+ // The "unencrypted_http2" TLSNextProto key is used to pass off non-TLS HTTP/2 conns.
+ t1.TLSNextProto[nextProtoUnencryptedHTTP2] = func(authority string, c *tls.Conn) http.RoundTripper {
+ nc, err := unencryptedNetConnFromTLSConn(c)
+ if err != nil {
+ go c.Close()
+ return erringRoundTripper{err}
}
- } else {
- m["h2"] = upgradeFn
+ return upgradeFn("http", authority, nc)
}
return t2, nil
}
+// unencryptedTransport is a Transport with a RoundTrip method that
+// always permits http:// URLs.
+type unencryptedTransport Transport
+
+func (t *unencryptedTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ return (*Transport)(t).RoundTripOpt(req, RoundTripOpt{allowHTTP: true})
+}
+
func (t *Transport) connPool() ClientConnPool {
t.connPoolOnce.Do(t.initConnPool)
return t.connPoolOrDef
@@ -324,7 +357,7 @@ type ClientConn struct {
t *Transport
tconn net.Conn // usually *tls.Conn, except specialized impls
tlsState *tls.ConnectionState // nil only for specialized impls
- reused uint32 // whether conn is being reused; atomic
+ atomicReused uint32 // whether conn is being reused; atomic
singleUse bool // whether being used for a single http.Request
getConnCalled bool // used by clientConnPool
@@ -335,25 +368,26 @@ type ClientConn struct {
idleTimeout time.Duration // or 0 for never
idleTimer timer
- mu sync.Mutex // guards following
- cond *sync.Cond // hold mu; broadcast on flow/closed changes
- flow outflow // our conn-level flow control quota (cs.outflow is per stream)
- inflow inflow // peer's conn-level flow control
- doNotReuse bool // whether conn is marked to not be reused for any future requests
- closing bool
- closed bool
- seenSettings bool // true if we've seen a settings frame, false otherwise
- wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
- goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
- goAwayDebug string // goAway frame's debug data, retained as a string
- streams map[uint32]*clientStream // client-initiated
- streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip
- nextStreamID uint32
- pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
- pings map[[8]byte]chan struct{} // in flight ping data to notification channel
- br *bufio.Reader
- lastActive time.Time
- lastIdle time.Time // time last idle
+ mu sync.Mutex // guards following
+ cond *sync.Cond // hold mu; broadcast on flow/closed changes
+ flow outflow // our conn-level flow control quota (cs.outflow is per stream)
+ inflow inflow // peer's conn-level flow control
+ doNotReuse bool // whether conn is marked to not be reused for any future requests
+ closing bool
+ closed bool
+ seenSettings bool // true if we've seen a settings frame, false otherwise
+ seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails
+ wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
+ goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received
+ goAwayDebug string // goAway frame's debug data, retained as a string
+ streams map[uint32]*clientStream // client-initiated
+ streamsReserved int // incr by ReserveNewRequest; decr on RoundTrip
+ nextStreamID uint32
+ pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams
+ pings map[[8]byte]chan struct{} // in flight ping data to notification channel
+ br *bufio.Reader
+ lastActive time.Time
+ lastIdle time.Time // time last idle
// Settings from peer: (also guarded by wmu)
maxFrameSize uint32
maxConcurrentStreams uint32
@@ -363,6 +397,25 @@ type ClientConn struct {
initialStreamRecvWindowSize int32
readIdleTimeout time.Duration
pingTimeout time.Duration
+ extendedConnectAllowed bool
+
+ // rstStreamPingsBlocked works around an unfortunate gRPC behavior.
+ // gRPC strictly limits the number of PING frames that it will receive.
+ // The default is two pings per two hours, but the limit resets every time
+ // the gRPC endpoint sends a HEADERS or DATA frame. See golang/go#70575.
+ //
+ // rstStreamPingsBlocked is set after receiving a response to a PING frame
+ // bundled with an RST_STREAM (see pendingResets below), and cleared after
+ // receiving a HEADERS or DATA frame.
+ rstStreamPingsBlocked bool
+
+ // pendingResets is the number of RST_STREAM frames we have sent to the peer,
+ // without confirming that the peer has received them. When we send a RST_STREAM,
+ // we bundle it with a PING frame, unless a PING is already in flight. We count
+ // the reset stream against the connection's concurrency limit until we get
+ // a PING response. This limits the number of requests we'll try to send to a
+ // completely unresponsive connection.
+ pendingResets int
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
// Write to reqHeaderMu to lock it, read from it to unlock.
@@ -420,12 +473,12 @@ type clientStream struct {
sentHeaders bool
// owned by clientConnReadLoop:
- firstByte bool // got the first response byte
- pastHeaders bool // got first MetaHeadersFrame (actual headers)
- pastTrailers bool // got optional second MetaHeadersFrame (trailers)
- num1xx uint8 // number of 1xx responses seen
- readClosed bool // peer sent an END_STREAM flag
- readAborted bool // read loop reset the stream
+ firstByte bool // got the first response byte
+ pastHeaders bool // got first MetaHeadersFrame (actual headers)
+ pastTrailers bool // got optional second MetaHeadersFrame (trailers)
+ readClosed bool // peer sent an END_STREAM flag
+ readAborted bool // read loop reset the stream
+ totalHeaderSize int64 // total size of 1xx headers seen
trailer http.Header // accumulated trailers
resTrailer *http.Header // client's Response.Trailer
@@ -530,6 +583,8 @@ type RoundTripOpt struct {
// no cached connection is available, RoundTripOpt
// will return ErrNoCachedConn.
OnlyCachedConn bool
+
+ allowHTTP bool // allow http:// URLs
}
func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
@@ -562,7 +617,14 @@ func authorityAddr(scheme string, authority string) (addr string) {
// RoundTripOpt is like RoundTrip, but takes options.
func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Response, error) {
- if !(req.URL.Scheme == "https" || (req.URL.Scheme == "http" && t.AllowHTTP)) {
+ switch req.URL.Scheme {
+ case "https":
+ // Always okay.
+ case "http":
+ if !t.AllowHTTP && !opt.allowHTTP {
+ return nil, errors.New("http2: unencrypted HTTP/2 not enabled")
+ }
+ default:
return nil, errors.New("http2: unsupported scheme")
}
@@ -573,7 +635,7 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err)
return nil, err
}
- reused := !atomic.CompareAndSwapUint32(&cc.reused, 0, 1)
+ reused := !atomic.CompareAndSwapUint32(&cc.atomicReused, 0, 1)
traceGotConn(req, cc, reused)
res, err := cc.RoundTrip(req)
if err != nil && retry <= 6 {
@@ -598,6 +660,22 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res
}
}
}
+ if err == errClientConnNotEstablished {
+ // This ClientConn was created recently,
+ // this is the first request to use it,
+ // and the connection is closed and not usable.
+ //
+ // In this state, cc.idleTimer will remove the conn from the pool
+ // when it fires. Stop the timer and remove it here so future requests
+ // won't try to use this connection.
+ //
+ // If the timer has already fired and we're racing it, the redundant
+ // call to MarkDead is harmless.
+ if cc.idleTimer != nil {
+ cc.idleTimer.Stop()
+ }
+ t.connPool().MarkDead(cc)
+ }
if err != nil {
t.vlogf("RoundTrip failure: %v", err)
return nil, err
@@ -616,9 +694,10 @@ func (t *Transport) CloseIdleConnections() {
}
var (
- errClientConnClosed = errors.New("http2: client conn is closed")
- errClientConnUnusable = errors.New("http2: client conn not usable")
- errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
+ errClientConnClosed = errors.New("http2: client conn is closed")
+ errClientConnUnusable = errors.New("http2: client conn not usable")
+ errClientConnNotEstablished = errors.New("http2: client conn could not be established")
+ errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY")
)
// shouldRetryRequest is called by RoundTrip when a request fails to get
@@ -752,11 +831,13 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead.
streams: make(map[uint32]*clientStream),
singleUse: singleUse,
+ seenSettingsChan: make(chan struct{}),
wantSettingsAck: true,
readIdleTimeout: conf.SendPingTimeout,
pingTimeout: conf.PingTimeout,
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
+ lastActive: t.now(),
}
var group synctestGroupInterface
if t.transportTestHooks != nil {
@@ -960,7 +1041,7 @@ func (cc *ClientConn) State() ClientConnState {
return ClientConnState{
Closed: cc.closed,
Closing: cc.closing || cc.singleUse || cc.doNotReuse || cc.goAway != nil,
- StreamsActive: len(cc.streams),
+ StreamsActive: len(cc.streams) + cc.pendingResets,
StreamsReserved: cc.streamsReserved,
StreamsPending: cc.pendingRequests,
LastIdle: cc.lastIdle,
@@ -992,16 +1073,38 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
// writing it.
maxConcurrentOkay = true
} else {
- maxConcurrentOkay = int64(len(cc.streams)+cc.streamsReserved+1) <= int64(cc.maxConcurrentStreams)
+ // We can take a new request if the total of
+ // - active streams;
+ // - reservation slots for new streams; and
+ // - streams for which we have sent a RST_STREAM and a PING,
+ // but received no subsequent frame
+ // is less than the concurrency limit.
+ maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams)
}
st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
!cc.doNotReuse &&
int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
!cc.tooIdleLocked()
+
+ // If this connection has never been used for a request and is closed,
+ // then let it take a request (which will fail).
+ //
+ // This avoids a situation where an error early in a connection's lifetime
+ // goes unreported.
+ if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed {
+ st.canTakeNewRequest = true
+ }
+
return
}
+// currentRequestCountLocked reports the number of concurrency slots currently in use,
+// including active streams, reserved slots, and reset streams waiting for acknowledgement.
+func (cc *ClientConn) currentRequestCountLocked() int {
+ return len(cc.streams) + cc.streamsReserved + cc.pendingResets
+}
+
func (cc *ClientConn) canTakeNewRequestLocked() bool {
st := cc.idleStateLocked()
return st.canTakeNewRequest
@@ -1014,7 +1117,7 @@ func (cc *ClientConn) tooIdleLocked() bool {
// times are compared based on their wall time. We don't want
// to reuse a connection that's been sitting idle during
// VM/laptop suspend if monotonic time was also frozen.
- return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && time.Since(cc.lastIdle.Round(0)) > cc.idleTimeout
+ return cc.idleTimeout != 0 && !cc.lastIdle.IsZero() && cc.t.timeSince(cc.lastIdle.Round(0)) > cc.idleTimeout
}
// onIdleTimeout is called from a time.AfterFunc goroutine. It will
@@ -1376,6 +1479,8 @@ func (cs *clientStream) doRequest(req *http.Request, streamf func(*clientStream)
cs.cleanupWriteRequest(err)
}
+var errExtendedConnectNotSupported = errors.New("net/http: extended connect not supported by peer")
+
// writeRequest sends a request.
//
// It returns nil after the request is written, the response read,
@@ -1391,12 +1496,31 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
return err
}
+ // Decide whether this is an extended CONNECT request. Extended CONNECT must
+ // wait for the peer's first SETTINGS frame; the server may change the setting
+ // later, but we only consult that first frame.
+ var isExtendedConnect bool
+ if req.Method == "CONNECT" && req.Header.Get(":protocol") != "" {
+ isExtendedConnect = true
+ }
+
// Acquire the new-request lock by writing to reqHeaderMu.
// This lock guards the critical section covering allocating a new stream ID
// (requires mu) and creating the stream (requires wmu).
if cc.reqHeaderMu == nil {
panic("RoundTrip on uninitialized ClientConn") // for tests
}
+ if isExtendedConnect {
+ select {
+ case <-cs.reqCancel:
+ return errRequestCanceled
+ case <-ctx.Done():
+ return ctx.Err()
+ case <-cc.seenSettingsChan:
+ if !cc.extendedConnectAllowed {
+ return errExtendedConnectNotSupported
+ }
+ }
+ }
select {
case cc.reqHeaderMu <- struct{}{}:
case <-cs.reqCancel:
@@ -1578,6 +1702,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
cs.reqBodyClosed = make(chan struct{})
}
bodyClosed := cs.reqBodyClosed
+ closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
cc.mu.Unlock()
if mustCloseBody {
cs.reqBody.Close()
@@ -1602,16 +1727,44 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
if cs.sentHeaders {
if se, ok := err.(StreamError); ok {
if se.Cause != errFromPeer {
- cc.writeStreamReset(cs.ID, se.Code, err)
+ cc.writeStreamReset(cs.ID, se.Code, false, err)
}
} else {
- cc.writeStreamReset(cs.ID, ErrCodeCancel, err)
+ // We're cancelling an in-flight request.
+ //
+ // This could be due to the server becoming unresponsive.
+ // To avoid sending too many requests on a dead connection,
+ // we let the request continue to consume a concurrency slot
+ // until we can confirm the server is still responding.
+ // We do this by sending a PING frame along with the RST_STREAM
+ // (unless a ping is already in flight).
+ //
+ // For simplicity, we don't bother tracking the PING payload:
+ // We reset cc.pendingResets any time we receive a PING ACK.
+ //
+ // We skip this if the conn is going to be closed on idle,
+ // because it's short lived and will probably be closed before
+ // we get the ping response.
+ ping := false
+ if !closeOnIdle {
+ cc.mu.Lock()
+ // rstStreamPingsBlocked works around a gRPC behavior:
+ // see comment on the field for details.
+ if !cc.rstStreamPingsBlocked {
+ if cc.pendingResets == 0 {
+ ping = true
+ }
+ cc.pendingResets++
+ }
+ cc.mu.Unlock()
+ }
+ cc.writeStreamReset(cs.ID, ErrCodeCancel, ping, err)
}
}
cs.bufPipe.CloseWithError(err) // no-op if already closed
} else {
if cs.sentHeaders && !cs.sentEndStream {
- cc.writeStreamReset(cs.ID, ErrCodeNo, nil)
+ cc.writeStreamReset(cs.ID, ErrCodeNo, false, nil)
}
cs.bufPipe.CloseWithError(errRequestCanceled)
}
@@ -1633,12 +1786,17 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
// Must hold cc.mu.
func (cc *ClientConn) awaitOpenSlotForStreamLocked(cs *clientStream) error {
for {
- cc.lastActive = time.Now()
+ if cc.closed && cc.nextStreamID == 1 && cc.streamsReserved == 0 {
+ // This is the very first request sent to this connection.
+ // Return a fatal error which aborts the retry loop.
+ return errClientConnNotEstablished
+ }
+ cc.lastActive = cc.t.now()
if cc.closed || !cc.canTakeNewRequestLocked() {
return errClientConnUnusable
}
cc.lastIdle = time.Time{}
- if int64(len(cc.streams)) < int64(cc.maxConcurrentStreams) {
+ if cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams) {
return nil
}
cc.pendingRequests++
@@ -1910,7 +2068,7 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
func validateHeaders(hdrs http.Header) string {
for k, vv := range hdrs {
- if !httpguts.ValidHeaderFieldName(k) {
+ if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
return fmt.Sprintf("name %q", k)
}
for _, v := range vv {
@@ -1926,6 +2084,10 @@ func validateHeaders(hdrs http.Header) string {
var errNilRequestURL = errors.New("http2: Request.URI is nil")
+func isNormalConnect(req *http.Request) bool {
+ return req.Method == "CONNECT" && req.Header.Get(":protocol") == ""
+}
+
// requires cc.wmu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
cc.hbuf.Reset()
@@ -1946,7 +2108,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
}
var path string
- if req.Method != "CONNECT" {
+ if !isNormalConnect(req) {
path = req.URL.RequestURI()
if !validPseudoPath(path) {
orig := path
@@ -1983,7 +2145,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail
m = http.MethodGet
}
f(":method", m)
- if req.Method != "CONNECT" {
+ if !isNormalConnect(req) {
f(":path", path)
f(":scheme", req.URL.Scheme)
}
@@ -2180,10 +2342,10 @@ func (cc *ClientConn) forgetStreamID(id uint32) {
if len(cc.streams) != slen-1 {
panic("forgetting unknown stream id")
}
- cc.lastActive = time.Now()
+ cc.lastActive = cc.t.now()
if len(cc.streams) == 0 && cc.idleTimer != nil {
cc.idleTimer.Reset(cc.idleTimeout)
- cc.lastIdle = time.Now()
+ cc.lastIdle = cc.t.now()
}
// Wake up writeRequestBody via clientStream.awaitFlowControl and
// wake up RoundTrip if there is a pending request.
@@ -2243,7 +2405,6 @@ func isEOFOrNetReadError(err error) bool {
func (rl *clientConnReadLoop) cleanup() {
cc := rl.cc
- cc.t.connPool().MarkDead(cc)
defer cc.closeConn()
defer close(cc.readerDone)
@@ -2267,6 +2428,24 @@ func (rl *clientConnReadLoop) cleanup() {
}
cc.closed = true
+ // If the connection has never been used, and has been open for only a short time,
+ // leave it in the connection pool for a little while.
+ //
+ // This avoids a situation where new connections are constantly created,
+ // added to the pool, fail, and are removed from the pool, without any error
+ // being surfaced to the user.
+ const unusedWaitTime = 5 * time.Second
+ idleTime := cc.t.now().Sub(cc.lastActive)
+ if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime {
+ cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
+ cc.t.connPool().MarkDead(cc)
+ })
+ } else {
+ cc.mu.Unlock() // avoid any deadlocks in MarkDead
+ cc.t.connPool().MarkDead(cc)
+ cc.mu.Lock()
+ }
+
for _, cs := range cc.streams {
select {
case <-cs.peerClosed:
@@ -2324,7 +2503,7 @@ func (rl *clientConnReadLoop) run() error {
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
}
if se, ok := err.(StreamError); ok {
- if cs := rl.streamByID(se.StreamID); cs != nil {
+ if cs := rl.streamByID(se.StreamID, notHeaderOrDataFrame); cs != nil {
if se.Cause == nil {
se.Cause = cc.fr.errDetail
}
@@ -2370,13 +2549,16 @@ func (rl *clientConnReadLoop) run() error {
if VerboseLogs {
cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
}
+ if !cc.seenSettings {
+ close(cc.seenSettingsChan)
+ }
return err
}
}
}
func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error {
- cs := rl.streamByID(f.StreamID)
+ cs := rl.streamByID(f.StreamID, headerOrDataFrame)
if cs == nil {
// We'd get here if we canceled a request while the
// server had its response still in flight. So if this
@@ -2494,15 +2676,34 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
if f.StreamEnded() {
return nil, errors.New("1xx informational response with END_STREAM flag")
}
- cs.num1xx++
- const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http
- if cs.num1xx > max1xxResponses {
- return nil, errors.New("http2: too many 1xx informational responses")
- }
if fn := cs.get1xxTraceFunc(); fn != nil {
+ // If the 1xx response is being delivered to the user,
+ // then they're responsible for limiting the number
+ // of responses.
if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil {
return nil, err
}
+ } else {
+ // If the user didn't examine the 1xx response, then we
+ // limit the size of all 1xx headers.
+ //
+ // This differs a bit from the HTTP/1 implementation, which
+ // limits the size of all 1xx headers plus the final response.
+ // Use the larger limit of MaxHeaderListSize and
+ // net/http.Transport.MaxResponseHeaderBytes.
+ limit := int64(cs.cc.t.maxHeaderListSize())
+ if t1 := cs.cc.t.t1; t1 != nil && t1.MaxResponseHeaderBytes > limit {
+ limit = t1.MaxResponseHeaderBytes
+ }
+ for _, h := range f.Fields {
+ cs.totalHeaderSize += int64(h.Size())
+ }
+ if cs.totalHeaderSize > limit {
+ if VerboseLogs {
+ log.Printf("http2: 1xx informational responses too large")
+ }
+ return nil, errors.New("header list too large")
+ }
}
if statusCode == 100 {
traceGot100Continue(cs.trace)
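
Illustrative sketch, not part of this patch: the branch above hands 1xx responses to a user-supplied httptrace hook and only enforces the header-size cap when no hook is installed. Assuming the standard net/http/httptrace API, a caller can observe 1xx responses like this (the URL is hypothetical):

package main

import (
	"fmt"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
)

func main() {
	trace := &httptrace.ClientTrace{
		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
			// With this hook installed, the caller is responsible for bounding
			// how many informational responses it is willing to accept.
			fmt.Println("got 1xx:", code, header)
			return nil
		},
	}
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		panic(err)
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	res.Body.Close()
}
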
@@ -2686,7 +2887,7 @@ func (b transportResponseBody) Close() error {
func (rl *clientConnReadLoop) processData(f *DataFrame) error {
cc := rl.cc
- cs := rl.streamByID(f.StreamID)
+ cs := rl.streamByID(f.StreamID, headerOrDataFrame)
data := f.Data()
if cs == nil {
cc.mu.Lock()
@@ -2821,9 +3022,22 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) {
cs.abortStream(err)
}
-func (rl *clientConnReadLoop) streamByID(id uint32) *clientStream {
+// Constants passed to streamByID for documentation purposes.
+const (
+ headerOrDataFrame = true
+ notHeaderOrDataFrame = false
+)
+
+// streamByID returns the stream with the given id, or nil if no stream has that id.
+// If headerOrData is true, it clears cc.rstStreamPingsBlocked.
+func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientStream {
rl.cc.mu.Lock()
defer rl.cc.mu.Unlock()
+ if headerOrData {
+ // Work around an unfortunate gRPC behavior.
+ // See comment on ClientConn.rstStreamPingsBlocked for details.
+ rl.cc.rstStreamPingsBlocked = false
+ }
cs := rl.cc.streams[id]
if cs != nil && !cs.readAborted {
return cs
@@ -2917,6 +3131,21 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
case SettingHeaderTableSize:
cc.henc.SetMaxDynamicTableSize(s.Val)
cc.peerMaxHeaderTableSize = s.Val
+ case SettingEnableConnectProtocol:
+ if err := s.Valid(); err != nil {
+ return err
+ }
+ // If the peer wants to send us SETTINGS_ENABLE_CONNECT_PROTOCOL,
+ // we require that it do so in the first SETTINGS frame.
+ //
+ // When we attempt to use extended CONNECT, we wait for the first
+ // SETTINGS frame to see if the server supports it. If we let the
+ // server enable the feature with a later SETTINGS frame, then
+ // users will see inconsistent results depending on whether we've
+ // seen that frame or not.
+ if !cc.seenSettings {
+ cc.extendedConnectAllowed = s.Val == 1
+ }
default:
cc.vlogf("Unhandled Setting: %v", s)
}
@@ -2934,6 +3163,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
// connection can establish to our default.
cc.maxConcurrentStreams = defaultMaxConcurrentStreams
}
+ close(cc.seenSettingsChan)
cc.seenSettings = true
}
@@ -2942,7 +3172,7 @@ func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
cc := rl.cc
- cs := rl.streamByID(f.StreamID)
+ cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame)
if f.StreamID != 0 && cs == nil {
return nil
}
@@ -2971,7 +3201,7 @@ func (rl *clientConnReadLoop) processWindowUpdate(f *WindowUpdateFrame) error {
}
func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error {
- cs := rl.streamByID(f.StreamID)
+ cs := rl.streamByID(f.StreamID, notHeaderOrDataFrame)
if cs == nil {
// TODO: return error if server tries to RST_STREAM an idle stream
return nil
@@ -3046,6 +3276,12 @@ func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
close(c)
delete(cc.pings, f.Data)
}
+ if cc.pendingResets > 0 {
+ // See clientStream.cleanupWriteRequest.
+ cc.pendingResets = 0
+ cc.rstStreamPingsBlocked = true
+ cc.cond.Broadcast()
+ }
return nil
}
cc := rl.cc
@@ -3068,13 +3304,20 @@ func (rl *clientConnReadLoop) processPushPromise(f *PushPromiseFrame) error {
return ConnectionError(ErrCodeProtocol)
}
-func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) {
+// writeStreamReset sends a RST_STREAM frame.
+// When ping is true, it also sends a PING frame with a random payload.
+func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool, err error) {
// TODO: map err to more interesting error codes, once the
// HTTP community comes up with some. But currently for
// RST_STREAM there's no equivalent to GOAWAY frame's debug
// data, and the error codes are all pretty vague ("cancel").
cc.wmu.Lock()
cc.fr.WriteRSTStream(streamID, code)
+ if ping {
+ var payload [8]byte
+ rand.Read(payload[:])
+ cc.fr.WritePing(false, payload)
+ }
cc.bw.Flush()
cc.wmu.Unlock()
}
@@ -3228,7 +3471,7 @@ func traceGotConn(req *http.Request, cc *ClientConn, reused bool) {
cc.mu.Lock()
ci.WasIdle = len(cc.streams) == 0 && reused
if ci.WasIdle && !cc.lastActive.IsZero() {
- ci.IdleTime = time.Since(cc.lastActive)
+ ci.IdleTime = cc.t.timeSince(cc.lastActive)
}
cc.mu.Unlock()
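
Illustrative sketch, not part of this patch: as the checks in writeRequest and validateHeaders suggest, extended CONNECT (RFC 8441) is selected by a CONNECT request carrying a ":protocol" header, and the request blocks until the peer's first SETTINGS frame arrives. The endpoint and protocol value below are hypothetical:

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	tr := &http2.Transport{}
	req, err := http.NewRequest("CONNECT", "https://example.com/chat", nil)
	if err != nil {
		panic(err)
	}
	// The ":protocol" pseudo-header is what routes the request through the
	// extended CONNECT path added above.
	req.Header.Set(":protocol", "websocket")
	res, err := tr.RoundTrip(req)
	if err != nil {
		// Fails with "extended connect not supported by peer" when the first
		// SETTINGS frame did not enable SETTINGS_ENABLE_CONNECT_PROTOCOL.
		fmt.Println("extended CONNECT failed:", err)
		return
	}
	defer res.Body.Close()
	fmt.Println("status:", res.Status)
}
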
diff --git a/tools/vendor/golang.org/x/net/http2/unencrypted.go b/tools/vendor/golang.org/x/net/http2/unencrypted.go
new file mode 100644
index 000000000..b2de21161
--- /dev/null
+++ b/tools/vendor/golang.org/x/net/http2/unencrypted.go
@@ -0,0 +1,32 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package http2
+
+import (
+ "crypto/tls"
+ "errors"
+ "net"
+)
+
+const nextProtoUnencryptedHTTP2 = "unencrypted_http2"
+
+// unencryptedNetConnFromTLSConn retrieves a net.Conn wrapped in a *tls.Conn.
+//
+// TLSNextProto functions accept a *tls.Conn.
+//
+// When passing an unencrypted HTTP/2 connection to a TLSNextProto function,
+// we pass a *tls.Conn with an underlying net.Conn containing the unencrypted connection.
+// To be extra careful about mistakes (accidentally dropping TLS encryption in a place
+// where we want it), the tls.Conn contains a net.Conn with an UnencryptedNetConn method
+// that returns the actual connection we want to use.
+func unencryptedNetConnFromTLSConn(tc *tls.Conn) (net.Conn, error) {
+ conner, ok := tc.NetConn().(interface {
+ UnencryptedNetConn() net.Conn
+ })
+ if !ok {
+ return nil, errors.New("http2: TLS conn unexpectedly found in unencrypted handoff")
+ }
+ return conner.UnencryptedNetConn(), nil
+}
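
Illustrative sketch, not part of this patch: the dialing side is expected to hand over a *tls.Conn whose NetConn() result exposes an UnencryptedNetConn method. A hypothetical wrapper satisfying that contract could look like this:

package handoffsketch

import (
	"crypto/tls"
	"net"
)

// unencryptedConn is a hypothetical wrapper; any type whose
// UnencryptedNetConn() net.Conn method returns the raw connection
// satisfies the check in unencryptedNetConnFromTLSConn.
type unencryptedConn struct {
	net.Conn
}

func (c unencryptedConn) UnencryptedNetConn() net.Conn { return c.Conn }

// wrapForHandoff packages a plain connection inside a *tls.Conn so a
// TLSNextProto function can later recover it. No TLS handshake is performed.
func wrapForHandoff(raw net.Conn) *tls.Conn {
	return tls.Client(unencryptedConn{Conn: raw}, &tls.Config{
		NextProtos: []string{"unencrypted_http2"}, // matches nextProtoUnencryptedHTTP2 above
	})
}
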
diff --git a/tools/vendor/golang.org/x/oauth2/token.go b/tools/vendor/golang.org/x/oauth2/token.go
index 5bbb33217..109997d77 100644
--- a/tools/vendor/golang.org/x/oauth2/token.go
+++ b/tools/vendor/golang.org/x/oauth2/token.go
@@ -49,6 +49,13 @@ type Token struct {
// mechanisms for that TokenSource will not be used.
Expiry time.Time `json:"expiry,omitempty"`
+ // ExpiresIn is the OAuth2 wire format "expires_in" field,
+ // which specifies how many seconds later the token expires,
+ // relative to an unknown time base approximately around "now".
+ // It is the application's responsibility to populate
+ // `Expiry` from `ExpiresIn` when required.
+ ExpiresIn int64 `json:"expires_in,omitempty"`
+
// raw optionally contains extra metadata from the server
// when updating a token.
raw interface{}
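
Illustrative sketch, not part of this patch: as the new doc comment notes, populating Expiry from ExpiresIn is left to the application. Assuming the vendored oauth2.Token above, that might look like:

package main

import (
	"fmt"
	"time"

	"golang.org/x/oauth2"
)

func main() {
	tok := &oauth2.Token{AccessToken: "hypothetical-token", ExpiresIn: 3600}
	if tok.Expiry.IsZero() && tok.ExpiresIn > 0 {
		// The wire time base is only approximately "now"; subtract a safety
		// margin here if early expiry matters for your use case.
		tok.Expiry = time.Now().Add(time.Duration(tok.ExpiresIn) * time.Second)
	}
	fmt.Println("valid:", tok.Valid(), "expires:", tok.Expiry)
}
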
diff --git a/tools/vendor/golang.org/x/sys/unix/ioctl_linux.go b/tools/vendor/golang.org/x/sys/unix/ioctl_linux.go
index dbe680eab..7ca4fa12a 100644
--- a/tools/vendor/golang.org/x/sys/unix/ioctl_linux.go
+++ b/tools/vendor/golang.org/x/sys/unix/ioctl_linux.go
@@ -58,6 +58,102 @@ func IoctlGetEthtoolDrvinfo(fd int, ifname string) (*EthtoolDrvinfo, error) {
return &value, err
}
+// IoctlGetEthtoolTsInfo fetches ethtool timestamping and PHC
+// association for the network device specified by ifname.
+func IoctlGetEthtoolTsInfo(fd int, ifname string) (*EthtoolTsInfo, error) {
+ ifr, err := NewIfreq(ifname)
+ if err != nil {
+ return nil, err
+ }
+
+ value := EthtoolTsInfo{Cmd: ETHTOOL_GET_TS_INFO}
+ ifrd := ifr.withData(unsafe.Pointer(&value))
+
+ err = ioctlIfreqData(fd, SIOCETHTOOL, &ifrd)
+ return &value, err
+}
+
+// IoctlGetHwTstamp retrieves the hardware timestamping configuration
+// for the network device specified by ifname.
+func IoctlGetHwTstamp(fd int, ifname string) (*HwTstampConfig, error) {
+ ifr, err := NewIfreq(ifname)
+ if err != nil {
+ return nil, err
+ }
+
+ value := HwTstampConfig{}
+ ifrd := ifr.withData(unsafe.Pointer(&value))
+
+ err = ioctlIfreqData(fd, SIOCGHWTSTAMP, &ifrd)
+ return &value, err
+}
+
+// IoctlSetHwTstamp updates the hardware timestamping configuration for
+// the network device specified by ifname.
+func IoctlSetHwTstamp(fd int, ifname string, cfg *HwTstampConfig) error {
+ ifr, err := NewIfreq(ifname)
+ if err != nil {
+ return err
+ }
+ ifrd := ifr.withData(unsafe.Pointer(cfg))
+ return ioctlIfreqData(fd, SIOCSHWTSTAMP, &ifrd)
+}
+
+// FdToClockID derives the clock ID from the file descriptor number
+// - see clock_gettime(3), FD_TO_CLOCKID macros. The resulting ID is
+// suitable for system calls like ClockGettime.
+func FdToClockID(fd int) int32 { return int32((int(^fd) << 3) | 3) }
+
+// IoctlPtpClockGetcaps returns the description of a given PTP device.
+func IoctlPtpClockGetcaps(fd int) (*PtpClockCaps, error) {
+ var value PtpClockCaps
+ err := ioctlPtr(fd, PTP_CLOCK_GETCAPS2, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlPtpSysOffsetPrecise returns a description of the clock
+// offset compared to the system clock.
+func IoctlPtpSysOffsetPrecise(fd int) (*PtpSysOffsetPrecise, error) {
+ var value PtpSysOffsetPrecise
+ err := ioctlPtr(fd, PTP_SYS_OFFSET_PRECISE2, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlPtpSysOffsetExtended returns an extended description of the
+// clock offset compared to the system clock. The samples parameter
+// specifies the desired number of measurements.
+func IoctlPtpSysOffsetExtended(fd int, samples uint) (*PtpSysOffsetExtended, error) {
+ value := PtpSysOffsetExtended{Samples: uint32(samples)}
+ err := ioctlPtr(fd, PTP_SYS_OFFSET_EXTENDED2, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlPtpPinGetfunc returns the configuration of the specified
+// I/O pin on the given PTP device.
+func IoctlPtpPinGetfunc(fd int, index uint) (*PtpPinDesc, error) {
+ value := PtpPinDesc{Index: uint32(index)}
+ err := ioctlPtr(fd, PTP_PIN_GETFUNC2, unsafe.Pointer(&value))
+ return &value, err
+}
+
+// IoctlPtpPinSetfunc updates the configuration of the specified PTP
+// I/O pin.
+func IoctlPtpPinSetfunc(fd int, pd *PtpPinDesc) error {
+ return ioctlPtr(fd, PTP_PIN_SETFUNC2, unsafe.Pointer(pd))
+}
+
+// IoctlPtpPeroutRequest configures the periodic output mode of the
+// PTP I/O pins.
+func IoctlPtpPeroutRequest(fd int, r *PtpPeroutRequest) error {
+ return ioctlPtr(fd, PTP_PEROUT_REQUEST2, unsafe.Pointer(r))
+}
+
+// IoctlPtpExttsRequest configures the external timestamping mode
+// of the PTP I/O pins.
+func IoctlPtpExttsRequest(fd int, r *PtpExttsRequest) error {
+ return ioctlPtr(fd, PTP_EXTTS_REQUEST2, unsafe.Pointer(r))
+}
+
// IoctlGetWatchdogInfo fetches information about a watchdog device from the
// Linux watchdog API. For more information, see:
// https://www.kernel.org/doc/html/latest/watchdog/watchdog-api.html.
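
Illustrative sketch, not part of this patch: the new PTP helpers combine with FdToClockID to read a PTP hardware clock. The device path is hypothetical; PTP clocks typically appear as /dev/ptpN:

package main

import (
	"fmt"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open("/dev/ptp0", unix.O_RDWR, 0)
	if err != nil {
		panic(err)
	}
	defer unix.Close(fd)

	// Capabilities of the PTP device, via PTP_CLOCK_GETCAPS2.
	caps, err := unix.IoctlPtpClockGetcaps(fd)
	if err != nil {
		panic(err)
	}
	fmt.Printf("clock caps: %+v\n", *caps)

	// Read the hardware clock itself by turning the fd into a dynamic clock ID.
	var ts unix.Timespec
	if err := unix.ClockGettime(unix.FdToClockID(fd), &ts); err != nil {
		panic(err)
	}
	fmt.Printf("PHC time: %d.%09d\n", ts.Sec, ts.Nsec)
}
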
diff --git a/tools/vendor/golang.org/x/sys/unix/mkerrors.sh b/tools/vendor/golang.org/x/sys/unix/mkerrors.sh
index ac54ecaba..6ab02b6c3 100644
--- a/tools/vendor/golang.org/x/sys/unix/mkerrors.sh
+++ b/tools/vendor/golang.org/x/sys/unix/mkerrors.sh
@@ -158,6 +158,16 @@ includes_Linux='
#endif
#define _GNU_SOURCE
+// See the description in unix/linux/types.go
+#if defined(__ARM_EABI__) || \
+ (defined(__mips__) && (_MIPS_SIM == _ABIO32)) || \
+ (defined(__powerpc__) && (!defined(__powerpc64__)))
+# ifdef _TIME_BITS
+# undef _TIME_BITS
+# endif
+# define _TIME_BITS 32
+#endif
+
// is broken on powerpc64, as it fails to include definitions of
// these structures. We just include them copied from .
#if defined(__powerpc__)
@@ -256,6 +266,7 @@ struct ltchars {
#include
#include
#include