diff --git a/go.mod b/go.mod
index a59ebbf7b..562af0509 100644
--- a/go.mod
+++ b/go.mod
@@ -15,22 +15,24 @@ require (
k8s.io/api v0.29.2
k8s.io/apimachinery v0.29.2
k8s.io/client-go v0.29.2
- knative.dev/eventing v0.41.1-0.20240627060150-a6ac8111e82f
- knative.dev/hack v0.0.0-20240607132042-09143140a254
- knative.dev/pkg v0.0.0-20240626134149-3f6a546ac3a4
- knative.dev/serving v0.41.1-0.20240626185720-a043ddf2770a
+ knative.dev/eventing v0.42.0
+ knative.dev/hack v0.0.0-20240704013904-b9799599afcf
+ knative.dev/pkg v0.0.0-20240716082220-4355f0c73608
+ knative.dev/serving v0.42.0
)

require (
contrib.go.opencensus.io/exporter/ocagent v0.7.1-0.20200907061046-05415f1de66d // indirect
contrib.go.opencensus.io/exporter/prometheus v0.4.2 // indirect
contrib.go.opencensus.io/exporter/zipkin v0.1.2 // indirect
+ github.com/antlr/antlr4/runtime/Go/antlr v1.4.10 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/blendle/zapdriver v1.3.1 // indirect
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
- github.com/cespare/xxhash/v2 v2.2.0 // indirect
+ github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudevents/sdk-go/observability/opencensus/v2 v2.15.2 // indirect
+ github.com/cloudevents/sdk-go/sql/v2 v2.0.0-20240712172937-3ce6b2f1f011 // indirect
github.com/coreos/go-oidc/v3 v3.9.0 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
@@ -72,36 +74,38 @@ require (
github.com/prometheus/statsd_exporter v0.22.7 // indirect
github.com/rickb777/date v1.13.0 // indirect
github.com/rickb777/plural v1.2.1 // indirect
+ github.com/robfig/cron/v3 v3.0.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
go.opencensus.io v0.24.0 // indirect
go.uber.org/automaxprocs v1.5.3 // indirect
go.uber.org/multierr v1.11.0 // indirect
- golang.org/x/crypto v0.24.0 // indirect
- golang.org/x/mod v0.18.0 // indirect
- golang.org/x/net v0.26.0 // indirect
+ golang.org/x/crypto v0.25.0 // indirect
+ golang.org/x/mod v0.19.0 // indirect
+ golang.org/x/net v0.27.0 // indirect
golang.org/x/oauth2 v0.21.0 // indirect
golang.org/x/sync v0.7.0 // indirect
- golang.org/x/sys v0.21.0 // indirect
- golang.org/x/term v0.21.0 // indirect
+ golang.org/x/sys v0.22.0 // indirect
+ golang.org/x/term v0.22.0 // indirect
golang.org/x/text v0.16.0 // indirect
golang.org/x/time v0.5.0 // indirect
- golang.org/x/tools v0.22.0 // indirect
+ golang.org/x/tools v0.23.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/api v0.183.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
- google.golang.org/grpc v1.64.0 // indirect
+ google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.34.1 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/apiextensions-apiserver v0.29.2 // indirect
+ k8s.io/apiserver v0.29.2 // indirect
k8s.io/code-generator v0.29.2 // indirect
k8s.io/gengo v0.0.0-20240129211411-f967bbeff4b4 // indirect
k8s.io/klog/v2 v2.120.1 // indirect
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 // indirect
k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect
- knative.dev/networking v0.0.0-20240611072033-3b8764c0bb4c // indirect
+ knative.dev/networking v0.0.0-20240716111826-bab7f2a3e556 // indirect
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
diff --git a/go.sum b/go.sum
index 6637b00dd..cfdc6f825 100644
--- a/go.sum
+++ b/go.sum
@@ -58,16 +58,16 @@ github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMr
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
-github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
+github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudevents/sdk-go/observability/opencensus/v2 v2.15.2 h1:AbtPqiUDzKup5JpTZzO297/QXgL/TAdpdXQCNwLzlaM=
github.com/cloudevents/sdk-go/observability/opencensus/v2 v2.15.2/go.mod h1:ZbYLE+yaEQ2j4vbRc9qzvGmg30A9LhwFt/1bSebNnbU=
-github.com/cloudevents/sdk-go/sql/v2 v2.15.2 h1:TNaTeWIbDaci89xgXbmmNVGccawQOvEfWYLWrr7Fk/k=
-github.com/cloudevents/sdk-go/sql/v2 v2.15.2/go.mod h1:us+PSk8OXdk8pDbRfvxy5w8ub5goKE7UP9PjKDY7TPw=
+github.com/cloudevents/sdk-go/sql/v2 v2.0.0-20240712172937-3ce6b2f1f011 h1:mx6avAROtrV9yTlBBH4Y8IAmspmcz9v44Pkcrjq0tAA=
+github.com/cloudevents/sdk-go/sql/v2 v2.0.0-20240712172937-3ce6b2f1f011/go.mod h1:oqJ9+L9IXySYb8PN6M/g/K8y/WdVQunmmZhJnlLFcCk=
github.com/cloudevents/sdk-go/v2 v2.15.2 h1:54+I5xQEnI73RBhWHxbI1XJcqOFOVJN85vb41+8mHUc=
github.com/cloudevents/sdk-go/v2 v2.15.2/go.mod h1:lL7kSWAE/V8VI4Wh0jbL2v/jvqsm6tjmaQBSvxcv4uE=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
@@ -126,8 +126,8 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
-github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
-github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
+github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
+github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -373,8 +373,8 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
-golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI=
-golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM=
+golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30=
+golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -407,8 +407,8 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0=
-golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8=
+golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -447,8 +447,8 @@ golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
-golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ=
-golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE=
+golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys=
+golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -514,15 +514,15 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
-golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
+golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
-golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
+golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk=
+golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -585,8 +585,8 @@ golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.22.0 h1:gqSGLZqv+AI9lIQzniJ0nZDRG5GBPsSi+DRNHWNz6yA=
-golang.org/x/tools v0.22.0/go.mod h1:aCwcsjqvq7Yqt6TNyX7QMU2enbQ/Gt0bo6krSeEri+c=
+golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg=
+golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -668,8 +668,8 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
-google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY=
-google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg=
+google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
+google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -737,16 +737,16 @@ k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00 h1:aVUu9fTY98ivBPKR9Y5w/A
k8s.io/kube-openapi v0.0.0-20231010175941-2dd684a91f00/go.mod h1:AsvuZPBlUDVuCdzJ87iajxtXuR9oktsTctW/R9wwouA=
k8s.io/utils v0.0.0-20240102154912-e7106e64919e h1:eQ/4ljkx21sObifjzXwlPKpdGLrCfRziVtos3ofG/sQ=
k8s.io/utils v0.0.0-20240102154912-e7106e64919e/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-knative.dev/eventing v0.41.1-0.20240627060150-a6ac8111e82f h1:FuJQSlda7F1Yk1eaSSvWXq7ic9h4Lo/s9EjMnKCoFcg=
-knative.dev/eventing v0.41.1-0.20240627060150-a6ac8111e82f/go.mod h1:3h0QrfHELs61mrTI4GDPEQh4rwsap0YYA5XgRrNgnlc=
-knative.dev/hack v0.0.0-20240607132042-09143140a254 h1:1YFnu3U6dWZg0oxm6GU8kEdA9A+BvSWKJO7sg3N0kq8=
-knative.dev/hack v0.0.0-20240607132042-09143140a254/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
-knative.dev/networking v0.0.0-20240611072033-3b8764c0bb4c h1:Q+DdJYzvhwAVWMQtP6mbEr5dNxpr+K9HAF9RqJmZefY=
-knative.dev/networking v0.0.0-20240611072033-3b8764c0bb4c/go.mod h1:WhZLv94eOMDGHbdZiMrw6cnRfN3WEcFgpjUcV0A48pI=
-knative.dev/pkg v0.0.0-20240626134149-3f6a546ac3a4 h1:slPKf3UKdBFZlz+hFy+KXzTgY9yOePLzRuEhKzgc5a4=
-knative.dev/pkg v0.0.0-20240626134149-3f6a546ac3a4/go.mod h1:Wikg4u73T6vk9TctrxZt60VXzqmGEQIx0iKfk1+9o4c=
-knative.dev/serving v0.41.1-0.20240626185720-a043ddf2770a h1:HAhAQRkvCMr1CBGtmUghy9DseqcTFs4SFNb/slg5ics=
-knative.dev/serving v0.41.1-0.20240626185720-a043ddf2770a/go.mod h1:7+wAf1rE/O2O+92Ft8Bfw3LnDirkg4c/+jKU3giMIoc=
+knative.dev/eventing v0.42.0 h1:pbPPhV4JlgpHBZxLBhJTUf+4HuZe5y/zlkOGHZfvtZ0=
+knative.dev/eventing v0.42.0/go.mod h1:hW5BMYcihtCelT9pqaMtK8gmNOo1ybxcigjBY+/fU+k=
+knative.dev/hack v0.0.0-20240704013904-b9799599afcf h1:n92FmZRywgtHso7pFAku7CW0qvRAs1hXtMQqO0R6eiE=
+knative.dev/hack v0.0.0-20240704013904-b9799599afcf/go.mod h1:yk2OjGDsbEnQjfxdm0/HJKS2WqTLEFg/N6nUs6Rqx3Q=
+knative.dev/networking v0.0.0-20240716111826-bab7f2a3e556 h1:9OTyJkrjiFh/burZiti3WucGv8Qtt91VJTnXfO5dC2g=
+knative.dev/networking v0.0.0-20240716111826-bab7f2a3e556/go.mod h1:1PosUDkXqoHNzYxtLIwa7LFqSsIXBShHOseAb6XBeEU=
+knative.dev/pkg v0.0.0-20240716082220-4355f0c73608 h1:BOiRzcnRS9Z5ruxlCiS/K1/Hb5bUN0X4W3xCegdcYQE=
+knative.dev/pkg v0.0.0-20240716082220-4355f0c73608/go.mod h1:M67lDZ4KbltYSon0Ox4/6qjlZNOIXW4Ldequ81yofbw=
+knative.dev/serving v0.42.0 h1:utItXW+L6inUfJ7Y1LgnbAMc/RyxvvAQNliGU2XC34s=
+knative.dev/serving v0.42.0/go.mod h1:3cgU8/864RcqA0ZPrc3jFcmS3uJL/mOlUZiYsXonwaE=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE
new file mode 100644
index 000000000..52cf18e42
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/LICENSE
@@ -0,0 +1,26 @@
+Copyright 2021 The ANTLR Project
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
new file mode 100644
index 000000000..a4e2079e6
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn.go
@@ -0,0 +1,159 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "sync"
+
+var ATNInvalidAltNumber int
+
+type ATN struct {
+ // DecisionToState is the decision points for all rules, subrules, optional
+ // blocks, ()+, ()*, etc. Used to build DFA predictors for them.
+ DecisionToState []DecisionState
+
+ // grammarType is the ATN type and is used for deserializing ATNs from strings.
+ grammarType int
+
+ // lexerActions is referenced by action transitions in the ATN for lexer ATNs.
+ lexerActions []LexerAction
+
+ // maxTokenType is the maximum value for any symbol recognized by a transition in the ATN.
+ maxTokenType int
+
+ modeNameToStartState map[string]*TokensStartState
+
+ modeToStartState []*TokensStartState
+
+ // ruleToStartState maps from rule index to starting state number.
+ ruleToStartState []*RuleStartState
+
+ // ruleToStopState maps from rule index to stop state number.
+ ruleToStopState []*RuleStopState
+
+ // ruleToTokenType maps the rule index to the resulting token type for lexer
+ // ATNs. For parser ATNs, it maps the rule index to the generated bypass token
+ // type if ATNDeserializationOptions.isGenerateRuleBypassTransitions was
+ // specified, and otherwise is nil.
+ ruleToTokenType []int
+
+ states []ATNState
+
+ mu sync.Mutex
+ stateMu sync.RWMutex
+ edgeMu sync.RWMutex
+}
+
+func NewATN(grammarType int, maxTokenType int) *ATN {
+ return &ATN{
+ grammarType: grammarType,
+ maxTokenType: maxTokenType,
+ modeNameToStartState: make(map[string]*TokensStartState),
+ }
+}
+
+// NextTokensInContext computes the set of valid tokens that can occur starting
+// in state s. If ctx is nil, the set of tokens will not include what can follow
+// the rule surrounding s. In other words, the set will be restricted to tokens
+// reachable staying within the rule of s.
+func (a *ATN) NextTokensInContext(s ATNState, ctx RuleContext) *IntervalSet {
+ return NewLL1Analyzer(a).Look(s, nil, ctx)
+}
+
+// NextTokensNoContext computes the set of valid tokens that can occur starting
+// in s and staying in same rule. Token.EPSILON is in set if we reach end of
+// rule.
+func (a *ATN) NextTokensNoContext(s ATNState) *IntervalSet {
+ a.mu.Lock()
+ defer a.mu.Unlock()
+ iset := s.GetNextTokenWithinRule()
+ if iset == nil {
+ iset = a.NextTokensInContext(s, nil)
+ iset.readOnly = true
+ s.SetNextTokenWithinRule(iset)
+ }
+ return iset
+}
+
+func (a *ATN) NextTokens(s ATNState, ctx RuleContext) *IntervalSet {
+ if ctx == nil {
+ return a.NextTokensNoContext(s)
+ }
+
+ return a.NextTokensInContext(s, ctx)
+}
+
+func (a *ATN) addState(state ATNState) {
+ if state != nil {
+ state.SetATN(a)
+ state.SetStateNumber(len(a.states))
+ }
+
+ a.states = append(a.states, state)
+}
+
+func (a *ATN) removeState(state ATNState) {
+ a.states[state.GetStateNumber()] = nil // Just free the memory; don't shift states in the slice
+}
+
+func (a *ATN) defineDecisionState(s DecisionState) int {
+ a.DecisionToState = append(a.DecisionToState, s)
+ s.setDecision(len(a.DecisionToState) - 1)
+
+ return s.getDecision()
+}
+
+func (a *ATN) getDecisionState(decision int) DecisionState {
+ if len(a.DecisionToState) == 0 {
+ return nil
+ }
+
+ return a.DecisionToState[decision]
+}
+
+// getExpectedTokens computes the set of input symbols which could follow ATN
+// state number stateNumber in the specified full parse context ctx and returns
+// the set of potentially valid input symbols which could follow the specified
+// state in the specified context. This method considers the complete parser
+// context, but does not evaluate semantic predicates (i.e. all predicates
+// encountered during the calculation are assumed true). If a path in the ATN
+// exists from the starting state to the RuleStopState of the outermost context
+// without Matching any symbols, Token.EOF is added to the returned set.
+//
+// A nil ctx defaults to ParserRuleContext.EMPTY.
+//
+// It panics if the ATN does not contain state stateNumber.
+func (a *ATN) getExpectedTokens(stateNumber int, ctx RuleContext) *IntervalSet {
+ if stateNumber < 0 || stateNumber >= len(a.states) {
+ panic("Invalid state number.")
+ }
+
+ s := a.states[stateNumber]
+ following := a.NextTokens(s, nil)
+
+ if !following.contains(TokenEpsilon) {
+ return following
+ }
+
+ expected := NewIntervalSet()
+
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := a.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+
+ following = a.NextTokens(rt.(*RuleTransition).followState, nil)
+ expected.addSet(following)
+ expected.removeOne(TokenEpsilon)
+ ctx = ctx.GetParent().(RuleContext)
+ }
+
+ if following.contains(TokenEpsilon) {
+ expected.addOne(TokenEOF)
+ }
+
+ return expected
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
new file mode 100644
index 000000000..97ba417f7
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config.go
@@ -0,0 +1,295 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+type comparable interface {
+ equals(other interface{}) bool
+}
+
+// ATNConfig is a tuple: (ATN state, predicted alt, syntactic, semantic
+// context). The syntactic context is a graph-structured stack node whose
+// path(s) to the root is the rule invocation(s) chain used to arrive at the
+// state. The semantic context is the tree of semantic predicates encountered
+// before reaching an ATN state.
+type ATNConfig interface {
+ comparable
+
+ hash() int
+
+ GetState() ATNState
+ GetAlt() int
+ GetSemanticContext() SemanticContext
+
+ GetContext() PredictionContext
+ SetContext(PredictionContext)
+
+ GetReachesIntoOuterContext() int
+ SetReachesIntoOuterContext(int)
+
+ String() string
+
+ getPrecedenceFilterSuppressed() bool
+ setPrecedenceFilterSuppressed(bool)
+}
+
+type BaseATNConfig struct {
+ precedenceFilterSuppressed bool
+ state ATNState
+ alt int
+ context PredictionContext
+ semanticContext SemanticContext
+ reachesIntoOuterContext int
+}
+
+func NewBaseATNConfig7(old *BaseATNConfig) *BaseATNConfig { // TODO: Dup
+ return &BaseATNConfig{
+ state: old.state,
+ alt: old.alt,
+ context: old.context,
+ semanticContext: old.semanticContext,
+ reachesIntoOuterContext: old.reachesIntoOuterContext,
+ }
+}
+
+func NewBaseATNConfig6(state ATNState, alt int, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig5(state, alt, context, SemanticContextNone)
+}
+
+func NewBaseATNConfig5(state ATNState, alt int, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil") // TODO: Necessary?
+ }
+
+ return &BaseATNConfig{state: state, alt: alt, context: context, semanticContext: semanticContext}
+}
+
+func NewBaseATNConfig4(c ATNConfig, state ATNState) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext())
+}
+
+func NewBaseATNConfig3(c ATNConfig, state ATNState, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig2(c ATNConfig, semanticContext SemanticContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, c.GetState(), c.GetContext(), semanticContext)
+}
+
+func NewBaseATNConfig1(c ATNConfig, state ATNState, context PredictionContext) *BaseATNConfig {
+ return NewBaseATNConfig(c, state, context, c.GetSemanticContext())
+}
+
+func NewBaseATNConfig(c ATNConfig, state ATNState, context PredictionContext, semanticContext SemanticContext) *BaseATNConfig {
+ if semanticContext == nil {
+ panic("semanticContext cannot be nil")
+ }
+
+ return &BaseATNConfig{
+ state: state,
+ alt: c.GetAlt(),
+ context: context,
+ semanticContext: semanticContext,
+ reachesIntoOuterContext: c.GetReachesIntoOuterContext(),
+ precedenceFilterSuppressed: c.getPrecedenceFilterSuppressed(),
+ }
+}
+
+func (b *BaseATNConfig) getPrecedenceFilterSuppressed() bool {
+ return b.precedenceFilterSuppressed
+}
+
+func (b *BaseATNConfig) setPrecedenceFilterSuppressed(v bool) {
+ b.precedenceFilterSuppressed = v
+}
+
+func (b *BaseATNConfig) GetState() ATNState {
+ return b.state
+}
+
+func (b *BaseATNConfig) GetAlt() int {
+ return b.alt
+}
+
+func (b *BaseATNConfig) SetContext(v PredictionContext) {
+ b.context = v
+}
+func (b *BaseATNConfig) GetContext() PredictionContext {
+ return b.context
+}
+
+func (b *BaseATNConfig) GetSemanticContext() SemanticContext {
+ return b.semanticContext
+}
+
+func (b *BaseATNConfig) GetReachesIntoOuterContext() int {
+ return b.reachesIntoOuterContext
+}
+
+func (b *BaseATNConfig) SetReachesIntoOuterContext(v int) {
+ b.reachesIntoOuterContext = v
+}
+
+// An ATN configuration is equal to another if both have the same state, they
+// predict the same alternative, and syntactic/semantic contexts are the same.
+func (b *BaseATNConfig) equals(o interface{}) bool {
+ if b == o {
+ return true
+ }
+
+ var other, ok = o.(*BaseATNConfig)
+
+ if !ok {
+ return false
+ }
+
+ var equal bool
+
+ if b.context == nil {
+ equal = other.context == nil
+ } else {
+ equal = b.context.equals(other.context)
+ }
+
+ var (
+ nums = b.state.GetStateNumber() == other.state.GetStateNumber()
+ alts = b.alt == other.alt
+ cons = b.semanticContext.equals(other.semanticContext)
+ sups = b.precedenceFilterSuppressed == other.precedenceFilterSuppressed
+ )
+
+ return nums && alts && cons && sups && equal
+}
+
+func (b *BaseATNConfig) hash() int {
+ var c int
+ if b.context != nil {
+ c = b.context.hash()
+ }
+
+ h := murmurInit(7)
+ h = murmurUpdate(h, b.state.GetStateNumber())
+ h = murmurUpdate(h, b.alt)
+ h = murmurUpdate(h, c)
+ h = murmurUpdate(h, b.semanticContext.hash())
+ return murmurFinish(h, 4)
+}
+
+func (b *BaseATNConfig) String() string {
+ var s1, s2, s3 string
+
+ if b.context != nil {
+ s1 = ",[" + fmt.Sprint(b.context) + "]"
+ }
+
+ if b.semanticContext != SemanticContextNone {
+ s2 = "," + fmt.Sprint(b.semanticContext)
+ }
+
+ if b.reachesIntoOuterContext > 0 {
+ s3 = ",up=" + fmt.Sprint(b.reachesIntoOuterContext)
+ }
+
+ return fmt.Sprintf("(%v,%v%v%v%v)", b.state, b.alt, s1, s2, s3)
+}
+
+type LexerATNConfig struct {
+ *BaseATNConfig
+ lexerActionExecutor *LexerActionExecutor
+ passedThroughNonGreedyDecision bool
+}
+
+func NewLexerATNConfig6(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+func NewLexerATNConfig5(state ATNState, alt int, context PredictionContext, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone),
+ lexerActionExecutor: lexerActionExecutor,
+ }
+}
+
+func NewLexerATNConfig4(c *LexerATNConfig, state ATNState) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig3(c *LexerATNConfig, state ATNState, lexerActionExecutor *LexerActionExecutor) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, c.GetContext(), c.GetSemanticContext()),
+ lexerActionExecutor: lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig2(c *LexerATNConfig, state ATNState, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{
+ BaseATNConfig: NewBaseATNConfig(c, state, context, c.GetSemanticContext()),
+ lexerActionExecutor: c.lexerActionExecutor,
+ passedThroughNonGreedyDecision: checkNonGreedyDecision(c, state),
+ }
+}
+
+func NewLexerATNConfig1(state ATNState, alt int, context PredictionContext) *LexerATNConfig {
+ return &LexerATNConfig{BaseATNConfig: NewBaseATNConfig5(state, alt, context, SemanticContextNone)}
+}
+
+func (l *LexerATNConfig) hash() int {
+ var f int
+ if l.passedThroughNonGreedyDecision {
+ f = 1
+ } else {
+ f = 0
+ }
+ h := murmurInit(7)
+ h = murmurUpdate(h, l.state.GetStateNumber())
+ h = murmurUpdate(h, l.alt)
+ h = murmurUpdate(h, l.context.hash())
+ h = murmurUpdate(h, l.semanticContext.hash())
+ h = murmurUpdate(h, f)
+ h = murmurUpdate(h, l.lexerActionExecutor.hash())
+ h = murmurFinish(h, 6)
+ return h
+}
+
+func (l *LexerATNConfig) equals(other interface{}) bool {
+ var othert, ok = other.(*LexerATNConfig)
+
+ if l == other {
+ return true
+ } else if !ok {
+ return false
+ } else if l.passedThroughNonGreedyDecision != othert.passedThroughNonGreedyDecision {
+ return false
+ }
+
+ var b bool
+
+ if l.lexerActionExecutor != nil {
+ b = !l.lexerActionExecutor.equals(othert.lexerActionExecutor)
+ } else {
+ b = othert.lexerActionExecutor != nil
+ }
+
+ if b {
+ return false
+ }
+
+ return l.BaseATNConfig.equals(othert.BaseATNConfig)
+}
+
+
+func checkNonGreedyDecision(source *LexerATNConfig, target ATNState) bool {
+ var ds, ok = target.(DecisionState)
+
+ return source.passedThroughNonGreedyDecision || (ok && ds.getNonGreedy())
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
new file mode 100644
index 000000000..49ad4a719
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_config_set.go
@@ -0,0 +1,407 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+type ATNConfigSet interface {
+ hash() int
+ Add(ATNConfig, *DoubleDict) bool
+ AddAll([]ATNConfig) bool
+
+ GetStates() Set
+ GetPredicates() []SemanticContext
+ GetItems() []ATNConfig
+
+ OptimizeConfigs(interpreter *BaseATNSimulator)
+
+ Equals(other interface{}) bool
+
+ Length() int
+ IsEmpty() bool
+ Contains(ATNConfig) bool
+ ContainsFast(ATNConfig) bool
+ Clear()
+ String() string
+
+ HasSemanticContext() bool
+ SetHasSemanticContext(v bool)
+
+ ReadOnly() bool
+ SetReadOnly(bool)
+
+ GetConflictingAlts() *BitSet
+ SetConflictingAlts(*BitSet)
+
+ Alts() *BitSet
+
+ FullContext() bool
+
+ GetUniqueAlt() int
+ SetUniqueAlt(int)
+
+ GetDipsIntoOuterContext() bool
+ SetDipsIntoOuterContext(bool)
+}
+
+// BaseATNConfigSet is a specialized set of ATNConfig that tracks information
+// about its elements and can combine similar configurations using a
+// graph-structured stack.
+type BaseATNConfigSet struct {
+ cachedHash int
+
+ // configLookup is used to determine whether two BaseATNConfigSets are equal. We
+ // need all configurations with the same (s, i, _, semctx) to be equal. A key
+ // effectively doubles the number of objects associated with ATNConfigs. All
+ // keys are hashed by (s, i, _, pi), not including the context. Wiped out when
+ // read-only because a set becomes a DFA state.
+ configLookup Set
+
+ // configs is the added elements.
+ configs []ATNConfig
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ conflictingAlts *BitSet
+
+ // dipsIntoOuterContext is used by parsers and lexers. In a lexer, it indicates
+ // we hit a pred while computing a closure operation. Do not make a DFA state
+ // from the BaseATNConfigSet in this case. TODO: How is this used by parsers?
+ dipsIntoOuterContext bool
+
+ // fullCtx is whether it is part of a full context LL prediction. Used to
+ // determine how to merge $. It is a wildcard with SLL, but not for an LL
+ // context merge.
+ fullCtx bool
+
+ // Used in parser and lexer. In lexer, it indicates we hit a pred
+ // while computing a closure operation. Don't make a DFA state from this set.
+ hasSemanticContext bool
+
+ // readOnly is whether it is read-only. Do not
+ // allow any code to manipulate the set if true because DFA states will point at
+ // sets and those must not change. It not protect other fields; conflictingAlts
+ // in particular, which is assigned after readOnly.
+ readOnly bool
+
+ // TODO: These fields make me pretty uncomfortable, but it is nice to pack up
+ // info together because it saves recomputation. Can we track conflicts as they
+ // are added to save scanning configs later?
+ uniqueAlt int
+}
+
+func (b *BaseATNConfigSet) Alts() *BitSet {
+ alts := NewBitSet()
+ for _, it := range b.configs {
+ alts.add(it.GetAlt())
+ }
+ return alts
+}
+
+func NewBaseATNConfigSet(fullCtx bool) *BaseATNConfigSet {
+ return &BaseATNConfigSet{
+ cachedHash: -1,
+ configLookup: newArray2DHashSetWithCap(hashATNConfig, equalATNConfigs, 16, 2),
+ fullCtx: fullCtx,
+ }
+}
+
+// Add merges contexts with existing configs for (s, i, pi, _), where s is the
+// ATNConfig.state, i is the ATNConfig.alt, and pi is the
+// ATNConfig.semanticContext. We use (s,i,pi) as the key. Updates
+// dipsIntoOuterContext and hasSemanticContext when necessary.
+func (b *BaseATNConfigSet) Add(config ATNConfig, mergeCache *DoubleDict) bool {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if config.GetSemanticContext() != SemanticContextNone {
+ b.hasSemanticContext = true
+ }
+
+ if config.GetReachesIntoOuterContext() > 0 {
+ b.dipsIntoOuterContext = true
+ }
+
+ existing := b.configLookup.Add(config).(ATNConfig)
+
+ if existing == config {
+ b.cachedHash = -1
+ b.configs = append(b.configs, config) // Track order here
+ return true
+ }
+
+ // Merge a previous (s, i, pi, _) with it and save the result
+ rootIsWildcard := !b.fullCtx
+ merged := merge(existing.GetContext(), config.GetContext(), rootIsWildcard, mergeCache)
+
+ // No need to check for existing.context because config.context is in the cache,
+ // since the only way to create new graphs is the "call rule" and here. We cache
+ // at both places.
+ existing.SetReachesIntoOuterContext(intMax(existing.GetReachesIntoOuterContext(), config.GetReachesIntoOuterContext()))
+
+ // Preserve the precedence filter suppression during the merge
+ if config.getPrecedenceFilterSuppressed() {
+ existing.setPrecedenceFilterSuppressed(true)
+ }
+
+ // Replace the context because there is no need to do alt mapping
+ existing.SetContext(merged)
+
+ return true
+}
+
+func (b *BaseATNConfigSet) GetStates() Set {
+ states := newArray2DHashSet(nil, nil)
+
+ for i := 0; i < len(b.configs); i++ {
+ states.Add(b.configs[i].GetState())
+ }
+
+ return states
+}
+
+func (b *BaseATNConfigSet) HasSemanticContext() bool {
+ return b.hasSemanticContext
+}
+
+func (b *BaseATNConfigSet) SetHasSemanticContext(v bool) {
+ b.hasSemanticContext = v
+}
+
+func (b *BaseATNConfigSet) GetPredicates() []SemanticContext {
+ preds := make([]SemanticContext, 0)
+
+ for i := 0; i < len(b.configs); i++ {
+ c := b.configs[i].GetSemanticContext()
+
+ if c != SemanticContextNone {
+ preds = append(preds, c)
+ }
+ }
+
+ return preds
+}
+
+func (b *BaseATNConfigSet) GetItems() []ATNConfig {
+ return b.configs
+}
+
+func (b *BaseATNConfigSet) OptimizeConfigs(interpreter *BaseATNSimulator) {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ if b.configLookup.Len() == 0 {
+ return
+ }
+
+ for i := 0; i < len(b.configs); i++ {
+ config := b.configs[i]
+
+ config.SetContext(interpreter.getCachedContext(config.GetContext()))
+ }
+}
+
+func (b *BaseATNConfigSet) AddAll(coll []ATNConfig) bool {
+ for i := 0; i < len(coll); i++ {
+ b.Add(coll[i], nil)
+ }
+
+ return false
+}
+
+func (b *BaseATNConfigSet) Equals(other interface{}) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*BaseATNConfigSet); !ok {
+ return false
+ }
+
+ other2 := other.(*BaseATNConfigSet)
+
+ return b.configs != nil &&
+ // TODO: b.configs.equals(other2.configs) && // TODO: Is b necessary?
+ b.fullCtx == other2.fullCtx &&
+ b.uniqueAlt == other2.uniqueAlt &&
+ b.conflictingAlts == other2.conflictingAlts &&
+ b.hasSemanticContext == other2.hasSemanticContext &&
+ b.dipsIntoOuterContext == other2.dipsIntoOuterContext
+}
+
+func (b *BaseATNConfigSet) hash() int {
+ if b.readOnly {
+ if b.cachedHash == -1 {
+ b.cachedHash = b.hashCodeConfigs()
+ }
+
+ return b.cachedHash
+ }
+
+ return b.hashCodeConfigs()
+}
+
+func (b *BaseATNConfigSet) hashCodeConfigs() int {
+ h := 1
+ for _, config := range b.configs {
+ h = 31*h + config.hash()
+ }
+ return h
+}
+
+func (b *BaseATNConfigSet) Length() int {
+ return len(b.configs)
+}
+
+func (b *BaseATNConfigSet) IsEmpty() bool {
+ return len(b.configs) == 0
+}
+
+func (b *BaseATNConfigSet) Contains(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.Contains(item)
+}
+
+func (b *BaseATNConfigSet) ContainsFast(item ATNConfig) bool {
+ if b.configLookup == nil {
+ panic("not implemented for read-only sets")
+ }
+
+ return b.configLookup.Contains(item) // TODO: containsFast is not implemented for Set
+}
+
+func (b *BaseATNConfigSet) Clear() {
+ if b.readOnly {
+ panic("set is read-only")
+ }
+
+ b.configs = make([]ATNConfig, 0)
+ b.cachedHash = -1
+ b.configLookup = newArray2DHashSet(nil, equalATNConfigs)
+}
+
+func (b *BaseATNConfigSet) FullContext() bool {
+ return b.fullCtx
+}
+
+func (b *BaseATNConfigSet) GetDipsIntoOuterContext() bool {
+ return b.dipsIntoOuterContext
+}
+
+func (b *BaseATNConfigSet) SetDipsIntoOuterContext(v bool) {
+ b.dipsIntoOuterContext = v
+}
+
+func (b *BaseATNConfigSet) GetUniqueAlt() int {
+ return b.uniqueAlt
+}
+
+func (b *BaseATNConfigSet) SetUniqueAlt(v int) {
+ b.uniqueAlt = v
+}
+
+func (b *BaseATNConfigSet) GetConflictingAlts() *BitSet {
+ return b.conflictingAlts
+}
+
+func (b *BaseATNConfigSet) SetConflictingAlts(v *BitSet) {
+ b.conflictingAlts = v
+}
+
+func (b *BaseATNConfigSet) ReadOnly() bool {
+ return b.readOnly
+}
+
+func (b *BaseATNConfigSet) SetReadOnly(readOnly bool) {
+ b.readOnly = readOnly
+
+ if readOnly {
+ b.configLookup = nil // Read only, so no need for the lookup cache
+ }
+}
+
+func (b *BaseATNConfigSet) String() string {
+ s := "["
+
+ for i, c := range b.configs {
+ s += c.String()
+
+ if i != len(b.configs)-1 {
+ s += ", "
+ }
+ }
+
+ s += "]"
+
+ if b.hasSemanticContext {
+ s += ",hasSemanticContext=" + fmt.Sprint(b.hasSemanticContext)
+ }
+
+ if b.uniqueAlt != ATNInvalidAltNumber {
+ s += ",uniqueAlt=" + fmt.Sprint(b.uniqueAlt)
+ }
+
+ if b.conflictingAlts != nil {
+ s += ",conflictingAlts=" + b.conflictingAlts.String()
+ }
+
+ if b.dipsIntoOuterContext {
+ s += ",dipsIntoOuterContext"
+ }
+
+ return s
+}
+
+type OrderedATNConfigSet struct {
+ *BaseATNConfigSet
+}
+
+func NewOrderedATNConfigSet() *OrderedATNConfigSet {
+ b := NewBaseATNConfigSet(false)
+
+ b.configLookup = newArray2DHashSet(nil, nil)
+
+ return &OrderedATNConfigSet{BaseATNConfigSet: b}
+}
+
+func hashATNConfig(i interface{}) int {
+ o := i.(ATNConfig)
+ hash := 7
+ hash = 31*hash + o.GetState().GetStateNumber()
+ hash = 31*hash + o.GetAlt()
+ hash = 31*hash + o.GetSemanticContext().hash()
+ return hash
+}
+
+func equalATNConfigs(a, b interface{}) bool {
+ if a == nil || b == nil {
+ return false
+ }
+
+ if a == b {
+ return true
+ }
+
+ var ai, ok = a.(ATNConfig)
+ var bi, ok1 = b.(ATNConfig)
+
+ if !ok || !ok1 {
+ return false
+ }
+
+ if ai.GetState().GetStateNumber() != bi.GetState().GetStateNumber() {
+ return false
+ }
+
+ if ai.GetAlt() != bi.GetAlt() {
+ return false
+ }
+
+ return ai.GetSemanticContext().equals(bi.GetSemanticContext())
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
new file mode 100644
index 000000000..cb8eafb0b
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserialization_options.go
@@ -0,0 +1,61 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "errors"
+
+var defaultATNDeserializationOptions = ATNDeserializationOptions{true, true, false}
+
+type ATNDeserializationOptions struct {
+ readOnly bool
+ verifyATN bool
+ generateRuleBypassTransitions bool
+}
+
+func (opts *ATNDeserializationOptions) ReadOnly() bool {
+ return opts.readOnly
+}
+
+func (opts *ATNDeserializationOptions) SetReadOnly(readOnly bool) {
+ if opts.readOnly {
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.readOnly = readOnly
+}
+
+func (opts *ATNDeserializationOptions) VerifyATN() bool {
+ return opts.verifyATN
+}
+
+func (opts *ATNDeserializationOptions) SetVerifyATN(verifyATN bool) {
+ if opts.readOnly {
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.verifyATN = verifyATN
+}
+
+func (opts *ATNDeserializationOptions) GenerateRuleBypassTransitions() bool {
+ return opts.generateRuleBypassTransitions
+}
+
+func (opts *ATNDeserializationOptions) SetGenerateRuleBypassTransitions(generateRuleBypassTransitions bool) {
+ if opts.readOnly {
+ panic(errors.New("Cannot mutate read only ATNDeserializationOptions"))
+ }
+ opts.generateRuleBypassTransitions = generateRuleBypassTransitions
+}
+
+func DefaultATNDeserializationOptions() *ATNDeserializationOptions {
+ return NewATNDeserializationOptions(&defaultATNDeserializationOptions)
+}
+
+func NewATNDeserializationOptions(other *ATNDeserializationOptions) *ATNDeserializationOptions {
+ o := new(ATNDeserializationOptions)
+ if other != nil {
+ *o = *other
+ o.readOnly = false
+ }
+ return o
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
new file mode 100644
index 000000000..aea9bbfa9
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_deserializer.go
@@ -0,0 +1,683 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+const serializedVersion = 4
+
+type loopEndStateIntPair struct {
+ item0 *LoopEndState
+ item1 int
+}
+
+type blockStartStateIntPair struct {
+ item0 BlockStartState
+ item1 int
+}
+
+type ATNDeserializer struct {
+ options *ATNDeserializationOptions
+ data []int32
+ pos int
+}
+
+func NewATNDeserializer(options *ATNDeserializationOptions) *ATNDeserializer {
+ if options == nil {
+ options = &defaultATNDeserializationOptions
+ }
+
+ return &ATNDeserializer{options: options}
+}
+
+func stringInSlice(a string, list []string) int {
+ for i, b := range list {
+ if b == a {
+ return i
+ }
+ }
+
+ return -1
+}
+
+func (a *ATNDeserializer) Deserialize(data []int32) *ATN {
+ a.data = data
+ a.pos = 0
+ a.checkVersion()
+
+ atn := a.readATN()
+
+ a.readStates(atn)
+ a.readRules(atn)
+ a.readModes(atn)
+
+ sets := a.readSets(atn, nil)
+
+ a.readEdges(atn, sets)
+ a.readDecisions(atn)
+ a.readLexerActions(atn)
+ a.markPrecedenceDecisions(atn)
+ a.verifyATN(atn)
+
+ if a.options.GenerateRuleBypassTransitions() && atn.grammarType == ATNTypeParser {
+ a.generateRuleBypassTransitions(atn)
+ // Re-verify after modification
+ a.verifyATN(atn)
+ }
+
+ return atn
+
+}
+
+func (a *ATNDeserializer) checkVersion() {
+ version := a.readInt()
+
+ if version != serializedVersion {
+ panic("Could not deserialize ATN with version " + strconv.Itoa(version) + " (expected " + strconv.Itoa(serializedVersion) + ").")
+ }
+}
+
+func (a *ATNDeserializer) readATN() *ATN {
+ grammarType := a.readInt()
+ maxTokenType := a.readInt()
+
+ return NewATN(grammarType, maxTokenType)
+}
+
+func (a *ATNDeserializer) readStates(atn *ATN) {
+ nstates := a.readInt()
+
+ // Allocate worst case size.
+ loopBackStateNumbers := make([]loopEndStateIntPair, 0, nstates)
+ endStateNumbers := make([]blockStartStateIntPair, 0, nstates)
+
+ // Preallocate states slice.
+ atn.states = make([]ATNState, 0, nstates)
+
+ for i := 0; i < nstates; i++ {
+ stype := a.readInt()
+
+ // Ignore bad types of states
+ if stype == ATNStateInvalidType {
+ atn.addState(nil)
+ continue
+ }
+
+ ruleIndex := a.readInt()
+
+ s := a.stateFactory(stype, ruleIndex)
+
+ if stype == ATNStateLoopEnd {
+ loopBackStateNumber := a.readInt()
+
+ loopBackStateNumbers = append(loopBackStateNumbers, loopEndStateIntPair{s.(*LoopEndState), loopBackStateNumber})
+ } else if s2, ok := s.(BlockStartState); ok {
+ endStateNumber := a.readInt()
+
+ endStateNumbers = append(endStateNumbers, blockStartStateIntPair{s2, endStateNumber})
+ }
+
+ atn.addState(s)
+ }
+
+ // Delay the assignment of loop back and end states until we know all the state
+ // instances have been initialized
+ for _, pair := range loopBackStateNumbers {
+ pair.item0.loopBackState = atn.states[pair.item1]
+ }
+
+ for _, pair := range endStateNumbers {
+ pair.item0.setEndState(atn.states[pair.item1].(*BlockEndState))
+ }
+
+ numNonGreedyStates := a.readInt()
+ for j := 0; j < numNonGreedyStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(DecisionState).setNonGreedy(true)
+ }
+
+ numPrecedenceStates := a.readInt()
+ for j := 0; j < numPrecedenceStates; j++ {
+ stateNumber := a.readInt()
+
+ atn.states[stateNumber].(*RuleStartState).isPrecedenceRule = true
+ }
+}
+
+func (a *ATNDeserializer) readRules(atn *ATN) {
+ nrules := a.readInt()
+
+ if atn.grammarType == ATNTypeLexer {
+ atn.ruleToTokenType = make([]int, nrules)
+ }
+
+ atn.ruleToStartState = make([]*RuleStartState, nrules)
+
+ for i := range atn.ruleToStartState {
+ s := a.readInt()
+ startState := atn.states[s].(*RuleStartState)
+
+ atn.ruleToStartState[i] = startState
+
+ if atn.grammarType == ATNTypeLexer {
+ tokenType := a.readInt()
+
+ atn.ruleToTokenType[i] = tokenType
+ }
+ }
+
+ atn.ruleToStopState = make([]*RuleStopState, nrules)
+
+ for _, state := range atn.states {
+ if s2, ok := state.(*RuleStopState); ok {
+ atn.ruleToStopState[s2.ruleIndex] = s2
+ atn.ruleToStartState[s2.ruleIndex].stopState = s2
+ }
+ }
+}
+
+func (a *ATNDeserializer) readModes(atn *ATN) {
+ nmodes := a.readInt()
+ atn.modeToStartState = make([]*TokensStartState, nmodes)
+
+ for i := range atn.modeToStartState {
+ s := a.readInt()
+
+ atn.modeToStartState[i] = atn.states[s].(*TokensStartState)
+ }
+}
+
+func (a *ATNDeserializer) readSets(atn *ATN, sets []*IntervalSet) []*IntervalSet {
+ m := a.readInt()
+
+ // Preallocate the needed capacity.
+ if cap(sets)-len(sets) < m {
+ isets := make([]*IntervalSet, len(sets), len(sets)+m)
+ copy(isets, sets)
+ sets = isets
+ }
+
+ for i := 0; i < m; i++ {
+ iset := NewIntervalSet()
+
+ sets = append(sets, iset)
+
+ n := a.readInt()
+ containsEOF := a.readInt()
+
+ if containsEOF != 0 {
+ iset.addOne(-1)
+ }
+
+ for j := 0; j < n; j++ {
+ i1 := a.readInt()
+ i2 := a.readInt()
+
+ iset.addRange(i1, i2)
+ }
+ }
+
+ return sets
+}
+
+func (a *ATNDeserializer) readEdges(atn *ATN, sets []*IntervalSet) {
+ nedges := a.readInt()
+
+ for i := 0; i < nedges; i++ {
+ var (
+ src = a.readInt()
+ trg = a.readInt()
+ ttype = a.readInt()
+ arg1 = a.readInt()
+ arg2 = a.readInt()
+ arg3 = a.readInt()
+ trans = a.edgeFactory(atn, ttype, src, trg, arg1, arg2, arg3, sets)
+ srcState = atn.states[src]
+ )
+
+ srcState.AddTransition(trans, -1)
+ }
+
+ // Edges for rule stop states can be derived, so they are not serialized
+ for _, state := range atn.states {
+ for _, t := range state.GetTransitions() {
+ var rt, ok = t.(*RuleTransition)
+
+ if !ok {
+ continue
+ }
+
+ outermostPrecedenceReturn := -1
+
+ if atn.ruleToStartState[rt.getTarget().GetRuleIndex()].isPrecedenceRule {
+ if rt.precedence == 0 {
+ outermostPrecedenceReturn = rt.getTarget().GetRuleIndex()
+ }
+ }
+
+ trans := NewEpsilonTransition(rt.followState, outermostPrecedenceReturn)
+
+ atn.ruleToStopState[rt.getTarget().GetRuleIndex()].AddTransition(trans, -1)
+ }
+ }
+
+ for _, state := range atn.states {
+ if s2, ok := state.(BlockStartState); ok {
+ // We need to know the end state to set its start state
+ if s2.getEndState() == nil {
+ panic("IllegalState")
+ }
+
+ // Block end states can only be associated to a single block start state
+ if s2.getEndState().startState != nil {
+ panic("IllegalState")
+ }
+
+ s2.getEndState().startState = state
+ }
+
+ if s2, ok := state.(*PlusLoopbackState); ok {
+ for _, t := range s2.GetTransitions() {
+ if t2, ok := t.getTarget().(*PlusBlockStartState); ok {
+ t2.loopBackState = state
+ }
+ }
+ } else if s2, ok := state.(*StarLoopbackState); ok {
+ for _, t := range s2.GetTransitions() {
+ if t2, ok := t.getTarget().(*StarLoopEntryState); ok {
+ t2.loopBackState = state
+ }
+ }
+ }
+ }
+}
+
+func (a *ATNDeserializer) readDecisions(atn *ATN) {
+ ndecisions := a.readInt()
+
+ for i := 0; i < ndecisions; i++ {
+ s := a.readInt()
+ decState := atn.states[s].(DecisionState)
+
+ atn.DecisionToState = append(atn.DecisionToState, decState)
+ decState.setDecision(i)
+ }
+}
+
+func (a *ATNDeserializer) readLexerActions(atn *ATN) {
+ if atn.grammarType == ATNTypeLexer {
+ count := a.readInt()
+
+ atn.lexerActions = make([]LexerAction, count)
+
+ for i := range atn.lexerActions {
+ actionType := a.readInt()
+ data1 := a.readInt()
+ data2 := a.readInt()
+ atn.lexerActions[i] = a.lexerActionFactory(actionType, data1, data2)
+ }
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransitions(atn *ATN) {
+ count := len(atn.ruleToStartState)
+
+ for i := 0; i < count; i++ {
+ atn.ruleToTokenType[i] = atn.maxTokenType + i + 1
+ }
+
+ for i := 0; i < count; i++ {
+ a.generateRuleBypassTransition(atn, i)
+ }
+}
+
+func (a *ATNDeserializer) generateRuleBypassTransition(atn *ATN, idx int) {
+ bypassStart := NewBasicBlockStartState()
+
+ bypassStart.ruleIndex = idx
+ atn.addState(bypassStart)
+
+ bypassStop := NewBlockEndState()
+
+ bypassStop.ruleIndex = idx
+ atn.addState(bypassStop)
+
+ bypassStart.endState = bypassStop
+
+ atn.defineDecisionState(bypassStart.BaseDecisionState)
+
+ bypassStop.startState = bypassStart
+
+ var excludeTransition Transition
+ var endState ATNState
+
+ if atn.ruleToStartState[idx].isPrecedenceRule {
+ // Wrap from the beginning of the rule to the StarLoopEntryState
+ endState = nil
+
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ if a.stateIsEndStateFor(state, idx) != nil {
+ endState = state
+ excludeTransition = state.(*StarLoopEntryState).loopBackState.GetTransitions()[0]
+
+ break
+ }
+ }
+
+ if excludeTransition == nil {
+ panic("Couldn't identify final state of the precedence rule prefix section.")
+ }
+ } else {
+ endState = atn.ruleToStopState[idx]
+ }
+
+ // All non-excluded transitions that currently target end state need to target
+ // blockEnd instead
+ for i := 0; i < len(atn.states); i++ {
+ state := atn.states[i]
+
+ for j := 0; j < len(state.GetTransitions()); j++ {
+ transition := state.GetTransitions()[j]
+
+ if transition == excludeTransition {
+ continue
+ }
+
+ if transition.getTarget() == endState {
+ transition.setTarget(bypassStop)
+ }
+ }
+ }
+
+ // All transitions leaving the rule start state need to leave blockStart instead
+ ruleToStartState := atn.ruleToStartState[idx]
+ count := len(ruleToStartState.GetTransitions())
+
+ for count > 0 {
+ bypassStart.AddTransition(ruleToStartState.GetTransitions()[count-1], -1)
+ ruleToStartState.SetTransitions([]Transition{ruleToStartState.GetTransitions()[len(ruleToStartState.GetTransitions())-1]})
+ }
+
+ // Link the new states
+ atn.ruleToStartState[idx].AddTransition(NewEpsilonTransition(bypassStart, -1), -1)
+ bypassStop.AddTransition(NewEpsilonTransition(endState, -1), -1)
+
+ MatchState := NewBasicState()
+
+ atn.addState(MatchState)
+ MatchState.AddTransition(NewAtomTransition(bypassStop, atn.ruleToTokenType[idx]), -1)
+ bypassStart.AddTransition(NewEpsilonTransition(MatchState, -1), -1)
+}
+
+func (a *ATNDeserializer) stateIsEndStateFor(state ATNState, idx int) ATNState {
+ if state.GetRuleIndex() != idx {
+ return nil
+ }
+
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ return nil
+ }
+
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if _, ok := maybeLoopEndState.(*LoopEndState); !ok {
+ return nil
+ }
+
+ var _, ok = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if maybeLoopEndState.(*LoopEndState).epsilonOnlyTransitions && ok {
+ return state
+ }
+
+ return nil
+}
+
+// markPrecedenceDecisions analyzes the StarLoopEntryState states in the
+// specified ATN to set the StarLoopEntryState.precedenceRuleDecision field to
+// the correct value.
+func (a *ATNDeserializer) markPrecedenceDecisions(atn *ATN) {
+ for _, state := range atn.states {
+ if _, ok := state.(*StarLoopEntryState); !ok {
+ continue
+ }
+
+ // We analyze the ATN to determine if a ATN decision state is the
+ // decision for the closure block that determines whether a
+ // precedence rule should continue or complete.
+ if atn.ruleToStartState[state.GetRuleIndex()].isPrecedenceRule {
+ maybeLoopEndState := state.GetTransitions()[len(state.GetTransitions())-1].getTarget()
+
+ if s3, ok := maybeLoopEndState.(*LoopEndState); ok {
+ var _, ok2 = maybeLoopEndState.GetTransitions()[0].getTarget().(*RuleStopState)
+
+ if s3.epsilonOnlyTransitions && ok2 {
+ state.(*StarLoopEntryState).precedenceRuleDecision = true
+ }
+ }
+ }
+ }
+}
+
+func (a *ATNDeserializer) verifyATN(atn *ATN) {
+ if !a.options.VerifyATN() {
+ return
+ }
+
+ // Verify assumptions
+ for _, state := range atn.states {
+ if state == nil {
+ continue
+ }
+
+ a.checkCondition(state.GetEpsilonOnlyTransitions() || len(state.GetTransitions()) <= 1, "")
+
+ switch s2 := state.(type) {
+ case *PlusBlockStartState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *StarLoopEntryState:
+ a.checkCondition(s2.loopBackState != nil, "")
+ a.checkCondition(len(s2.GetTransitions()) == 2, "")
+
+ switch s2.transitions[0].getTarget().(type) {
+ case *StarBlockStartState:
+ _, ok := s2.transitions[1].getTarget().(*LoopEndState)
+
+ a.checkCondition(ok, "")
+ a.checkCondition(!s2.nonGreedy, "")
+
+ case *LoopEndState:
+ var _, ok = s2.transitions[1].getTarget().(*StarBlockStartState)
+
+ a.checkCondition(ok, "")
+ a.checkCondition(s2.nonGreedy, "")
+
+ default:
+ panic("IllegalState")
+ }
+
+ case *StarLoopbackState:
+ a.checkCondition(len(state.GetTransitions()) == 1, "")
+
+ var _, ok = state.GetTransitions()[0].getTarget().(*StarLoopEntryState)
+
+ a.checkCondition(ok, "")
+
+ case *LoopEndState:
+ a.checkCondition(s2.loopBackState != nil, "")
+
+ case *RuleStartState:
+ a.checkCondition(s2.stopState != nil, "")
+
+ case BlockStartState:
+ a.checkCondition(s2.getEndState() != nil, "")
+
+ case *BlockEndState:
+ a.checkCondition(s2.startState != nil, "")
+
+ case DecisionState:
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || s2.getDecision() >= 0, "")
+
+ default:
+ var _, ok = s2.(*RuleStopState)
+
+ a.checkCondition(len(s2.GetTransitions()) <= 1 || ok, "")
+ }
+ }
+}
+
+func (a *ATNDeserializer) checkCondition(condition bool, message string) {
+ if !condition {
+ if message == "" {
+ message = "IllegalState"
+ }
+
+ panic(message)
+ }
+}
+
+func (a *ATNDeserializer) readInt() int {
+ v := a.data[a.pos]
+
+ a.pos++
+
+ return int(v) // data is 32 bits but int is at least that big
+}
+
+func (a *ATNDeserializer) edgeFactory(atn *ATN, typeIndex, src, trg, arg1, arg2, arg3 int, sets []*IntervalSet) Transition {
+ target := atn.states[trg]
+
+ switch typeIndex {
+ case TransitionEPSILON:
+ return NewEpsilonTransition(target, -1)
+
+ case TransitionRANGE:
+ if arg3 != 0 {
+ return NewRangeTransition(target, TokenEOF, arg2)
+ }
+
+ return NewRangeTransition(target, arg1, arg2)
+
+ case TransitionRULE:
+ return NewRuleTransition(atn.states[arg1], arg2, arg3, target)
+
+ case TransitionPREDICATE:
+ return NewPredicateTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionPRECEDENCE:
+ return NewPrecedencePredicateTransition(target, arg1)
+
+ case TransitionATOM:
+ if arg3 != 0 {
+ return NewAtomTransition(target, TokenEOF)
+ }
+
+ return NewAtomTransition(target, arg1)
+
+ case TransitionACTION:
+ return NewActionTransition(target, arg1, arg2, arg3 != 0)
+
+ case TransitionSET:
+ return NewSetTransition(target, sets[arg1])
+
+ case TransitionNOTSET:
+ return NewNotSetTransition(target, sets[arg1])
+
+ case TransitionWILDCARD:
+ return NewWildcardTransition(target)
+ }
+
+ panic("The specified transition type is not valid.")
+}
+
+func (a *ATNDeserializer) stateFactory(typeIndex, ruleIndex int) ATNState {
+ var s ATNState
+
+ switch typeIndex {
+ case ATNStateInvalidType:
+ return nil
+
+ case ATNStateBasic:
+ s = NewBasicState()
+
+ case ATNStateRuleStart:
+ s = NewRuleStartState()
+
+ case ATNStateBlockStart:
+ s = NewBasicBlockStartState()
+
+ case ATNStatePlusBlockStart:
+ s = NewPlusBlockStartState()
+
+ case ATNStateStarBlockStart:
+ s = NewStarBlockStartState()
+
+ case ATNStateTokenStart:
+ s = NewTokensStartState()
+
+ case ATNStateRuleStop:
+ s = NewRuleStopState()
+
+ case ATNStateBlockEnd:
+ s = NewBlockEndState()
+
+ case ATNStateStarLoopBack:
+ s = NewStarLoopbackState()
+
+ case ATNStateStarLoopEntry:
+ s = NewStarLoopEntryState()
+
+ case ATNStatePlusLoopBack:
+ s = NewPlusLoopbackState()
+
+ case ATNStateLoopEnd:
+ s = NewLoopEndState()
+
+ default:
+ panic(fmt.Sprintf("state type %d is invalid", typeIndex))
+ }
+
+ s.SetRuleIndex(ruleIndex)
+
+ return s
+}
+
+func (a *ATNDeserializer) lexerActionFactory(typeIndex, data1, data2 int) LexerAction {
+ switch typeIndex {
+ case LexerActionTypeChannel:
+ return NewLexerChannelAction(data1)
+
+ case LexerActionTypeCustom:
+ return NewLexerCustomAction(data1, data2)
+
+ case LexerActionTypeMode:
+ return NewLexerModeAction(data1)
+
+ case LexerActionTypeMore:
+ return LexerMoreActionINSTANCE
+
+ case LexerActionTypePopMode:
+ return LexerPopModeActionINSTANCE
+
+ case LexerActionTypePushMode:
+ return NewLexerPushModeAction(data1)
+
+ case LexerActionTypeSkip:
+ return LexerSkipActionINSTANCE
+
+ case LexerActionTypeType:
+ return NewLexerTypeAction(data1)
+
+ default:
+ panic(fmt.Sprintf("lexer action %d is invalid", typeIndex))
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
new file mode 100644
index 000000000..d5454d6d5
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_simulator.go
@@ -0,0 +1,50 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+var ATNSimulatorError = NewDFAState(0x7FFFFFFF, NewBaseATNConfigSet(false))
+
+type IATNSimulator interface {
+ SharedContextCache() *PredictionContextCache
+ ATN() *ATN
+ DecisionToDFA() []*DFA
+}
+
+type BaseATNSimulator struct {
+ atn *ATN
+ sharedContextCache *PredictionContextCache
+ decisionToDFA []*DFA
+}
+
+func NewBaseATNSimulator(atn *ATN, sharedContextCache *PredictionContextCache) *BaseATNSimulator {
+ b := new(BaseATNSimulator)
+
+ b.atn = atn
+ b.sharedContextCache = sharedContextCache
+
+ return b
+}
+
+func (b *BaseATNSimulator) getCachedContext(context PredictionContext) PredictionContext {
+ if b.sharedContextCache == nil {
+ return context
+ }
+
+ visited := make(map[PredictionContext]PredictionContext)
+
+ return getCachedBasePredictionContext(context, b.sharedContextCache, visited)
+}
+
+func (b *BaseATNSimulator) SharedContextCache() *PredictionContextCache {
+ return b.sharedContextCache
+}
+
+func (b *BaseATNSimulator) ATN() *ATN {
+ return b.atn
+}
+
+func (b *BaseATNSimulator) DecisionToDFA() []*DFA {
+ return b.decisionToDFA
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
new file mode 100644
index 000000000..3835bb2e9
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_state.go
@@ -0,0 +1,392 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "strconv"
+
+// Constants for serialization.
+const (
+ ATNStateInvalidType = 0
+ ATNStateBasic = 1
+ ATNStateRuleStart = 2
+ ATNStateBlockStart = 3
+ ATNStatePlusBlockStart = 4
+ ATNStateStarBlockStart = 5
+ ATNStateTokenStart = 6
+ ATNStateRuleStop = 7
+ ATNStateBlockEnd = 8
+ ATNStateStarLoopBack = 9
+ ATNStateStarLoopEntry = 10
+ ATNStatePlusLoopBack = 11
+ ATNStateLoopEnd = 12
+
+ ATNStateInvalidStateNumber = -1
+)
+
+var ATNStateInitialNumTransitions = 4
+
+type ATNState interface {
+ GetEpsilonOnlyTransitions() bool
+
+ GetRuleIndex() int
+ SetRuleIndex(int)
+
+ GetNextTokenWithinRule() *IntervalSet
+ SetNextTokenWithinRule(*IntervalSet)
+
+ GetATN() *ATN
+ SetATN(*ATN)
+
+ GetStateType() int
+
+ GetStateNumber() int
+ SetStateNumber(int)
+
+ GetTransitions() []Transition
+ SetTransitions([]Transition)
+ AddTransition(Transition, int)
+
+ String() string
+ hash() int
+}
+
+type BaseATNState struct {
+ // NextTokenWithinRule caches lookahead during parsing. Not used during construction.
+ NextTokenWithinRule *IntervalSet
+
+ // atn is the current ATN.
+ atn *ATN
+
+ epsilonOnlyTransitions bool
+
+ // ruleIndex tracks the Rule index because there are no Rule objects at runtime.
+ ruleIndex int
+
+ stateNumber int
+
+ stateType int
+
+ // Track the transitions emanating from this ATN state.
+ transitions []Transition
+}
+
+func NewBaseATNState() *BaseATNState {
+ return &BaseATNState{stateNumber: ATNStateInvalidStateNumber, stateType: ATNStateInvalidType}
+}
+
+func (as *BaseATNState) GetRuleIndex() int {
+ return as.ruleIndex
+}
+
+func (as *BaseATNState) SetRuleIndex(v int) {
+ as.ruleIndex = v
+}
+func (as *BaseATNState) GetEpsilonOnlyTransitions() bool {
+ return as.epsilonOnlyTransitions
+}
+
+func (as *BaseATNState) GetATN() *ATN {
+ return as.atn
+}
+
+func (as *BaseATNState) SetATN(atn *ATN) {
+ as.atn = atn
+}
+
+func (as *BaseATNState) GetTransitions() []Transition {
+ return as.transitions
+}
+
+func (as *BaseATNState) SetTransitions(t []Transition) {
+ as.transitions = t
+}
+
+func (as *BaseATNState) GetStateType() int {
+ return as.stateType
+}
+
+func (as *BaseATNState) GetStateNumber() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) SetStateNumber(stateNumber int) {
+ as.stateNumber = stateNumber
+}
+
+func (as *BaseATNState) GetNextTokenWithinRule() *IntervalSet {
+ return as.NextTokenWithinRule
+}
+
+func (as *BaseATNState) SetNextTokenWithinRule(v *IntervalSet) {
+ as.NextTokenWithinRule = v
+}
+
+func (as *BaseATNState) hash() int {
+ return as.stateNumber
+}
+
+func (as *BaseATNState) String() string {
+ return strconv.Itoa(as.stateNumber)
+}
+
+func (as *BaseATNState) equals(other interface{}) bool {
+ if ot, ok := other.(ATNState); ok {
+ return as.stateNumber == ot.GetStateNumber()
+ }
+
+ return false
+}
+
+func (as *BaseATNState) isNonGreedyExitState() bool {
+ return false
+}
+
+func (as *BaseATNState) AddTransition(trans Transition, index int) {
+ if len(as.transitions) == 0 {
+ as.epsilonOnlyTransitions = trans.getIsEpsilon()
+ } else if as.epsilonOnlyTransitions != trans.getIsEpsilon() {
+ as.epsilonOnlyTransitions = false
+ }
+
+ if index == -1 {
+ as.transitions = append(as.transitions, trans)
+ } else {
+ as.transitions = append(as.transitions[:index], append([]Transition{trans}, as.transitions[index:]...)...)
+ // TODO: as.transitions.splice(index, 1, trans)
+ }
+}
+
+type BasicState struct {
+ *BaseATNState
+}
+
+func NewBasicState() *BasicState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBasic
+
+ return &BasicState{BaseATNState: b}
+}
+
+type DecisionState interface {
+ ATNState
+
+ getDecision() int
+ setDecision(int)
+
+ getNonGreedy() bool
+ setNonGreedy(bool)
+}
+
+type BaseDecisionState struct {
+ *BaseATNState
+ decision int
+ nonGreedy bool
+}
+
+func NewBaseDecisionState() *BaseDecisionState {
+ return &BaseDecisionState{BaseATNState: NewBaseATNState(), decision: -1}
+}
+
+func (s *BaseDecisionState) getDecision() int {
+ return s.decision
+}
+
+func (s *BaseDecisionState) setDecision(b int) {
+ s.decision = b
+}
+
+func (s *BaseDecisionState) getNonGreedy() bool {
+ return s.nonGreedy
+}
+
+func (s *BaseDecisionState) setNonGreedy(b bool) {
+ s.nonGreedy = b
+}
+
+type BlockStartState interface {
+ DecisionState
+
+ getEndState() *BlockEndState
+ setEndState(*BlockEndState)
+}
+
+// BaseBlockStartState is the start of a regular (...) block.
+type BaseBlockStartState struct {
+ *BaseDecisionState
+ endState *BlockEndState
+}
+
+func NewBlockStartState() *BaseBlockStartState {
+ return &BaseBlockStartState{BaseDecisionState: NewBaseDecisionState()}
+}
+
+func (s *BaseBlockStartState) getEndState() *BlockEndState {
+ return s.endState
+}
+
+func (s *BaseBlockStartState) setEndState(b *BlockEndState) {
+ s.endState = b
+}
+
+type BasicBlockStartState struct {
+ *BaseBlockStartState
+}
+
+func NewBasicBlockStartState() *BasicBlockStartState {
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateBlockStart
+
+ return &BasicBlockStartState{BaseBlockStartState: b}
+}
+
+var _ BlockStartState = &BasicBlockStartState{}
+
+// BlockEndState is a terminal node of a simple (a|b|c) block.
+type BlockEndState struct {
+ *BaseATNState
+ startState ATNState
+}
+
+func NewBlockEndState() *BlockEndState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateBlockEnd
+
+ return &BlockEndState{BaseATNState: b}
+}
+
+// RuleStopState is the last node in the ATN for a rule, unless that rule is the
+// start symbol. In that case, there is one transition to EOF. Later, we might
+// encode references to all calls to this rule to compute FOLLOW sets for error
+// handling.
+type RuleStopState struct {
+ *BaseATNState
+}
+
+func NewRuleStopState() *RuleStopState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStop
+
+ return &RuleStopState{BaseATNState: b}
+}
+
+type RuleStartState struct {
+ *BaseATNState
+ stopState ATNState
+ isPrecedenceRule bool
+}
+
+func NewRuleStartState() *RuleStartState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateRuleStart
+
+ return &RuleStartState{BaseATNState: b}
+}
+
+// PlusLoopbackState is a decision state for A+ and (A|B)+. It has two
+// transitions: one to the loop back to start of the block, and one to exit.
+type PlusLoopbackState struct {
+ *BaseDecisionState
+}
+
+func NewPlusLoopbackState() *PlusLoopbackState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStatePlusLoopBack
+
+ return &PlusLoopbackState{BaseDecisionState: b}
+}
+
+// PlusBlockStartState is the start of a (A|B|...)+ loop. Technically it is a
+// decision state; we don't use it for code generation. Somebody might need it,
+// so it is included for completeness. In reality, PlusLoopbackState is the real
+// decision-making node for A+.
+type PlusBlockStartState struct {
+ *BaseBlockStartState
+ loopBackState ATNState
+}
+
+func NewPlusBlockStartState() *PlusBlockStartState {
+ b := NewBlockStartState()
+
+ b.stateType = ATNStatePlusBlockStart
+
+ return &PlusBlockStartState{BaseBlockStartState: b}
+}
+
+var _ BlockStartState = &PlusBlockStartState{}
+
+// StarBlockStartState is the block that begins a closure loop.
+type StarBlockStartState struct {
+ *BaseBlockStartState
+}
+
+func NewStarBlockStartState() *StarBlockStartState {
+ b := NewBlockStartState()
+
+ b.stateType = ATNStateStarBlockStart
+
+ return &StarBlockStartState{BaseBlockStartState: b}
+}
+
+var _ BlockStartState = &StarBlockStartState{}
+
+type StarLoopbackState struct {
+ *BaseATNState
+}
+
+func NewStarLoopbackState() *StarLoopbackState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateStarLoopBack
+
+ return &StarLoopbackState{BaseATNState: b}
+}
+
+type StarLoopEntryState struct {
+ *BaseDecisionState
+ loopBackState ATNState
+ precedenceRuleDecision bool
+}
+
+func NewStarLoopEntryState() *StarLoopEntryState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateStarLoopEntry
+
+ // precedenceRuleDecision indicates whether this state can benefit from a precedence DFA during SLL decision making. It defaults to false.
+ return &StarLoopEntryState{BaseDecisionState: b}
+}
+
+// LoopEndState marks the end of a * or + loop.
+type LoopEndState struct {
+ *BaseATNState
+ loopBackState ATNState
+}
+
+func NewLoopEndState() *LoopEndState {
+ b := NewBaseATNState()
+
+ b.stateType = ATNStateLoopEnd
+
+ return &LoopEndState{BaseATNState: b}
+}
+
+// TokensStartState is the Tokens rule start state linking to each lexer rule start state.
+type TokensStartState struct {
+ *BaseDecisionState
+}
+
+func NewTokensStartState() *TokensStartState {
+ b := NewBaseDecisionState()
+
+ b.stateType = ATNStateTokenStart
+
+ return &TokensStartState{BaseDecisionState: b}
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
new file mode 100644
index 000000000..a7b48976b
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/atn_type.go
@@ -0,0 +1,11 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// Represents the type of recognizer an ATN applies to.
+const (
+ ATNTypeLexer = 0
+ ATNTypeParser = 1
+)
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
new file mode 100644
index 000000000..70c1207f7
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/char_stream.go
@@ -0,0 +1,12 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type CharStream interface {
+ IntStream
+ GetText(int, int) string
+ GetTextFromTokens(start, end Token) string
+ GetTextFromInterval(*Interval) string
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
new file mode 100644
index 000000000..330ff8f31
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_factory.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// TokenFactory creates CommonToken objects.
+type TokenFactory interface {
+ Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token
+}
+
+// CommonTokenFactory is the default TokenFactory implementation.
+type CommonTokenFactory struct {
+ // copyText indicates whether CommonToken.setText should be called after
+ // constructing tokens to explicitly set the text. This is useful for cases
+ // where the input stream might not be able to provide arbitrary substrings of
+ // text from the input after the lexer creates a token (e.g. the
+ // implementation of CharStream.GetText in UnbufferedCharStream panics with an
+ // UnsupportedOperationException). Explicitly setting the token text allows
+ // Token.GetText to be called at any time regardless of the input stream
+ // implementation.
+ //
+ // The default value is false to avoid the performance and memory overhead of
+ // copying text for every token unless explicitly requested.
+ copyText bool
+}
+
+func NewCommonTokenFactory(copyText bool) *CommonTokenFactory {
+ return &CommonTokenFactory{copyText: copyText}
+}
+
+// CommonTokenFactoryDEFAULT is the default CommonTokenFactory. It does not
+// explicitly copy token text when constructing tokens.
+var CommonTokenFactoryDEFAULT = NewCommonTokenFactory(false)
+
+func (c *CommonTokenFactory) Create(source *TokenSourceCharStreamPair, ttype int, text string, channel, start, stop, line, column int) Token {
+ t := NewCommonToken(source, ttype, channel, start, stop)
+
+ t.line = line
+ t.column = column
+
+ if text != "" {
+ t.SetText(text)
+ } else if c.copyText && source.charStream != nil {
+ t.SetText(source.charStream.GetTextFromInterval(NewInterval(start, stop)))
+ }
+
+ return t
+}
+
+func (c *CommonTokenFactory) createThin(ttype int, text string) Token {
+ t := NewCommonToken(nil, ttype, TokenDefaultChannel, -1, -1)
+ t.SetText(text)
+
+ return t
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
new file mode 100644
index 000000000..c90e9b890
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/common_token_stream.go
@@ -0,0 +1,447 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+// CommonTokenStream is an implementation of TokenStream that loads tokens from
+// a TokenSource on-demand and places the tokens in a buffer to provide access
+// to any previous token by index. This token stream ignores the value of
+// Token.getChannel. If your parser requires the token stream to filter tokens to
+// only those on a particular channel, such as Token.DEFAULT_CHANNEL or
+// Token.HIDDEN_CHANNEL, use a filtering token stream such as CommonTokenStream.
+type CommonTokenStream struct {
+ channel int
+
+ // fetchedEOF indicates whether the Token.EOF token has been fetched from
+ // tokenSource and added to tokens. This field improves performance for the
+ // following cases:
+ //
+ // consume: The lookahead check in consume to prevent consuming the EOF symbol is
+ // optimized by checking the values of fetchedEOF and p instead of calling LA.
+ //
+ // fetch: The check to prevent adding multiple EOF symbols into tokens is
+ // trivial with this field.
+ fetchedEOF bool
+
+ // index is the index into tokens of the current token (the next token to consume).
+ // tokens[p] should be LT(1). It is set to -1 when the stream is first
+ // constructed or when SetTokenSource is called, indicating that the first token
+ // has not yet been fetched from the token source. For additional information,
+ // see the documentation of IntStream for a description of initializing methods.
+ index int
+
+ // tokenSource is the TokenSource from which tokens for this stream are
+ // fetched.
+ tokenSource TokenSource
+
+ // tokens is all tokens fetched from the token source. The list is considered a
+ // complete view of the input once fetchedEOF is set to true.
+ tokens []Token
+}
+
+func NewCommonTokenStream(lexer Lexer, channel int) *CommonTokenStream {
+ return &CommonTokenStream{
+ channel: channel,
+ index: -1,
+ tokenSource: lexer,
+ tokens: make([]Token, 0),
+ }
+}
+
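+// Illustrative sketch (not part of the upstream ANTLR source): typical client
+// code wires a generated lexer into a CommonTokenStream and then either lets the
+// parser pull tokens lazily or forces them all in with Fill. "mylexer.NewMyLexer"
+// is a hypothetical generated constructor; everything else is defined in this
+// runtime.
+//
+//   input := antlr.NewInputStream("a + b")
+//   lex := mylexer.NewMyLexer(input) // hypothetical generated lexer
+//   tokens := antlr.NewCommonTokenStream(lex, antlr.TokenDefaultChannel)
+//   tokens.Fill() // force-load every token up to and including EOF
+//   for _, t := range tokens.GetAllTokens() {
+//       fmt.Println(t.GetTokenType(), t.GetText())
+//   }
+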
+func (c *CommonTokenStream) GetAllTokens() []Token {
+ return c.tokens
+}
+
+func (c *CommonTokenStream) Mark() int {
+ return 0
+}
+
+func (c *CommonTokenStream) Release(marker int) {}
+
+func (c *CommonTokenStream) reset() {
+ c.Seek(0)
+}
+
+func (c *CommonTokenStream) Seek(index int) {
+ c.lazyInit()
+ c.index = c.adjustSeekIndex(index)
+}
+
+func (c *CommonTokenStream) Get(index int) Token {
+ c.lazyInit()
+
+ return c.tokens[index]
+}
+
+func (c *CommonTokenStream) Consume() {
+ SkipEOFCheck := false
+
+ if c.index >= 0 {
+ if c.fetchedEOF {
+ // The last token in tokens is EOF. Skip the check if p indexes any fetched
+ // token except the last.
+ SkipEOFCheck = c.index < len(c.tokens)-1
+ } else {
+ // No EOF token in tokens. Skip the check if p indexes a fetched token.
+ SkipEOFCheck = c.index < len(c.tokens)
+ }
+ } else {
+ // Not yet initialized
+ SkipEOFCheck = false
+ }
+
+ if !SkipEOFCheck && c.LA(1) == TokenEOF {
+ panic("cannot consume EOF")
+ }
+
+ if c.Sync(c.index + 1) {
+ c.index = c.adjustSeekIndex(c.index + 1)
+ }
+}
+
+// Sync makes sure index i in tokens has a token and returns true if a token is
+// located at index i and otherwise false.
+func (c *CommonTokenStream) Sync(i int) bool {
+ n := i - len(c.tokens) + 1 // TODO: How many more elements do we need?
+
+ if n > 0 {
+ fetched := c.fetch(n)
+ return fetched >= n
+ }
+
+ return true
+}
+
+// fetch adds n elements to the buffer and returns the actual number of elements
+// added to the buffer.
+func (c *CommonTokenStream) fetch(n int) int {
+ if c.fetchedEOF {
+ return 0
+ }
+
+ for i := 0; i < n; i++ {
+ t := c.tokenSource.NextToken()
+
+ t.SetTokenIndex(len(c.tokens))
+ c.tokens = append(c.tokens, t)
+
+ if t.GetTokenType() == TokenEOF {
+ c.fetchedEOF = true
+
+ return i + 1
+ }
+ }
+
+ return n
+}
+
+// GetTokens gets all tokens from start to stop inclusive.
+func (c *CommonTokenStream) GetTokens(start int, stop int, types *IntervalSet) []Token {
+ if start < 0 || stop < 0 {
+ return nil
+ }
+
+ c.lazyInit()
+
+ subset := make([]Token, 0)
+
+ if stop >= len(c.tokens) {
+ stop = len(c.tokens) - 1
+ }
+
+ for i := start; i < stop; i++ {
+ t := c.tokens[i]
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+
+ if types == nil || types.contains(t.GetTokenType()) {
+ subset = append(subset, t)
+ }
+ }
+
+ return subset
+}
+
+func (c *CommonTokenStream) LA(i int) int {
+ return c.LT(i).GetTokenType()
+}
+
+func (c *CommonTokenStream) lazyInit() {
+ if c.index == -1 {
+ c.setup()
+ }
+}
+
+func (c *CommonTokenStream) setup() {
+ c.Sync(0)
+ c.index = c.adjustSeekIndex(0)
+}
+
+func (c *CommonTokenStream) GetTokenSource() TokenSource {
+ return c.tokenSource
+}
+
+// SetTokenSource resets the token stream by setting its token source.
+func (c *CommonTokenStream) SetTokenSource(tokenSource TokenSource) {
+ c.tokenSource = tokenSource
+ c.tokens = make([]Token, 0)
+ c.index = -1
+}
+
+// NextTokenOnChannel returns the index of the next token on channel given a
+// starting index. Returns i if tokens[i] is on channel. Returns -1 if there are
+// no tokens on channel between i and EOF.
+func (c *CommonTokenStream) NextTokenOnChannel(i, channel int) int {
+ c.Sync(i)
+
+ if i >= len(c.tokens) {
+ return -1
+ }
+
+ token := c.tokens[i]
+
+ for token.GetChannel() != c.channel {
+ if token.GetTokenType() == TokenEOF {
+ return -1
+ }
+
+ i++
+ c.Sync(i)
+ token = c.tokens[i]
+ }
+
+ return i
+}
+
+// previousTokenOnChannel returns the index of the previous token on channel
+// given a starting index. Returns i if tokens[i] is on channel. Returns -1 if
+// there are no tokens on channel between i and 0.
+func (c *CommonTokenStream) previousTokenOnChannel(i, channel int) int {
+ for i >= 0 && c.tokens[i].GetChannel() != channel {
+ i--
+ }
+
+ return i
+}
+
+// GetHiddenTokensToRight collects all tokens on a specified channel to the
+// right of the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
+// or EOF. If channel is -1, it finds any non-default channel token.
+func (c *CommonTokenStream) GetHiddenTokensToRight(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+ panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ nextOnChannel := c.NextTokenOnChannel(tokenIndex+1, LexerDefaultTokenChannel)
+ from := tokenIndex + 1
+
+ // If there is no on-channel token to the right, then nextOnChannel == -1, so set "to" to the last token
+ var to int
+
+ if nextOnChannel == -1 {
+ to = len(c.tokens) - 1
+ } else {
+ to = nextOnChannel
+ }
+
+ return c.filterForChannel(from, to, channel)
+}
+
+// GetHiddenTokensToLeft collects all tokens on channel to the left of the
+// current token until we see a token on DEFAULT_TOKEN_CHANNEL. If channel is
+// -1, it finds any non-default channel token.
+func (c *CommonTokenStream) GetHiddenTokensToLeft(tokenIndex, channel int) []Token {
+ c.lazyInit()
+
+ if tokenIndex < 0 || tokenIndex >= len(c.tokens) {
+ panic(strconv.Itoa(tokenIndex) + " not in 0.." + strconv.Itoa(len(c.tokens)-1))
+ }
+
+ prevOnChannel := c.previousTokenOnChannel(tokenIndex-1, LexerDefaultTokenChannel)
+
+ if prevOnChannel == tokenIndex-1 {
+ return nil
+ }
+
+ // If there are none on channel to the left and prevOnChannel == -1 then from = 0
+ from := prevOnChannel + 1
+ to := tokenIndex - 1
+
+ return c.filterForChannel(from, to, channel)
+}
+
+func (c *CommonTokenStream) filterForChannel(left, right, channel int) []Token {
+ hidden := make([]Token, 0)
+
+ for i := left; i < right+1; i++ {
+ t := c.tokens[i]
+
+ if channel == -1 {
+ if t.GetChannel() != LexerDefaultTokenChannel {
+ hidden = append(hidden, t)
+ }
+ } else if t.GetChannel() == channel {
+ hidden = append(hidden, t)
+ }
+ }
+
+ if len(hidden) == 0 {
+ return nil
+ }
+
+ return hidden
+}
+
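+// Illustrative sketch (not part of the upstream ANTLR source): a client that
+// wants the comments or whitespace a grammar routed to the hidden channel can ask
+// for them around any buffered token. "tok" stands for a token previously
+// returned by this stream; TokenHiddenChannel is the runtime's hidden-channel
+// constant.
+//
+//   idx := tok.GetTokenIndex()
+//   leading := tokens.GetHiddenTokensToLeft(idx, antlr.TokenHiddenChannel)
+//   trailing := tokens.GetHiddenTokensToRight(idx, antlr.TokenHiddenChannel)
+//   for _, h := range append(leading, trailing...) {
+//       fmt.Println("hidden:", h.GetText())
+//   }
+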
+func (c *CommonTokenStream) GetSourceName() string {
+ return c.tokenSource.GetSourceName()
+}
+
+func (c *CommonTokenStream) Size() int {
+ return len(c.tokens)
+}
+
+func (c *CommonTokenStream) Index() int {
+ return c.index
+}
+
+func (c *CommonTokenStream) GetAllText() string {
+ return c.GetTextFromInterval(nil)
+}
+
+func (c *CommonTokenStream) GetTextFromTokens(start, end Token) string {
+ if start == nil || end == nil {
+ return ""
+ }
+
+ return c.GetTextFromInterval(NewInterval(start.GetTokenIndex(), end.GetTokenIndex()))
+}
+
+func (c *CommonTokenStream) GetTextFromRuleContext(interval RuleContext) string {
+ return c.GetTextFromInterval(interval.GetSourceInterval())
+}
+
+func (c *CommonTokenStream) GetTextFromInterval(interval *Interval) string {
+ c.lazyInit()
+ c.Fill()
+
+ if interval == nil {
+ interval = NewInterval(0, len(c.tokens)-1)
+ }
+
+ start := interval.Start
+ stop := interval.Stop
+
+ if start < 0 || stop < 0 {
+ return ""
+ }
+
+ if stop >= len(c.tokens) {
+ stop = len(c.tokens) - 1
+ }
+
+ s := ""
+
+ for i := start; i < stop+1; i++ {
+ t := c.tokens[i]
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+
+ s += t.GetText()
+ }
+
+ return s
+}
+
+// Fill gets all tokens from the lexer until EOF.
+func (c *CommonTokenStream) Fill() {
+ c.lazyInit()
+
+ for c.fetch(1000) == 1000 {
+ continue
+ }
+}
+
+func (c *CommonTokenStream) adjustSeekIndex(i int) int {
+ return c.NextTokenOnChannel(i, c.channel)
+}
+
+func (c *CommonTokenStream) LB(k int) Token {
+ if k == 0 || c.index-k < 0 {
+ return nil
+ }
+
+ i := c.index
+ n := 1
+
+ // Find k good tokens looking backward
+ for n <= k {
+ // Skip off-channel tokens
+ i = c.previousTokenOnChannel(i-1, c.channel)
+ n++
+ }
+
+ if i < 0 {
+ return nil
+ }
+
+ return c.tokens[i]
+}
+
+func (c *CommonTokenStream) LT(k int) Token {
+ c.lazyInit()
+
+ if k == 0 {
+ return nil
+ }
+
+ if k < 0 {
+ return c.LB(-k)
+ }
+
+ i := c.index
+ n := 1 // We know tokens[n] is valid
+
+ // Find k good tokens
+ for n < k {
+ // Skip off-channel tokens, but make sure to not look past EOF
+ if c.Sync(i + 1) {
+ i = c.NextTokenOnChannel(i+1, c.channel)
+ }
+
+ n++
+ }
+
+ return c.tokens[i]
+}
+
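+// Illustrative sketch (not part of the upstream ANTLR source): LT and LA are the
+// lookahead entry points generated parsers use. LT(1) is the next on-channel
+// token (without consuming it), LT(-1) is the most recently consumed one, and
+// LA(k) is simply the token type of LT(k).
+//
+//   next := tokens.LT(1)
+//   prev := tokens.LT(-1)
+//   atEOF := tokens.LA(1) == antlr.TokenEOF
+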
+// getNumberOfOnChannelTokens returns the number of on-channel tokens; EOF is counted once.
+func (c *CommonTokenStream) getNumberOfOnChannelTokens() int {
+ var n int
+
+ c.Fill()
+
+ for i := 0; i < len(c.tokens); i++ {
+ t := c.tokens[i]
+
+ if t.GetChannel() == c.channel {
+ n++
+ }
+
+ if t.GetTokenType() == TokenEOF {
+ break
+ }
+ }
+
+ return n
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
new file mode 100644
index 000000000..d55a2a87d
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa.go
@@ -0,0 +1,170 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "sort"
+)
+
+type DFA struct {
+ // atnStartState is the ATN state in which this was created
+ atnStartState DecisionState
+
+ decision int
+
+ // states is all the DFA states. Use Map to get the old state back; Set can only
+ // indicate whether it is there.
+ states map[int]*DFAState
+
+ s0 *DFAState
+
+ // precedenceDfa is the backing field for isPrecedenceDfa and setPrecedenceDfa.
+ // True if the DFA is for a precedence decision and false otherwise.
+ precedenceDfa bool
+}
+
+func NewDFA(atnStartState DecisionState, decision int) *DFA {
+ dfa := &DFA{
+ atnStartState: atnStartState,
+ decision: decision,
+ states: make(map[int]*DFAState),
+ }
+ if s, ok := atnStartState.(*StarLoopEntryState); ok && s.precedenceRuleDecision {
+ dfa.precedenceDfa = true
+ dfa.s0 = NewDFAState(-1, NewBaseATNConfigSet(false))
+ dfa.s0.isAcceptState = false
+ dfa.s0.requiresFullContext = false
+ }
+ return dfa
+}
+
+// getPrecedenceStartState gets the start state for the current precedence and
+// returns the start state corresponding to the specified precedence if a start
+// state exists for the specified precedence and nil otherwise. d must be a
+// precedence DFA. See also isPrecedenceDfa.
+func (d *DFA) getPrecedenceStartState(precedence int) *DFAState {
+ if !d.getPrecedenceDfa() {
+ panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ // s0.edges is never nil for a precedence DFA
+ if precedence < 0 || precedence >= len(d.getS0().getEdges()) {
+ return nil
+ }
+
+ return d.getS0().getIthEdge(precedence)
+}
+
+// setPrecedenceStartState sets the start state for the current precedence. d
+// must be a precedence DFA. See also isPrecedenceDfa.
+func (d *DFA) setPrecedenceStartState(precedence int, startState *DFAState) {
+ if !d.getPrecedenceDfa() {
+ panic("only precedence DFAs may contain a precedence start state")
+ }
+
+ if precedence < 0 {
+ return
+ }
+
+ // Synchronization on s0 here is ok. When the DFA is turned into a
+ // precedence DFA, s0 will be initialized once and not updated again. s0.edges
+ // is never nil for a precedence DFA.
+ s0 := d.getS0()
+ if precedence >= s0.numEdges() {
+ edges := append(s0.getEdges(), make([]*DFAState, precedence+1-s0.numEdges())...)
+ s0.setEdges(edges)
+ d.setS0(s0)
+ }
+
+ s0.setIthEdge(precedence, startState)
+}
+
+func (d *DFA) getPrecedenceDfa() bool {
+ return d.precedenceDfa
+}
+
+// setPrecedenceDfa sets whether d is a precedence DFA. If precedenceDfa differs
+// from the current DFA configuration, then d.states is cleared, the initial
+// state s0 is set to a new DFAState with an empty outgoing DFAState.edges to
+// store the start states for individual precedence values if precedenceDfa is
+// true or nil otherwise, and d.precedenceDfa is updated.
+func (d *DFA) setPrecedenceDfa(precedenceDfa bool) {
+ if d.getPrecedenceDfa() != precedenceDfa {
+ d.setStates(make(map[int]*DFAState))
+
+ if precedenceDfa {
+ precedenceState := NewDFAState(-1, NewBaseATNConfigSet(false))
+
+ precedenceState.setEdges(make([]*DFAState, 0))
+ precedenceState.isAcceptState = false
+ precedenceState.requiresFullContext = false
+ d.setS0(precedenceState)
+ } else {
+ d.setS0(nil)
+ }
+
+ d.precedenceDfa = precedenceDfa
+ }
+}
+
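+// Illustrative sketch (not part of the upstream ANTLR source): for a precedence
+// DFA the s0.edges slice is repurposed as a table of start states indexed by
+// precedence level. Inside package antlr that looks roughly like:
+//
+//   entry := NewStarLoopEntryState()
+//   entry.precedenceRuleDecision = true
+//   dfa := NewDFA(entry, 0)
+//   dfa.setPrecedenceStartState(3, NewDFAState(7, nil))
+//   start := dfa.getPrecedenceStartState(3) // the state stored for precedence 3
+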
+func (d *DFA) getS0() *DFAState {
+ return d.s0
+}
+
+func (d *DFA) setS0(s *DFAState) {
+ d.s0 = s
+}
+
+func (d *DFA) getState(hash int) (*DFAState, bool) {
+ s, ok := d.states[hash]
+ return s, ok
+}
+
+func (d *DFA) setStates(states map[int]*DFAState) {
+ d.states = states
+}
+
+func (d *DFA) setState(hash int, state *DFAState) {
+ d.states[hash] = state
+}
+
+func (d *DFA) numStates() int {
+ return len(d.states)
+}
+
+type dfaStateList []*DFAState
+
+func (d dfaStateList) Len() int { return len(d) }
+func (d dfaStateList) Less(i, j int) bool { return d[i].stateNumber < d[j].stateNumber }
+func (d dfaStateList) Swap(i, j int) { d[i], d[j] = d[j], d[i] }
+
+// sortedStates returns the states in d sorted by their state number.
+func (d *DFA) sortedStates() []*DFAState {
+ vs := make([]*DFAState, 0, len(d.states))
+
+ for _, v := range d.states {
+ vs = append(vs, v)
+ }
+
+ sort.Sort(dfaStateList(vs))
+
+ return vs
+}
+
+func (d *DFA) String(literalNames []string, symbolicNames []string) string {
+ if d.getS0() == nil {
+ return ""
+ }
+
+ return NewDFASerializer(d, literalNames, symbolicNames).String()
+}
+
+func (d *DFA) ToLexerString() string {
+ if d.getS0() == nil {
+ return ""
+ }
+
+ return NewLexerDFASerializer(d).String()
+}
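+
+// Illustrative sketch (not part of the upstream ANTLR source): the String and
+// ToLexerString helpers above make it easy to dump whatever DFAs a recognizer has
+// built so far, e.g. while debugging prediction. "p" is a hypothetical generated
+// parser; DecisionToDFA comes from the embedded BaseATNSimulator.
+//
+//   for i, dfa := range p.GetInterpreter().DecisionToDFA() {
+//       if s := dfa.String(nil, nil); s != "" {
+//           fmt.Printf("decision %d:\n%s", i, s)
+//       }
+//   }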
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
new file mode 100644
index 000000000..bf2ccc06c
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_serializer.go
@@ -0,0 +1,158 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// DFASerializer is a DFA walker that knows how to dump them to serialized
+// strings.
+type DFASerializer struct {
+ dfa *DFA
+ literalNames []string
+ symbolicNames []string
+}
+
+func NewDFASerializer(dfa *DFA, literalNames, symbolicNames []string) *DFASerializer {
+ if literalNames == nil {
+ literalNames = make([]string, 0)
+ }
+
+ if symbolicNames == nil {
+ symbolicNames = make([]string, 0)
+ }
+
+ return &DFASerializer{
+ dfa: dfa,
+ literalNames: literalNames,
+ symbolicNames: symbolicNames,
+ }
+}
+
+func (d *DFASerializer) String() string {
+ if d.dfa.getS0() == nil {
+ return ""
+ }
+
+ buf := ""
+ states := d.dfa.sortedStates()
+
+ for _, s := range states {
+ if s.edges != nil {
+ n := len(s.edges)
+
+ for j := 0; j < n; j++ {
+ t := s.edges[j]
+
+ if t != nil && t.stateNumber != 0x7FFFFFFF {
+ buf += d.GetStateString(s)
+ buf += "-"
+ buf += d.getEdgeLabel(j)
+ buf += "->"
+ buf += d.GetStateString(t)
+ buf += "\n"
+ }
+ }
+ }
+ }
+
+ if len(buf) == 0 {
+ return ""
+ }
+
+ return buf
+}
+
+func (d *DFASerializer) getEdgeLabel(i int) string {
+ if i == 0 {
+ return "EOF"
+ } else if d.literalNames != nil && i-1 < len(d.literalNames) {
+ return d.literalNames[i-1]
+ } else if d.symbolicNames != nil && i-1 < len(d.symbolicNames) {
+ return d.symbolicNames[i-1]
+ }
+
+ return strconv.Itoa(i - 1)
+}
+
+func (d *DFASerializer) GetStateString(s *DFAState) string {
+ var a, b string
+
+ if s.isAcceptState {
+ a = ":"
+ }
+
+ if s.requiresFullContext {
+ b = "^"
+ }
+
+ baseStateStr := a + "s" + strconv.Itoa(s.stateNumber) + b
+
+ if s.isAcceptState {
+ if s.predicates != nil {
+ return baseStateStr + "=>" + fmt.Sprint(s.predicates)
+ }
+
+ return baseStateStr + "=>" + fmt.Sprint(s.prediction)
+ }
+
+ return baseStateStr
+}
+
+type LexerDFASerializer struct {
+ *DFASerializer
+}
+
+func NewLexerDFASerializer(dfa *DFA) *LexerDFASerializer {
+ return &LexerDFASerializer{DFASerializer: NewDFASerializer(dfa, nil, nil)}
+}
+
+func (l *LexerDFASerializer) getEdgeLabel(i int) string {
+ var sb strings.Builder
+ sb.Grow(6)
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(i))
+ sb.WriteByte('\'')
+ return sb.String()
+}
+
+func (l *LexerDFASerializer) String() string {
+ if l.dfa.getS0() == nil {
+ return ""
+ }
+
+ buf := ""
+ states := l.dfa.sortedStates()
+
+ for i := 0; i < len(states); i++ {
+ s := states[i]
+
+ if s.edges != nil {
+ n := len(s.edges)
+
+ for j := 0; j < n; j++ {
+ t := s.edges[j]
+
+ if t != nil && t.stateNumber != 0x7FFFFFFF {
+ buf += l.GetStateString(s)
+ buf += "-"
+ buf += l.getEdgeLabel(j)
+ buf += "->"
+ buf += l.GetStateString(t)
+ buf += "\n"
+ }
+ }
+ }
+ }
+
+ if len(buf) == 0 {
+ return ""
+ }
+
+ return buf
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
new file mode 100644
index 000000000..970ed1986
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/dfa_state.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+)
+
+// PredPrediction maps a predicate to a predicted alternative.
+type PredPrediction struct {
+ alt int
+ pred SemanticContext
+}
+
+func NewPredPrediction(pred SemanticContext, alt int) *PredPrediction {
+ return &PredPrediction{alt: alt, pred: pred}
+}
+
+func (p *PredPrediction) String() string {
+ return "(" + fmt.Sprint(p.pred) + ", " + fmt.Sprint(p.alt) + ")"
+}
+
+// DFAState represents a set of possible ATN configurations. As Aho, Sethi,
+// Ullman p. 117 says: "The DFA uses its state to keep track of all possible
+// states the ATN can be in after reading each input symbol. That is to say,
+// after reading input a1a2..an, the DFA is in a state that represents the
+// subset T of the states of the ATN that are reachable from the ATN's start
+// state along some path labeled a1a2..an." In conventional NFA-to-DFA
+// conversion, therefore, the subset T would be a bitset representing the set of
+// states the ATN could be in. We need to track the alt predicted by each state
+// as well, however. More importantly, we need to maintain a stack of states,
+// tracking the closure operations as they jump from rule to rule, emulating
+// rule invocations (method calls). I have to add a stack to simulate the proper
+// lookahead sequences for the underlying LL grammar from which the ATN was
+// derived.
+//
+// I use a set of ATNConfig objects, not simple states. An ATNConfig is both a
+// state (ala normal conversion) and a RuleContext describing the chain of rules
+// (if any) followed to arrive at that state.
+//
+// A DFAState may have multiple references to a particular state, but with
+// different ATN contexts (with same or different alts) meaning that state was
+// reached via a different set of rule invocations.
+type DFAState struct {
+ stateNumber int
+ configs ATNConfigSet
+
+ // edges elements point to the target of the symbol. Shift up by 1 so (-1)
+ // Token.EOF maps to the first element.
+ edges []*DFAState
+
+ isAcceptState bool
+
+ // prediction is the ttype we match or alt we predict if the state is accept.
+ // Set to ATN.INVALID_ALT_NUMBER when predicates != nil or
+ // requiresFullContext.
+ prediction int
+
+ lexerActionExecutor *LexerActionExecutor
+
+ // requiresFullContext indicates it was created during an SLL prediction that
+ // discovered a conflict between the configurations in the state. Future
+ // ParserATNSimulator.execATN invocations immediately jump to doing
+ // full-context prediction if true.
+ requiresFullContext bool
+
+ // predicates is the predicates associated with the ATN configurations of the
+ // DFA state during SLL parsing. When we have predicates, requiresFullContext
+ // is false, since full context prediction evaluates predicates on-the-fly. If
+ // this is not nil, then prediction is ATN.INVALID_ALT_NUMBER.
+ //
+ // We only use these for non-requiresFullContext but conflicting states. That
+ // means we know from the context (it's $ or we don't dip into outer context)
+ // that it's an ambiguity not a conflict.
+ //
+ // This list is computed by
+ // ParserATNSimulator.predicateDFAState.
+ predicates []*PredPrediction
+}
+
+func NewDFAState(stateNumber int, configs ATNConfigSet) *DFAState {
+ if configs == nil {
+ configs = NewBaseATNConfigSet(false)
+ }
+
+ return &DFAState{configs: configs, stateNumber: stateNumber}
+}
+
+// GetAltSet gets the set of all alts mentioned by all ATN configurations in d.
+func (d *DFAState) GetAltSet() Set {
+ alts := newArray2DHashSet(nil, nil)
+
+ if d.configs != nil {
+ for _, c := range d.configs.GetItems() {
+ alts.Add(c.GetAlt())
+ }
+ }
+
+ if alts.Len() == 0 {
+ return nil
+ }
+
+ return alts
+}
+
+func (d *DFAState) getEdges() []*DFAState {
+ return d.edges
+}
+
+func (d *DFAState) numEdges() int {
+ return len(d.edges)
+}
+
+func (d *DFAState) getIthEdge(i int) *DFAState {
+ return d.edges[i]
+}
+
+func (d *DFAState) setEdges(newEdges []*DFAState) {
+ d.edges = newEdges
+}
+
+func (d *DFAState) setIthEdge(i int, edge *DFAState) {
+ d.edges[i] = edge
+}
+
+func (d *DFAState) setPrediction(v int) {
+ d.prediction = v
+}
+
+// equals returns whether d equals other. Two DFAStates are equal if their ATN
+// configuration sets are the same. This method is used to see if a state
+// already exists.
+//
+// Because the number of alternatives and number of ATN configurations are
+// finite, there is a finite number of DFA states that can be processed. This is
+// necessary to show that the algorithm terminates.
+//
+// Cannot test the DFA state numbers here because in
+// ParserATNSimulator.addDFAState we need to know if any other state exists that
+// has this exact set of ATN configurations. The stateNumber is irrelevant.
+func (d *DFAState) equals(other interface{}) bool {
+ if d == other {
+ return true
+ } else if _, ok := other.(*DFAState); !ok {
+ return false
+ }
+
+ return d.configs.Equals(other.(*DFAState).configs)
+}
+
+func (d *DFAState) String() string {
+ var s string
+ if d.isAcceptState {
+ if d.predicates != nil {
+ s = "=>" + fmt.Sprint(d.predicates)
+ } else {
+ s = "=>" + fmt.Sprint(d.prediction)
+ }
+ }
+
+ return fmt.Sprintf("%d:%s%s", d.stateNumber, fmt.Sprint(d.configs), s)
+}
+
+func (d *DFAState) hash() int {
+ h := murmurInit(7)
+ h = murmurUpdate(h, d.configs.hash())
+ return murmurFinish(h, 1)
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
new file mode 100644
index 000000000..1fec43d9d
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/diagnostic_error_listener.go
@@ -0,0 +1,111 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+//
+// This implementation of {@link ANTLRErrorListener} can be used to identify
+// certain potential correctness and performance problems in grammars. "reports"
+// are made by calling {@link Parser//NotifyErrorListeners} with the appropriate
+// message.
+//
+//
+// - Ambiguities: These are cases where more than one path through the
+// grammar can Match the input.
+// - Weak context sensitivity: These are cases where full-context
+// prediction resolved an SLL conflict to a unique alternative which equaled the
+// minimum alternative of the SLL conflict.
+// - Strong (forced) context sensitivity: These are cases where the
+// full-context prediction resolved an SLL conflict to a unique alternative,
+// and the minimum alternative of the SLL conflict was found to not be
+// a truly viable alternative. Two-stage parsing cannot be used for inputs where
+// this situation occurs.
+//
+
+type DiagnosticErrorListener struct {
+ *DefaultErrorListener
+
+ exactOnly bool
+}
+
+func NewDiagnosticErrorListener(exactOnly bool) *DiagnosticErrorListener {
+
+ n := new(DiagnosticErrorListener)
+
+ // whether all ambiguities or only exact ambiguities are Reported.
+ n.exactOnly = exactOnly
+ return n
+}
+
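+// Illustrative sketch (not part of the upstream ANTLR source): ambiguity
+// diagnostics are opt-in. A client attaches this listener to a hypothetical
+// generated parser "p" alongside (or instead of) the default console listener:
+//
+//   p.RemoveErrorListeners()
+//   p.AddErrorListener(antlr.NewConsoleErrorListener())
+//   p.AddErrorListener(antlr.NewDiagnosticErrorListener(true)) // exact ambiguities only
+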
+func (d *DiagnosticErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+ if d.exactOnly && !exact {
+ return
+ }
+ msg := "reportAmbiguity d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ": ambigAlts=" +
+ d.getConflictingAlts(ambigAlts, configs).String() +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+
+ msg := "reportAttemptingFullContext d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+ msg := "reportContextSensitivity d=" +
+ d.getDecisionDescription(recognizer, dfa) +
+ ", input='" +
+ recognizer.GetTokenStream().GetTextFromInterval(NewInterval(startIndex, stopIndex)) + "'"
+ recognizer.NotifyErrorListeners(msg, nil, nil)
+}
+
+func (d *DiagnosticErrorListener) getDecisionDescription(recognizer Parser, dfa *DFA) string {
+ decision := dfa.decision
+ ruleIndex := dfa.atnStartState.GetRuleIndex()
+
+ ruleNames := recognizer.GetRuleNames()
+ if ruleIndex < 0 || ruleIndex >= len(ruleNames) {
+ return strconv.Itoa(decision)
+ }
+ ruleName := ruleNames[ruleIndex]
+ if ruleName == "" {
+ return strconv.Itoa(decision)
+ }
+ return strconv.Itoa(decision) + " (" + ruleName + ")"
+}
+
+//
+// Computes the set of conflicting or ambiguous alternatives from a
+// configuration set, if that information was not already provided by the
+// parser.
+//
+// @param ReportedAlts The set of conflicting or ambiguous alternatives, as
+// Reported by the parser.
+// @param configs The conflicting or ambiguous configuration set.
+// @return Returns {@code ReportedAlts} if it is not {@code nil}, otherwise
+// returns the set of alternatives represented in {@code configs}.
+//
+func (d *DiagnosticErrorListener) getConflictingAlts(ReportedAlts *BitSet, set ATNConfigSet) *BitSet {
+ if ReportedAlts != nil {
+ return ReportedAlts
+ }
+ result := NewBitSet()
+ for _, c := range set.GetItems() {
+ result.add(c.GetAlt())
+ }
+
+ return result
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
new file mode 100644
index 000000000..028e1a9d7
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_listener.go
@@ -0,0 +1,108 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "os"
+ "strconv"
+)
+
+// Provides an empty default implementation of {@link ANTLRErrorListener}. The
+// default implementation of each method does nothing, but can be overridden as
+// necessary.
+
+type ErrorListener interface {
+ SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException)
+ ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet)
+ ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet)
+ ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet)
+}
+
+type DefaultErrorListener struct {
+}
+
+func NewDefaultErrorListener() *DefaultErrorListener {
+ return new(DefaultErrorListener)
+}
+
+func (d *DefaultErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+}
+
+func (d *DefaultErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+}
+
+func (d *DefaultErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+}
+
+type ConsoleErrorListener struct {
+ *DefaultErrorListener
+}
+
+func NewConsoleErrorListener() *ConsoleErrorListener {
+ return new(ConsoleErrorListener)
+}
+
+//
+// Provides a default instance of {@link ConsoleErrorListener}.
+//
+var ConsoleErrorListenerINSTANCE = NewConsoleErrorListener()
+
+//
+// {@inheritDoc}
+//
+//
+// This implementation prints messages to {@link System//err} containing the
+// values of {@code line}, {@code charPositionInLine}, and {@code msg} using
+// the following format.
+//
+//
+// line line:charPositionInLine msg
+//
+//
+func (c *ConsoleErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ fmt.Fprintln(os.Stderr, "line "+strconv.Itoa(line)+":"+strconv.Itoa(column)+" "+msg)
+}
+
+type ProxyErrorListener struct {
+ *DefaultErrorListener
+ delegates []ErrorListener
+}
+
+func NewProxyErrorListener(delegates []ErrorListener) *ProxyErrorListener {
+ if delegates == nil {
+ panic("delegates is not provided")
+ }
+ l := new(ProxyErrorListener)
+ l.delegates = delegates
+ return l
+}
+
+func (p *ProxyErrorListener) SyntaxError(recognizer Recognizer, offendingSymbol interface{}, line, column int, msg string, e RecognitionException) {
+ for _, d := range p.delegates {
+ d.SyntaxError(recognizer, offendingSymbol, line, column, msg, e)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAmbiguity(recognizer Parser, dfa *DFA, startIndex, stopIndex int, exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportAttemptingFullContext(recognizer Parser, dfa *DFA, startIndex, stopIndex int, conflictingAlts *BitSet, configs ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+func (p *ProxyErrorListener) ReportContextSensitivity(recognizer Parser, dfa *DFA, startIndex, stopIndex, prediction int, configs ATNConfigSet) {
+ for _, d := range p.delegates {
+ d.ReportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
new file mode 100644
index 000000000..c4080dbfd
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/error_strategy.go
@@ -0,0 +1,762 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+type ErrorStrategy interface {
+ reset(Parser)
+ RecoverInline(Parser) Token
+ Recover(Parser, RecognitionException)
+ Sync(Parser)
+ InErrorRecoveryMode(Parser) bool
+ ReportError(Parser, RecognitionException)
+ ReportMatch(Parser)
+}
+
+// This is the default implementation of {@link ANTLRErrorStrategy} used for
+// error Reporting and recovery in ANTLR parsers.
+//
+type DefaultErrorStrategy struct {
+ errorRecoveryMode bool
+ lastErrorIndex int
+ lastErrorStates *IntervalSet
+}
+
+var _ ErrorStrategy = &DefaultErrorStrategy{}
+
+func NewDefaultErrorStrategy() *DefaultErrorStrategy {
+
+ d := new(DefaultErrorStrategy)
+
+ // Indicates whether the error strategy is currently "recovering from an
+ // error". This is used to suppress Reporting multiple error messages while
+ // attempting to recover from a detected syntax error.
+ //
+ // @see //InErrorRecoveryMode
+ //
+ d.errorRecoveryMode = false
+
+ // The index into the input stream where the last error occurred.
+ // This is used to prevent infinite loops where an error is found
+ // but no token is consumed during recovery...another error is found,
+ // ad nauseam. This is a failsafe mechanism to guarantee that at least
+ // one token/tree node is consumed for two errors.
+ //
+ d.lastErrorIndex = -1
+ d.lastErrorStates = nil
+ return d
+}
+
+// The default implementation simply calls {@link //endErrorCondition} to
+// ensure that the handler is not in error recovery mode.
+func (d *DefaultErrorStrategy) reset(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+//
+// This method is called to enter error recovery mode when a recognition
+// exception is Reported.
+//
+// @param recognizer the parser instance
+//
+func (d *DefaultErrorStrategy) beginErrorCondition(recognizer Parser) {
+ d.errorRecoveryMode = true
+}
+
+func (d *DefaultErrorStrategy) InErrorRecoveryMode(recognizer Parser) bool {
+ return d.errorRecoveryMode
+}
+
+//
+// This method is called to leave error recovery mode after recovering from
+// a recognition exception.
+//
+// @param recognizer
+//
+func (d *DefaultErrorStrategy) endErrorCondition(recognizer Parser) {
+ d.errorRecoveryMode = false
+ d.lastErrorStates = nil
+ d.lastErrorIndex = -1
+}
+
+//
+// {@inheritDoc}
+//
+// The default implementation simply calls {@link //endErrorCondition}.
+//
+func (d *DefaultErrorStrategy) ReportMatch(recognizer Parser) {
+ d.endErrorCondition(recognizer)
+}
+
+//
+// {@inheritDoc}
+//
+// The default implementation returns immediately if the handler is already
+// in error recovery mode. Otherwise, it calls {@link //beginErrorCondition}
+// and dispatches the Reporting task based on the runtime type of {@code e}
+// according to the following table.
+//
+//
+// - {@link NoViableAltException}: Dispatches the call to
+// {@link //ReportNoViableAlternative}
+// - {@link InputMisMatchException}: Dispatches the call to
+// {@link //ReportInputMisMatch}
+// - {@link FailedPredicateException}: Dispatches the call to
+// {@link //ReportFailedPredicate}
+// - All other types: calls {@link Parser//NotifyErrorListeners} to Report
+// the exception
+//
+//
+func (d *DefaultErrorStrategy) ReportError(recognizer Parser, e RecognitionException) {
+ // if we've already Reported an error and have not Matched a token
+ // yet successfully, don't Report any errors.
+ if d.InErrorRecoveryMode(recognizer) {
+ return // don't Report spurious errors
+ }
+ d.beginErrorCondition(recognizer)
+
+ switch t := e.(type) {
+ default:
+ fmt.Println("unknown recognition error type: " + reflect.TypeOf(e).Name())
+ // fmt.Println(e.stack)
+ recognizer.NotifyErrorListeners(e.GetMessage(), e.GetOffendingToken(), e)
+ case *NoViableAltException:
+ d.ReportNoViableAlternative(recognizer, t)
+ case *InputMisMatchException:
+ d.ReportInputMisMatch(recognizer, t)
+ case *FailedPredicateException:
+ d.ReportFailedPredicate(recognizer, t)
+ }
+}
+
+// {@inheritDoc}
+//
+// The default implementation reSynchronizes the parser by consuming tokens
+// until we find one in the reSynchronization set--loosely the set of tokens
+// that can follow the current rule.
+//
+func (d *DefaultErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+
+ if d.lastErrorIndex == recognizer.GetInputStream().Index() &&
+ d.lastErrorStates != nil && d.lastErrorStates.contains(recognizer.GetState()) {
+ // uh oh, another error at the same token index and a previously-visited
+ // state in the ATN. This must be a case where LT(1) is in the recovery
+ // token set so nothing got consumed. Consume a single token at least
+ // to prevent an infinite loop; this is a failsafe.
+ recognizer.Consume()
+ }
+ d.lastErrorIndex = recognizer.GetInputStream().Index()
+ if d.lastErrorStates == nil {
+ d.lastErrorStates = NewIntervalSet()
+ }
+ d.lastErrorStates.addOne(recognizer.GetState())
+ followSet := d.getErrorRecoverySet(recognizer)
+ d.consumeUntil(recognizer, followSet)
+}
+
+// The default implementation of {@link ANTLRErrorStrategy//Sync} makes sure
+// that the current lookahead symbol is consistent with what we are expecting
+// at this point in the ATN. You can call this at any time, but ANTLR only
+// generates code to check before subrules/loops and at each iteration.
+//
+// Implements Jim Idle's magic Sync mechanism in closures and optional
+// subrules. E.g.,
+//
+//
+// a : Sync ( stuff Sync )*
+// Sync : {consume to what can follow Sync}
+//
+//
+// At the start of a sub rule upon error, {@link //Sync} performs single
+// token deletion, if possible. If it can't do that, it bails on the current
+// rule and uses the default error recovery, which consumes until the
+// reSynchronization set of the current rule.
+//
+// If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
+// with an empty alternative), then the expected set includes what follows
+// the subrule.
+//
+// During loop iteration, it consumes until it sees a token that can start a
+// sub rule or what follows the loop. Yes, that is pretty aggressive. We opt to
+// stay in the loop as long as possible.
+//
+// ORIGINS
+//
+// Previous versions of ANTLR did a poor job of recovering within loops.
+// A single mismatched token or missing token would force the parser to bail
+// out of the entire rule surrounding the loop. So, for rule
+//
+//
+// classfunc : 'class' ID '{' member* '}'
+//
+//
+// input with an extra token between members would force the parser to
+// consume until it found the next class definition rather than the next
+// member definition of the current class.
+//
+// This functionality costs a little bit of effort because the parser has to
+// compare the token set at the start of the loop and at each iteration. If for
+// some reason speed is suffering for you, you can turn off this functionality
+// by simply overriding this method with a blank body (see the sketch after
+// this method).
+//
+func (d *DefaultErrorStrategy) Sync(recognizer Parser) {
+ // If already recovering, don't try to Sync
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ la := recognizer.GetTokenStream().LA(1)
+
+ // try the cheaper subset first; might get lucky. Seems to shave a wee bit off
+ nextTokens := recognizer.GetATN().NextTokens(s, nil)
+ if nextTokens.contains(TokenEpsilon) || nextTokens.contains(la) {
+ return
+ }
+
+ switch s.GetStateType() {
+ case ATNStateBlockStart, ATNStateStarBlockStart, ATNStatePlusBlockStart, ATNStateStarLoopEntry:
+ // Report error and recover if possible
+ if d.SingleTokenDeletion(recognizer) != nil {
+ return
+ }
+ panic(NewInputMisMatchException(recognizer))
+ case ATNStatePlusLoopBack, ATNStateStarLoopBack:
+ d.ReportUnwantedToken(recognizer)
+ expecting := NewIntervalSet()
+ expecting.addSet(recognizer.GetExpectedTokens())
+ whatFollowsLoopIterationOrRule := expecting.addSet(d.getErrorRecoverySet(recognizer))
+ d.consumeUntil(recognizer, whatFollowsLoopIterationOrRule)
+ default:
+ // do nothing if we can't identify the exact kind of ATN state
+ }
+}
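+
+// A minimal sketch (not part of the upstream runtime) of the blank override the
+// comment above suggests: embedding DefaultErrorStrategy and giving Sync an empty
+// body disables the per-subrule/loop follow-set check. The type name is illustrative;
+// such a strategy would typically be installed through the parser's error-handler setter.
+type noopSyncStrategy struct {
+	*DefaultErrorStrategy
+}
+
+func (s *noopSyncStrategy) Sync(recognizer Parser) {
+	// intentionally empty: skip adaptive re-synchronization at subrules/loops
+}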
+
+// This is called by {@link //ReportError} when the exception is a
+// {@link NoViableAltException}.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+//
+func (d *DefaultErrorStrategy) ReportNoViableAlternative(recognizer Parser, e *NoViableAltException) {
+ tokens := recognizer.GetTokenStream()
+ var input string
+ if tokens != nil {
+ if e.startToken.GetTokenType() == TokenEOF {
+ input = ""
+ } else {
+ input = tokens.GetTextFromTokens(e.startToken, e.offendingToken)
+ }
+ } else {
+ input = ""
+ }
+ msg := "no viable alternative at input " + d.escapeWSAndQuote(input)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+//
+// This is called by {@link //ReportError} when the exception is an
+// {@link InputMisMatchException}.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+//
+func (d *DefaultErrorStrategy) ReportInputMisMatch(recognizer Parser, e *InputMisMatchException) {
+	msg := "mismatched input " + d.GetTokenErrorDisplay(e.offendingToken) +
+ " expecting " + e.getExpectedTokens().StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+//
+// This is called by {@link //ReportError} when the exception is a
+// {@link FailedPredicateException}.
+//
+// @see //ReportError
+//
+// @param recognizer the parser instance
+// @param e the recognition exception
+//
+func (d *DefaultErrorStrategy) ReportFailedPredicate(recognizer Parser, e *FailedPredicateException) {
+ ruleName := recognizer.GetRuleNames()[recognizer.GetParserRuleContext().GetRuleIndex()]
+ msg := "rule " + ruleName + " " + e.message
+ recognizer.NotifyErrorListeners(msg, e.offendingToken, e)
+}
+
+// This method is called to Report a syntax error which requires the removal
+// of a token from the input stream. At the time this method is called, the
+// erroneous symbol is the current {@code LT(1)} symbol and has not yet been
+// removed from the input stream. When this method returns,
+// {@code recognizer} is in error recovery mode.
+//
+// This method is called when {@link //singleTokenDeletion} identifies
+// single-token deletion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// enter error recovery mode, followed by calling
+// {@link Parser//NotifyErrorListeners}.
+//
+// @param recognizer the parser instance
+//
+func (d *DefaultErrorStrategy) ReportUnwantedToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ tokenName := d.GetTokenErrorDisplay(t)
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "extraneous input " + tokenName + " expecting " +
+ expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// This method is called to Report a syntax error which requires the
+// insertion of a missing token into the input stream. At the time this
+// method is called, the missing token has not yet been inserted. When this
+// method returns, {@code recognizer} is in error recovery mode.
+//
+// This method is called when {@link //singleTokenInsertion} identifies
+// single-token insertion as a viable recovery strategy for a mismatched
+// input error.
+//
+// The default implementation simply returns if the handler is already in
+// error recovery mode. Otherwise, it calls {@link //beginErrorCondition} to
+// enter error recovery mode, followed by calling
+// {@link Parser//NotifyErrorListeners}.
+//
+// @param recognizer the parser instance
+//
+func (d *DefaultErrorStrategy) ReportMissingToken(recognizer Parser) {
+ if d.InErrorRecoveryMode(recognizer) {
+ return
+ }
+ d.beginErrorCondition(recognizer)
+ t := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ msg := "missing " + expecting.StringVerbose(recognizer.GetLiteralNames(), recognizer.GetSymbolicNames(), false) +
+ " at " + d.GetTokenErrorDisplay(t)
+ recognizer.NotifyErrorListeners(msg, t, nil)
+}
+
+// The default implementation attempts to recover from the mismatched input
+// by using single token insertion and deletion as described below. If the
+// recovery attempt fails, this method panics with an
+// {@link InputMisMatchException}.
+//
+// EXTRA TOKEN (single token deletion)
+//
+// {@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
+// right token, however, then assume {@code LA(1)} is some extra spurious
+// token and delete it. Then consume and return the next token (which was
+// the {@code LA(2)} token) as the successful result of the Match operation.
+//
+// This recovery strategy is implemented by {@link
+// //singleTokenDeletion}.
+//
+// MISSING TOKEN (single token insertion)
+//
+// If current token (at {@code LA(1)}) is consistent with what could come
+// after the expected {@code LA(1)} token, then assume the token is missing
+// and use the parser's {@link TokenFactory} to create it on the fly. The
+// "insertion" is performed by returning the created token as the successful
+// result of the Match operation.
+//
+// This recovery strategy is implemented by {@link
+// //singleTokenInsertion}.
+//
+// EXAMPLE
+//
+// For example, the input {@code i=(3;} is clearly missing the {@code ')'}. When
+// the parser returns from the nested call to {@code expr}, it will have
+// call chain:
+//
+//
+// stat &rarr expr &rarr atom
+//
+//
+// and it will be trying to Match the {@code ')'} at this point in the
+// derivation:
+//
+//
+// => ID '=' '(' INT ')' ('+' atom)* ''
+// ^
+//
+//
+// The attempt to Match {@code ')'} will fail when it sees {@code ';'} and
+// call {@link //recoverInline}. To recover, it sees that {@code LA(1)==';'}
+// is in the set of tokens that can follow the {@code ')'} token reference
+// in rule {@code atom}. It can assume that you forgot the {@code ')'}.
+//
+func (d *DefaultErrorStrategy) RecoverInline(recognizer Parser) Token {
+ // SINGLE TOKEN DELETION
+ MatchedSymbol := d.SingleTokenDeletion(recognizer)
+ if MatchedSymbol != nil {
+ // we have deleted the extra token.
+ // now, move past ttype token as if all were ok
+ recognizer.Consume()
+ return MatchedSymbol
+ }
+ // SINGLE TOKEN INSERTION
+ if d.SingleTokenInsertion(recognizer) {
+ return d.GetMissingSymbol(recognizer)
+ }
+ // even that didn't work must panic the exception
+ panic(NewInputMisMatchException(recognizer))
+}
+
+//
+// This method implements the single-token insertion inline error recovery
+// strategy. It is called by {@link //recoverInline} if the single-token
+// deletion strategy fails to recover from the mismatched input. If this
+// method returns {@code true}, {@code recognizer} will be in error recovery
+// mode.
+//
+// This method determines whether or not single-token insertion is viable by
+// checking if the {@code LA(1)} input symbol could be successfully Matched
+// if it were instead the {@code LA(2)} symbol. If this method returns
+// {@code true}, the caller is responsible for creating and inserting a
+// token with the correct type to produce this behavior.
+//
+// @param recognizer the parser instance
+// @return {@code true} if single-token insertion is a viable recovery
+// strategy for the current mismatched input, otherwise {@code false}
+//
+func (d *DefaultErrorStrategy) SingleTokenInsertion(recognizer Parser) bool {
+ currentSymbolType := recognizer.GetTokenStream().LA(1)
+ // if current token is consistent with what could come after current
+ // ATN state, then we know we're missing a token; error recovery
+ // is free to conjure up and insert the missing token
+ atn := recognizer.GetInterpreter().atn
+ currentState := atn.states[recognizer.GetState()]
+ next := currentState.GetTransitions()[0].getTarget()
+ expectingAtLL2 := atn.NextTokens(next, recognizer.GetParserRuleContext())
+ if expectingAtLL2.contains(currentSymbolType) {
+ d.ReportMissingToken(recognizer)
+ return true
+ }
+
+ return false
+}
+
+// This method implements the single-token deletion inline error recovery
+// strategy. It is called by {@link //recoverInline} to attempt to recover
+// from mismatched input. If this method returns nil, the parser and error
+// handler state will not have changed. If this method returns non-nil,
+// {@code recognizer} will not be in error recovery mode since the
+// returned token was a successful Match.
+//
+// If the single-token deletion is successful, this method calls
+// {@link //ReportUnwantedToken} to Report the error, followed by
+// {@link Parser//consume} to actually "delete" the extraneous token. Then,
+// before returning {@link //ReportMatch} is called to signal a successful
+// Match.
+//
+// @param recognizer the parser instance
+// @return the successfully Matched {@link Token} instance if single-token
+// deletion successfully recovers from the mismatched input, otherwise
+// {@code nil}
+//
+func (d *DefaultErrorStrategy) SingleTokenDeletion(recognizer Parser) Token {
+ NextTokenType := recognizer.GetTokenStream().LA(2)
+ expecting := d.GetExpectedTokens(recognizer)
+ if expecting.contains(NextTokenType) {
+ d.ReportUnwantedToken(recognizer)
+ // print("recoverFromMisMatchedToken deleting " \
+ // + str(recognizer.GetTokenStream().LT(1)) \
+ // + " since " + str(recognizer.GetTokenStream().LT(2)) \
+ // + " is what we want", file=sys.stderr)
+ recognizer.Consume() // simply delete extra token
+ // we want to return the token we're actually Matching
+ MatchedSymbol := recognizer.GetCurrentToken()
+ d.ReportMatch(recognizer) // we know current token is correct
+ return MatchedSymbol
+ }
+
+ return nil
+}
+
+// Conjure up a missing token during error recovery.
+//
+// The recognizer attempts to recover from single missing
+// symbols. But, actions might refer to that missing symbol.
+// For example, x=ID {f($x)}. The action clearly assumes
+// that there has been an identifier Matched previously and that
+// $x points at that token. If that token is missing, but
+// the next token in the stream is what we want, we assume that
+// this token is missing and we keep going. Because we
+// have to return some token to replace the missing token,
+// we have to conjure one up. This method gives the user control
+// over the tokens returned for missing tokens. Mostly,
+// you will want to create something special for identifier
+// tokens. For literals such as '{' and ',', the default
+// action in the parser or tree parser works. It simply creates
+// a CommonToken of the appropriate type. The text will be the token.
+// If you change what tokens must be created by the lexer,
+// override this method to create the appropriate tokens.
+//
+func (d *DefaultErrorStrategy) GetMissingSymbol(recognizer Parser) Token {
+ currentSymbol := recognizer.GetCurrentToken()
+ expecting := d.GetExpectedTokens(recognizer)
+ expectedTokenType := expecting.first()
+ var tokenText string
+
+ if expectedTokenType == TokenEOF {
+ tokenText = "<missing EOF>"
+ } else {
+ ln := recognizer.GetLiteralNames()
+ if expectedTokenType > 0 && expectedTokenType < len(ln) {
+ tokenText = "<missing " + ln[expectedTokenType] + ">"
+ } else {
+ tokenText = "<missing undefined>" // TODO matches the JS impl
+ }
+ }
+ current := currentSymbol
+ lookback := recognizer.GetTokenStream().LT(-1)
+ if current.GetTokenType() == TokenEOF && lookback != nil {
+ current = lookback
+ }
+
+ tf := recognizer.GetTokenFactory()
+
+ return tf.Create(current.GetSource(), expectedTokenType, tokenText, TokenDefaultChannel, -1, -1, current.GetLine(), current.GetColumn())
+}
+
+func (d *DefaultErrorStrategy) GetExpectedTokens(recognizer Parser) *IntervalSet {
+ return recognizer.GetExpectedTokens()
+}
+
+// How should a token be displayed in an error message? The default
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a new token type.
+//
+func (d *DefaultErrorStrategy) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+ return ""
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+ s = ""
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ return d.escapeWSAndQuote(s)
+}
+
+func (d *DefaultErrorStrategy) escapeWSAndQuote(s string) string {
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ return "'" + s + "'"
+}
+
+// Compute the error recovery set for the current rule. During
+// rule invocation, the parser pushes the set of tokens that can
+// follow that rule reference on the stack. This amounts to
+// computing FIRST of what follows the rule reference in the
+// enclosing rule. See LinearApproximator.FIRST().
+// This local follow set only includes tokens
+// from within the rule i.e., the FIRST computation done by
+// ANTLR stops at the end of a rule.
+//
+// EXAMPLE
+//
+// When you find a "no viable alt exception", the input is not
+// consistent with any of the alternatives for rule r. The best
+// thing to do is to consume tokens until you see something that
+// can legally follow a call to r *or* any rule that called r.
+// You don't want the exact set of viable next tokens because the
+// input might just be missing a token--you might consume the
+// rest of the input looking for one of the missing tokens.
+//
+// Consider grammar:
+//
+// a : '[' b ']'
+// | '(' b ')'
+//
+// b : c '^' INT
+// c : ID
+// | INT
+//
+//
+// At each rule invocation, the set of tokens that could follow
+// that rule is pushed on a stack. Here are the various
+// context-sensitive follow sets:
+//
+// FOLLOW(b1_in_a) = FIRST(']') = ']'
+// FOLLOW(b2_in_a) = FIRST(')') = ')'
+// FOLLOW(c_in_b) = FIRST('^') = '^'
+//
+// Upon erroneous input "[]", the call chain is
+//
+// a -> b -> c
+//
+// and, hence, the follow context stack is:
+//
+// depth follow set start of rule execution
+// 0 <EOF> a (from main())
+// 1 ']' b
+// 2 '^' c
+//
+// Notice that ')' is not included, because b would have to have
+// been called from a different context in rule a for ')' to be
+// included.
+//
+// For error recovery, we cannot consider FOLLOW(c)
+// (context-sensitive or otherwise). We need the combined set of
+// all context-sensitive FOLLOW sets--the set of all tokens that
+// could follow any reference in the call chain. We need to
+// reSync to one of those tokens. Note that FOLLOW(c)='^' and if
+// we reSync'd to that token, we'd consume until EOF. We need to
+// Sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
+// In this case, for input "[]", LA(1) is ']' and in the set, so we would
+// not consume anything. After printing an error, rule c would
+// return normally. Rule b would not find the required '^' though.
+// At this point, it gets a mismatched token error and panics with an
+// exception (since LA(1) is not in the viable following token
+// set). The rule exception handler tries to recover, but finds
+// the same recovery set and doesn't consume anything. Rule b
+// exits normally returning to rule a. Now it finds the ']' (and
+// with the successful Match exits errorRecovery mode).
+//
+// So, you can see that the parser walks up the call chain looking
+// for the token that was a member of the recovery set.
+//
+// Errors are not generated in errorRecovery mode.
+//
+// ANTLR's error recovery mechanism is based upon original ideas:
+//
+// "Algorithms + Data Structures = Programs" by Niklaus Wirth
+//
+// and
+//
+// "A note on error recovery in recursive descent parsers":
+// http://portal.acm.org/citation.cfm?id=947902.947905
+//
+// Later, Josef Grosch had some good ideas:
+//
+// "Efficient and Comfortable Error Recovery in Recursive Descent
+// Parsers":
+// ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
+//
+// Like Grosch I implement context-sensitive FOLLOW sets that are combined
+// at run-time upon error to avoid overhead during parsing.
+//
+func (d *DefaultErrorStrategy) getErrorRecoverySet(recognizer Parser) *IntervalSet {
+ atn := recognizer.GetInterpreter().atn
+ ctx := recognizer.GetParserRuleContext()
+ recoverSet := NewIntervalSet()
+ for ctx != nil && ctx.GetInvokingState() >= 0 {
+ // compute what follows who invoked us
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ follow := atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ recoverSet.addSet(follow)
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ recoverSet.removeOne(TokenEpsilon)
+ return recoverSet
+}
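+
+// A tiny, self-contained illustration (not part of the upstream runtime) of the
+// combined follow set {']', '^'} worked through in the comment above, built with
+// the same IntervalSet operations getErrorRecoverySet uses. The token type values
+// are made up for the example grammar.
+func exampleCombinedFollowSet() *IntervalSet {
+	const (
+		exTokRBracket = 10 // ']' in the example grammar (illustrative value)
+		exTokCaret    = 11 // '^' in the example grammar (illustrative value)
+	)
+	follow := NewIntervalSet()
+	follow.addOne(exTokRBracket) // pushed when rule a invoked b
+	follow.addOne(exTokCaret)    // pushed when rule b invoked c
+	return follow                // the set to reSync to: {']', '^'}
+}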
+
+// Consume tokens until one Matches the given token set.//
+func (d *DefaultErrorStrategy) consumeUntil(recognizer Parser, set *IntervalSet) {
+ ttype := recognizer.GetTokenStream().LA(1)
+ for ttype != TokenEOF && !set.contains(ttype) {
+ recognizer.Consume()
+ ttype = recognizer.GetTokenStream().LA(1)
+ }
+}
+
+//
+// This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
+// by immediately canceling the parse operation with a
+// {@link ParseCancellationException}. The implementation ensures that the
+// {@link ParserRuleContext//exception} field is set for all parse tree nodes
+// that were not completed prior to encountering the error.
+//
+//
+// This error strategy is useful in the following scenarios.
+//
+//
+// - Two-stage parsing: This error strategy allows the first
+// stage of two-stage parsing to immediately terminate if an error is
+// encountered, and immediately fall back to the second stage. In addition to
+// avoiding wasted work by attempting to recover from errors here, the empty
+// implementation of {@link BailErrorStrategy//Sync} improves the performance of
+// the first stage.
+// - Silent validation: When syntax errors are not being
+// Reported or logged, and the parse result is simply ignored if errors occur,
+// the {@link BailErrorStrategy} avoids wasting work on recovering from errors
+// when the result will be ignored either way.
+//
+//
+//
+// {@code myparser.setErrorHandler(NewBailErrorStrategy())}
+//
+// @see Parser//setErrorHandler(ANTLRErrorStrategy)
+
+type BailErrorStrategy struct {
+ *DefaultErrorStrategy
+}
+
+var _ ErrorStrategy = &BailErrorStrategy{}
+
+func NewBailErrorStrategy() *BailErrorStrategy {
+
+ b := new(BailErrorStrategy)
+
+ b.DefaultErrorStrategy = NewDefaultErrorStrategy()
+
+ return b
+}
+
+// Instead of recovering from exception {@code e}, re-panic it wrapped
+// in a {@link ParseCancellationException} so it is not handled by the
+// rule function's recovery logic. Use {@link Exception//getCause()} to get the
+// original {@link RecognitionException}.
+//
+func (b *BailErrorStrategy) Recover(recognizer Parser, e RecognitionException) {
+ context := recognizer.GetParserRuleContext()
+ for context != nil {
+ context.SetException(e)
+ if parent, ok := context.GetParent().(ParserRuleContext); ok {
+ context = parent
+ } else {
+ context = nil
+ }
+ }
+ panic(NewParseCancellationException()) // TODO we don't emit e properly
+}
+
+// Make sure we don't attempt to recover inline; if the parser
+// successfully recovers, it won't panic with an exception.
+//
+func (b *BailErrorStrategy) RecoverInline(recognizer Parser) Token {
+ b.Recover(recognizer, NewInputMisMatchException(recognizer))
+
+ return nil
+}
+
+// Make sure we don't attempt to recover from problems in subrules.//
+func (b *BailErrorStrategy) Sync(recognizer Parser) {
+ // pass
+}
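+
+// A sketch (not part of the upstream runtime) of the two-stage parsing pattern the
+// comment above describes. The parse callback stands in for invoking a generated
+// start rule, and the small setter interface avoids assuming anything about the
+// concrete parser type; all names here are illustrative.
+type errorHandlerSetter interface {
+	SetErrorHandler(ErrorStrategy)
+}
+
+func parseBailingFirst(p errorHandlerSetter, parse func()) {
+	runOnce := func(strategy ErrorStrategy) (bailed bool) {
+		defer func() {
+			if r := recover(); r != nil {
+				if _, ok := r.(*ParseCancellationException); ok {
+					bailed = true // stage one hit a syntax error
+					return
+				}
+				panic(r) // some other panic: propagate it
+			}
+		}()
+		p.SetErrorHandler(strategy)
+		parse()
+		return false
+	}
+	if runOnce(NewBailErrorStrategy()) {
+		// fall back to the recovering default strategy for full diagnostics
+		runOnce(NewDefaultErrorStrategy())
+	}
+}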
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
new file mode 100644
index 000000000..2ef74926e
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/errors.go
@@ -0,0 +1,241 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
+// 3 kinds of errors: prediction errors, failed predicate errors, and
+// mismatched input errors. In each case, the parser knows where it is
+// in the input, where it is in the ATN, the rule invocation stack,
+// and what kind of problem occurred.
+
+type RecognitionException interface {
+ GetOffendingToken() Token
+ GetMessage() string
+ GetInputStream() IntStream
+}
+
+type BaseRecognitionException struct {
+ message string
+ recognizer Recognizer
+ offendingToken Token
+ offendingState int
+ ctx RuleContext
+ input IntStream
+}
+
+func NewBaseRecognitionException(message string, recognizer Recognizer, input IntStream, ctx RuleContext) *BaseRecognitionException {
+
+ // todo
+ // Error.call(this)
+ //
+ // if (!!Error.captureStackTrace) {
+ // Error.captureStackTrace(this, RecognitionException)
+ // } else {
+ // stack := NewError().stack
+ // }
+ // TODO may be able to use - "runtime" func Stack(buf []byte, all bool) int
+
+ t := new(BaseRecognitionException)
+
+ t.message = message
+ t.recognizer = recognizer
+ t.input = input
+ t.ctx = ctx
+ // The current {@link Token} when an error occurred. Since not all streams
+ // support accessing symbols by index, we have to track the {@link Token}
+ // instance itself.
+ t.offendingToken = nil
+ // Get the ATN state number the parser was in at the time the error
+ // occurred. For {@link NoViableAltException} and
+ // {@link LexerNoViableAltException} exceptions, this is the
+ // {@link DecisionState} number. For others, it is the state whose outgoing
+ // edge we couldn't Match.
+ t.offendingState = -1
+ if t.recognizer != nil {
+ t.offendingState = t.recognizer.GetState()
+ }
+
+ return t
+}
+
+func (b *BaseRecognitionException) GetMessage() string {
+ return b.message
+}
+
+func (b *BaseRecognitionException) GetOffendingToken() Token {
+ return b.offendingToken
+}
+
+func (b *BaseRecognitionException) GetInputStream() IntStream {
+ return b.input
+}
+
+// If the state number is not known, offendingState is -1.
+
+//
+// Gets the set of input symbols which could potentially follow the
+// previously Matched symbol at the time this exception was raised.
+//
+// If the set of expected tokens is not known and could not be computed,
+// this method returns {@code nil}.
+//
+// @return The set of token types that could potentially follow the current
+// state in the ATN, or {@code nil} if the information is not available.
+// /
+func (b *BaseRecognitionException) getExpectedTokens() *IntervalSet {
+ if b.recognizer != nil {
+ return b.recognizer.GetATN().getExpectedTokens(b.offendingState, b.ctx)
+ }
+
+ return nil
+}
+
+func (b *BaseRecognitionException) String() string {
+ return b.message
+}
+
+type LexerNoViableAltException struct {
+ *BaseRecognitionException
+
+ startIndex int
+ deadEndConfigs ATNConfigSet
+}
+
+func NewLexerNoViableAltException(lexer Lexer, input CharStream, startIndex int, deadEndConfigs ATNConfigSet) *LexerNoViableAltException {
+
+ l := new(LexerNoViableAltException)
+
+ l.BaseRecognitionException = NewBaseRecognitionException("", lexer, input, nil)
+
+ l.startIndex = startIndex
+ l.deadEndConfigs = deadEndConfigs
+
+ return l
+}
+
+func (l *LexerNoViableAltException) String() string {
+ symbol := ""
+ if l.startIndex >= 0 && l.startIndex < l.input.Size() {
+ symbol = l.input.(CharStream).GetTextFromInterval(NewInterval(l.startIndex, l.startIndex))
+ }
+ return "LexerNoViableAltException" + symbol
+}
+
+type NoViableAltException struct {
+ *BaseRecognitionException
+
+ startToken Token
+ offendingToken Token
+ ctx ParserRuleContext
+ deadEndConfigs ATNConfigSet
+}
+
+// Indicates that the parser could not decide which of two or more paths
+// to take based upon the remaining input. It tracks the starting token
+// of the offending input and also knows where the parser was
+// in the various paths when the error occurred. Reported by ReportNoViableAlternative().
+//
+func NewNoViableAltException(recognizer Parser, input TokenStream, startToken Token, offendingToken Token, deadEndConfigs ATNConfigSet, ctx ParserRuleContext) *NoViableAltException {
+
+ if ctx == nil {
+ ctx = recognizer.GetParserRuleContext()
+ }
+
+ if offendingToken == nil {
+ offendingToken = recognizer.GetCurrentToken()
+ }
+
+ if startToken == nil {
+ startToken = recognizer.GetCurrentToken()
+ }
+
+ if input == nil {
+ input = recognizer.GetInputStream().(TokenStream)
+ }
+
+ n := new(NoViableAltException)
+ n.BaseRecognitionException = NewBaseRecognitionException("", recognizer, input, ctx)
+
+ // Which configurations did we try at input.Index() that couldn't Match
+ // input.LT(1)?//
+ n.deadEndConfigs = deadEndConfigs
+ // The token object at the start index; the input stream might
+ // not be buffering tokens, so get a reference to it. (At the
+ // time the error occurred, of course the stream needs to keep a
+ // buffer of all of the tokens, but later we might not have access to those.)
+ n.startToken = startToken
+ n.offendingToken = offendingToken
+
+ return n
+}
+
+type InputMisMatchException struct {
+ *BaseRecognitionException
+}
+
+// This signifies any kind of mismatched input exceptions such as
+// when the current input does not Match the expected token.
+//
+func NewInputMisMatchException(recognizer Parser) *InputMisMatchException {
+
+ i := new(InputMisMatchException)
+ i.BaseRecognitionException = NewBaseRecognitionException("", recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ i.offendingToken = recognizer.GetCurrentToken()
+
+ return i
+
+}
+
+// A semantic predicate failed during validation. Validation of predicates
+// occurs when normally parsing the alternative just like Matching a token.
+// Disambiguating predicate evaluation occurs when we test a predicate during
+// prediction.
+
+type FailedPredicateException struct {
+ *BaseRecognitionException
+
+ ruleIndex int
+ predicateIndex int
+ predicate string
+}
+
+func NewFailedPredicateException(recognizer Parser, predicate string, message string) *FailedPredicateException {
+
+ f := new(FailedPredicateException)
+
+ f.BaseRecognitionException = NewBaseRecognitionException(f.formatMessage(predicate, message), recognizer, recognizer.GetInputStream(), recognizer.GetParserRuleContext())
+
+ s := recognizer.GetInterpreter().atn.states[recognizer.GetState()]
+ trans := s.GetTransitions()[0]
+ if trans2, ok := trans.(*PredicateTransition); ok {
+ f.ruleIndex = trans2.ruleIndex
+ f.predicateIndex = trans2.predIndex
+ } else {
+ f.ruleIndex = 0
+ f.predicateIndex = 0
+ }
+ f.predicate = predicate
+ f.offendingToken = recognizer.GetCurrentToken()
+
+ return f
+}
+
+func (f *FailedPredicateException) formatMessage(predicate, message string) string {
+ if message != "" {
+ return message
+ }
+
+ return "failed predicate: {" + predicate + "}?"
+}
+
+type ParseCancellationException struct {
+}
+
+func NewParseCancellationException() *ParseCancellationException {
+ // Error.call(this)
+ // Error.captureStackTrace(this, ParseCancellationException)
+ return new(ParseCancellationException)
+}
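+
+// A small illustration (not part of the upstream runtime) of the fallback
+// formatting above: with no explicit message, the predicate text itself is used.
+func exampleFailedPredicateMessage() string {
+	var f FailedPredicateException
+	return f.formatMessage("x > 0", "") // yields: failed predicate: {x > 0}?
+}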
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
new file mode 100644
index 000000000..842170c08
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/file_stream.go
@@ -0,0 +1,49 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "io"
+ "os"
+)
+
+// This is an InputStream that is loaded from a file all at once
+// when you construct the object.
+
+type FileStream struct {
+ *InputStream
+
+ filename string
+}
+
+func NewFileStream(fileName string) (*FileStream, error) {
+
+ buf := bytes.NewBuffer(nil)
+
+ f, err := os.Open(fileName)
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ _, err = io.Copy(buf, f)
+ if err != nil {
+ return nil, err
+ }
+
+ fs := new(FileStream)
+
+ fs.filename = fileName
+ s := buf.String()
+
+ fs.InputStream = NewInputStream(s)
+
+ return fs, nil
+
+}
+
+func (f *FileStream) GetSourceName() string {
+ return f.filename
+}
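+
+// A short usage sketch (not part of the upstream runtime); the file name is
+// illustrative and error handling stays with the caller.
+func exampleLoadInput() (*FileStream, error) {
+	fs, err := NewFileStream("input.txt") // hypothetical path
+	if err != nil {
+		return nil, err
+	}
+	_ = fs.GetSourceName() // reports "input.txt"
+	return fs, nil
+}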
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
new file mode 100644
index 000000000..5ff270f53
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/input_stream.go
@@ -0,0 +1,113 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type InputStream struct {
+ name string
+ index int
+ data []rune
+ size int
+}
+
+func NewInputStream(data string) *InputStream {
+
+ is := new(InputStream)
+
+ is.name = ""
+ is.index = 0
+ is.data = []rune(data)
+ is.size = len(is.data) // number of runes
+
+ return is
+}
+
+func (is *InputStream) reset() {
+ is.index = 0
+}
+
+func (is *InputStream) Consume() {
+ if is.index >= is.size {
+ // assert is.LA(1) == TokenEOF
+ panic("cannot consume EOF")
+ }
+ is.index++
+}
+
+func (is *InputStream) LA(offset int) int {
+
+ if offset == 0 {
+ return 0 // nil
+ }
+ if offset < 0 {
+ offset++ // e.g., translate LA(-1) to use offset=0
+ }
+ pos := is.index + offset - 1
+
+ if pos < 0 || pos >= is.size { // invalid
+ return TokenEOF
+ }
+
+ return int(is.data[pos])
+}
+
+func (is *InputStream) LT(offset int) int {
+ return is.LA(offset)
+}
+
+func (is *InputStream) Index() int {
+ return is.index
+}
+
+func (is *InputStream) Size() int {
+ return is.size
+}
+
+// mark/release do nothing we have entire buffer
+func (is *InputStream) Mark() int {
+ return -1
+}
+
+func (is *InputStream) Release(marker int) {
+}
+
+func (is *InputStream) Seek(index int) {
+ if index <= is.index {
+ is.index = index // just jump don't update stream state (line,...)
+ return
+ }
+ // seek forward
+ is.index = intMin(index, is.size)
+}
+
+func (is *InputStream) GetText(start int, stop int) string {
+ if stop >= is.size {
+ stop = is.size - 1
+ }
+ if start >= is.size {
+ return ""
+ }
+
+ return string(is.data[start : stop+1])
+}
+
+func (is *InputStream) GetTextFromTokens(start, stop Token) string {
+ if start != nil && stop != nil {
+ return is.GetTextFromInterval(NewInterval(start.GetTokenIndex(), stop.GetTokenIndex()))
+ }
+
+ return ""
+}
+
+func (is *InputStream) GetTextFromInterval(i *Interval) string {
+ return is.GetText(i.Start, i.Stop)
+}
+
+func (*InputStream) GetSourceName() string {
+ return "Obtained from string"
+}
+
+func (is *InputStream) String() string {
+ return string(is.data)
+}
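+
+// A short sketch (not part of the upstream runtime) of the lookahead conventions
+// implemented by LA above: LA(1) is the current rune, LA(-1) the most recently
+// consumed one, and out-of-range offsets report TokenEOF.
+func exampleLookahead() {
+	is := NewInputStream("ab")
+	_ = is.LA(1) // 'a'
+	is.Consume()
+	_ = is.LA(-1) // 'a', now behind the cursor
+	_ = is.LA(1)  // 'b'
+	_ = is.LA(2)  // past the end: TokenEOF
+}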
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
new file mode 100644
index 000000000..438e0ea6e
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/int_stream.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type IntStream interface {
+ Consume()
+ LA(int) int
+ Mark() int
+ Release(marker int)
+ Index() int
+ Seek(index int)
+ Size() int
+ GetSourceName() string
+}
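+
+// InputStream (input_stream.go) is one in-memory implementation of this interface;
+// the assertion below simply records that relationship at compile time.
+var _ IntStream = (*InputStream)(nil)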
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
new file mode 100644
index 000000000..1e9393adb
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/interval_set.go
@@ -0,0 +1,308 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+type Interval struct {
+ Start int
+ Stop int
+}
+
+/* stop is not included! */
+func NewInterval(start, stop int) *Interval {
+ i := new(Interval)
+
+ i.Start = start
+ i.Stop = stop
+ return i
+}
+
+func (i *Interval) Contains(item int) bool {
+ return item >= i.Start && item < i.Stop
+}
+
+func (i *Interval) String() string {
+ if i.Start == i.Stop-1 {
+ return strconv.Itoa(i.Start)
+ }
+
+ return strconv.Itoa(i.Start) + ".." + strconv.Itoa(i.Stop-1)
+}
+
+func (i *Interval) length() int {
+ return i.Stop - i.Start
+}
+
+type IntervalSet struct {
+ intervals []*Interval
+ readOnly bool
+}
+
+func NewIntervalSet() *IntervalSet {
+
+ i := new(IntervalSet)
+
+ i.intervals = nil
+ i.readOnly = false
+
+ return i
+}
+
+func (i *IntervalSet) first() int {
+ if len(i.intervals) == 0 {
+ return TokenInvalidType
+ }
+
+ return i.intervals[0].Start
+}
+
+func (i *IntervalSet) addOne(v int) {
+ i.addInterval(NewInterval(v, v+1))
+}
+
+func (i *IntervalSet) addRange(l, h int) {
+ i.addInterval(NewInterval(l, h+1))
+}
+
+func (i *IntervalSet) addInterval(v *Interval) {
+ if i.intervals == nil {
+ i.intervals = make([]*Interval, 0)
+ i.intervals = append(i.intervals, v)
+ } else {
+ // find insert pos
+ for k, interval := range i.intervals {
+ // distinct range -> insert
+ if v.Stop < interval.Start {
+ i.intervals = append(i.intervals[0:k], append([]*Interval{v}, i.intervals[k:]...)...)
+ return
+ } else if v.Stop == interval.Start {
+ i.intervals[k].Start = v.Start
+ return
+ } else if v.Start <= interval.Stop {
+ i.intervals[k] = NewInterval(intMin(interval.Start, v.Start), intMax(interval.Stop, v.Stop))
+
+ // if not applying to end, merge potential overlaps
+ if k < len(i.intervals)-1 {
+ l := i.intervals[k]
+ r := i.intervals[k+1]
+ // if r contained in l
+ if l.Stop >= r.Stop {
+ i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
+ } else if l.Stop >= r.Start { // partial overlap
+ i.intervals[k] = NewInterval(l.Start, r.Stop)
+ i.intervals = append(i.intervals[0:k+1], i.intervals[k+2:]...)
+ }
+ }
+ return
+ }
+ }
+ // greater than any existing interval
+ i.intervals = append(i.intervals, v)
+ }
+}
+
+func (i *IntervalSet) addSet(other *IntervalSet) *IntervalSet {
+ if other.intervals != nil {
+ for k := 0; k < len(other.intervals); k++ {
+ i2 := other.intervals[k]
+ i.addInterval(NewInterval(i2.Start, i2.Stop))
+ }
+ }
+ return i
+}
+
+func (i *IntervalSet) complement(start int, stop int) *IntervalSet {
+ result := NewIntervalSet()
+ result.addInterval(NewInterval(start, stop+1))
+ for j := 0; j < len(i.intervals); j++ {
+ result.removeRange(i.intervals[j])
+ }
+ return result
+}
+
+func (i *IntervalSet) contains(item int) bool {
+ if i.intervals == nil {
+ return false
+ }
+ for k := 0; k < len(i.intervals); k++ {
+ if i.intervals[k].Contains(item) {
+ return true
+ }
+ }
+ return false
+}
+
+func (i *IntervalSet) length() int {
+ total := 0
+
+ for _, v := range i.intervals {
+ total += v.length()
+ }
+
+ return total
+}
+
+func (i *IntervalSet) removeRange(v *Interval) {
+ if v.Start == v.Stop-1 {
+ i.removeOne(v.Start)
+ } else if i.intervals != nil {
+ k := 0
+ for n := 0; n < len(i.intervals); n++ {
+ ni := i.intervals[k]
+ // intervals are ordered
+ if v.Stop <= ni.Start {
+ return
+ } else if v.Start > ni.Start && v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ x := NewInterval(v.Stop, ni.Stop)
+ // i.intervals.splice(k, 0, x)
+ i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
+ return
+ } else if v.Start <= ni.Start && v.Stop >= ni.Stop {
+ // i.intervals.splice(k, 1)
+ i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+ k = k - 1 // need another pass
+ } else if v.Start < ni.Stop {
+ i.intervals[k] = NewInterval(ni.Start, v.Start)
+ } else if v.Stop < ni.Stop {
+ i.intervals[k] = NewInterval(v.Stop, ni.Stop)
+ }
+ k++
+ }
+ }
+}
+
+func (i *IntervalSet) removeOne(v int) {
+ if i.intervals != nil {
+ for k := 0; k < len(i.intervals); k++ {
+ ki := i.intervals[k]
+ // intervals are ordered
+ if v < ki.Start {
+ return
+ } else if v == ki.Start && v == ki.Stop-1 {
+ // i.intervals.splice(k, 1)
+ i.intervals = append(i.intervals[0:k], i.intervals[k+1:]...)
+ return
+ } else if v == ki.Start {
+ i.intervals[k] = NewInterval(ki.Start+1, ki.Stop)
+ return
+ } else if v == ki.Stop-1 {
+ i.intervals[k] = NewInterval(ki.Start, ki.Stop-1)
+ return
+ } else if v < ki.Stop-1 {
+ x := NewInterval(ki.Start, v)
+ ki.Start = v + 1
+ // i.intervals.splice(k, 0, x)
+ i.intervals = append(i.intervals[0:k], append([]*Interval{x}, i.intervals[k:]...)...)
+ return
+ }
+ }
+ }
+}
+
+func (i *IntervalSet) String() string {
+ return i.StringVerbose(nil, nil, false)
+}
+
+func (i *IntervalSet) StringVerbose(literalNames []string, symbolicNames []string, elemsAreChar bool) string {
+
+ if i.intervals == nil {
+ return "{}"
+ } else if literalNames != nil || symbolicNames != nil {
+ return i.toTokenString(literalNames, symbolicNames)
+ } else if elemsAreChar {
+ return i.toCharString()
+ }
+
+ return i.toIndexString()
+}
+
+func (i *IntervalSet) toCharString() string {
+ names := make([]string, 0, len(i.intervals))
+
+ var sb strings.Builder
+
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
+ names = append(names, "")
+ } else {
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(v.Start))
+ sb.WriteByte('\'')
+ names = append(names, sb.String())
+ sb.Reset()
+ }
+ } else {
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(v.Start))
+ sb.WriteString("'..'")
+ sb.WriteRune(rune(v.Stop - 1))
+ sb.WriteByte('\'')
+ names = append(names, sb.String())
+ sb.Reset()
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toIndexString() string {
+
+ names := make([]string, 0)
+ for j := 0; j < len(i.intervals); j++ {
+ v := i.intervals[j]
+ if v.Stop == v.Start+1 {
+ if v.Start == TokenEOF {
+ names = append(names, "")
+ } else {
+ names = append(names, strconv.Itoa(v.Start))
+ }
+ } else {
+ names = append(names, strconv.Itoa(v.Start)+".."+strconv.Itoa(v.Stop-1))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) toTokenString(literalNames []string, symbolicNames []string) string {
+ names := make([]string, 0)
+ for _, v := range i.intervals {
+ for j := v.Start; j < v.Stop; j++ {
+ names = append(names, i.elementName(literalNames, symbolicNames, j))
+ }
+ }
+ if len(names) > 1 {
+ return "{" + strings.Join(names, ", ") + "}"
+ }
+
+ return names[0]
+}
+
+func (i *IntervalSet) elementName(literalNames []string, symbolicNames []string, a int) string {
+ if a == TokenEOF {
+ return "<EOF>"
+ } else if a == TokenEpsilon {
+ return "<EPSILON>"
+ } else {
+ if a < len(literalNames) && literalNames[a] != "" {
+ return literalNames[a]
+ }
+
+ return symbolicNames[a]
+ }
+}
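+
+// A small sketch (not part of the upstream runtime) of the interval arithmetic
+// above: overlapping ranges are merged on insertion and removeOne can split a
+// range in two. The values are plain rune code points, used only for illustration.
+func exampleIntervalArithmetic() string {
+	s := NewIntervalSet()
+	s.addRange('a', 'c') // {a..c}
+	s.addRange('b', 'e') // overlaps, so the ranges merge into {a..e}
+	s.addOne('g')        // {a..e, g}
+	s.removeOne('d')     // splits the first range: {a..c, e, g}
+	return s.String()    // index form: "{97..99, 101, 103}"
+}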
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
new file mode 100644
index 000000000..b04f04572
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer.go
@@ -0,0 +1,418 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// A lexer is a recognizer that draws input symbols from a character stream.
+// Lexer grammars result in a subclass of this object. A Lexer object
+// uses simplified Match() and error recovery mechanisms in the interest
+// of speed.
+///
+
+type Lexer interface {
+ TokenSource
+ Recognizer
+
+ Emit() Token
+
+ SetChannel(int)
+ PushMode(int)
+ PopMode() int
+ SetType(int)
+ SetMode(int)
+}
+
+type BaseLexer struct {
+ *BaseRecognizer
+
+ Interpreter ILexerATNSimulator
+ TokenStartCharIndex int
+ TokenStartLine int
+ TokenStartColumn int
+ ActionType int
+ Virt Lexer // The most derived lexer implementation. Allows virtual method calls.
+
+ input CharStream
+ factory TokenFactory
+ tokenFactorySourcePair *TokenSourceCharStreamPair
+ token Token
+ hitEOF bool
+ channel int
+ thetype int
+ modeStack IntStack
+ mode int
+ text string
+}
+
+func NewBaseLexer(input CharStream) *BaseLexer {
+
+ lexer := new(BaseLexer)
+
+ lexer.BaseRecognizer = NewBaseRecognizer()
+
+ lexer.input = input
+ lexer.factory = CommonTokenFactoryDEFAULT
+ lexer.tokenFactorySourcePair = &TokenSourceCharStreamPair{lexer, input}
+
+ lexer.Virt = lexer
+
+ lexer.Interpreter = nil // child classes must populate it
+
+ // The goal of all lexer rules/methods is to create a token object.
+ // This is an instance variable as multiple rules may collaborate to
+ // create a single token. NextToken will return this object after
+ // Matching lexer rule(s). If you subclass to allow multiple token
+ // emissions, then set it to the last token to be Matched or
+ // something non-nil so that the auto token emit mechanism will not
+ // emit another token.
+ lexer.token = nil
+
+ // What character index in the stream did the current token start at?
+ // Needed, for example, to get the text for current token. Set at
+ // the start of NextToken.
+ lexer.TokenStartCharIndex = -1
+
+ // The line on which the first character of the token resides///
+ lexer.TokenStartLine = -1
+
+ // The character position of first character within the line///
+ lexer.TokenStartColumn = -1
+
+ // Once we see EOF on char stream, next token will be EOF.
+ // If you have DONE : EOF then you see DONE EOF.
+ lexer.hitEOF = false
+
+ // The channel number for the current token///
+ lexer.channel = TokenDefaultChannel
+
+ // The token type for the current token///
+ lexer.thetype = TokenInvalidType
+
+ lexer.modeStack = make([]int, 0)
+ lexer.mode = LexerDefaultMode
+
+ // You can set the text for the current token to override what is in
+ // the input char buffer. Use setText() or can set l instance var.
+ // /
+ lexer.text = ""
+
+ return lexer
+}
+
+const (
+ LexerDefaultMode = 0
+ LexerMore = -2
+ LexerSkip = -3
+)
+
+const (
+ LexerDefaultTokenChannel = TokenDefaultChannel
+ LexerHidden = TokenHiddenChannel
+ LexerMinCharValue = 0x0000
+ LexerMaxCharValue = 0x10FFFF
+)
+
+func (b *BaseLexer) reset() {
+ // wack Lexer state variables
+ if b.input != nil {
+ b.input.Seek(0) // rewind the input
+ }
+ b.token = nil
+ b.thetype = TokenInvalidType
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = -1
+ b.TokenStartColumn = -1
+ b.TokenStartLine = -1
+ b.text = ""
+
+ b.hitEOF = false
+ b.mode = LexerDefaultMode
+ b.modeStack = make([]int, 0)
+
+ b.Interpreter.reset()
+}
+
+func (b *BaseLexer) GetInterpreter() ILexerATNSimulator {
+ return b.Interpreter
+}
+
+func (b *BaseLexer) GetInputStream() CharStream {
+ return b.input
+}
+
+func (b *BaseLexer) GetSourceName() string {
+ return b.GrammarFileName
+}
+
+func (b *BaseLexer) SetChannel(v int) {
+ b.channel = v
+}
+
+func (b *BaseLexer) GetTokenFactory() TokenFactory {
+ return b.factory
+}
+
+func (b *BaseLexer) setTokenFactory(f TokenFactory) {
+ b.factory = f
+}
+
+func (b *BaseLexer) safeMatch() (ret int) {
+ defer func() {
+ if e := recover(); e != nil {
+ if re, ok := e.(RecognitionException); ok {
+ b.notifyListeners(re) // Report error
+ b.Recover(re)
+ ret = LexerSkip // default
+ }
+ }
+ }()
+
+ return b.Interpreter.Match(b.input, b.mode)
+}
+
+// Return a token from this source, i.e., Match a token on the char stream.
+func (b *BaseLexer) NextToken() Token {
+ if b.input == nil {
+ panic("NextToken requires a non-nil input stream.")
+ }
+
+ tokenStartMarker := b.input.Mark()
+
+ // previously in finally block
+ defer func() {
+ // make sure we release marker after Match or
+ // unbuffered char stream will keep buffering
+ b.input.Release(tokenStartMarker)
+ }()
+
+ for {
+ if b.hitEOF {
+ b.EmitEOF()
+ return b.token
+ }
+ b.token = nil
+ b.channel = TokenDefaultChannel
+ b.TokenStartCharIndex = b.input.Index()
+ b.TokenStartColumn = b.Interpreter.GetCharPositionInLine()
+ b.TokenStartLine = b.Interpreter.GetLine()
+ b.text = ""
+ continueOuter := false
+ for {
+ b.thetype = TokenInvalidType
+ ttype := LexerSkip
+
+ ttype = b.safeMatch()
+
+ if b.input.LA(1) == TokenEOF {
+ b.hitEOF = true
+ }
+ if b.thetype == TokenInvalidType {
+ b.thetype = ttype
+ }
+ if b.thetype == LexerSkip {
+ continueOuter = true
+ break
+ }
+ if b.thetype != LexerMore {
+ break
+ }
+ }
+
+ if continueOuter {
+ continue
+ }
+ if b.token == nil {
+ b.Virt.Emit()
+ }
+ return b.token
+ }
+
+ return nil
+}
+
+// Instruct the lexer to Skip creating a token for current lexer rule
+// and look for another token. NextToken() knows to keep looking when
+// a lexer rule finishes with token set to SKIPTOKEN. Recall that
+// if token==nil at end of any token rule, it creates one for you
+// and emits it.
+// /
+func (b *BaseLexer) Skip() {
+ b.thetype = LexerSkip
+}
+
+func (b *BaseLexer) More() {
+ b.thetype = LexerMore
+}
+
+func (b *BaseLexer) SetMode(m int) {
+ b.mode = m
+}
+
+func (b *BaseLexer) PushMode(m int) {
+ if LexerATNSimulatorDebug {
+ fmt.Println("pushMode " + strconv.Itoa(m))
+ }
+ b.modeStack.Push(b.mode)
+ b.mode = m
+}
+
+func (b *BaseLexer) PopMode() int {
+ if len(b.modeStack) == 0 {
+ panic("Empty Stack")
+ }
+ if LexerATNSimulatorDebug {
+ fmt.Println("popMode back to " + fmt.Sprint(b.modeStack[0:len(b.modeStack)-1]))
+ }
+ i, _ := b.modeStack.Pop()
+ b.mode = i
+ return b.mode
+}
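+
+// A minimal sketch (not part of the upstream runtime) of the mode-stack discipline
+// above, e.g. a lexer entering a string-literal mode and popping back out. The mode
+// index is illustrative; real values come from the generated lexer.
+func exampleModeStack(lex *BaseLexer) {
+	const stringMode = 1     // hypothetical mode index
+	lex.PushMode(stringMode) // saves the current mode and switches to stringMode
+	_ = lex.PopMode()        // restores and returns the saved mode
+}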
+
+func (b *BaseLexer) inputStream() CharStream {
+ return b.input
+}
+
+// SetInputStream resets the lexer input stream and associated lexer state.
+func (b *BaseLexer) SetInputStream(input CharStream) {
+ b.input = nil
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+ b.reset()
+ b.input = input
+ b.tokenFactorySourcePair = &TokenSourceCharStreamPair{b, b.input}
+}
+
+func (b *BaseLexer) GetTokenSourceCharStreamPair() *TokenSourceCharStreamPair {
+ return b.tokenFactorySourcePair
+}
+
+// By default does not support multiple emits per NextToken invocation
+// for efficiency reasons. Subclass and override this method, NextToken,
+// and GetToken (to push tokens into a list and pull from that list
+// rather than a single variable as this implementation does).
+// /
+func (b *BaseLexer) EmitToken(token Token) {
+ b.token = token
+}
+
+// The standard method called to automatically emit a token at the
+// outermost lexical rule. The token object should point into the
+// char buffer start..stop. If there is a text override in 'text',
+// use that to set the token's text. Override this method to emit
+// custom Token objects or provide a new factory.
+// /
+func (b *BaseLexer) Emit() Token {
+ t := b.factory.Create(b.tokenFactorySourcePair, b.thetype, b.text, b.channel, b.TokenStartCharIndex, b.GetCharIndex()-1, b.TokenStartLine, b.TokenStartColumn)
+ b.EmitToken(t)
+ return t
+}
+
+func (b *BaseLexer) EmitEOF() Token {
+ cpos := b.GetCharPositionInLine()
+ lpos := b.GetLine()
+ eof := b.factory.Create(b.tokenFactorySourcePair, TokenEOF, "", TokenDefaultChannel, b.input.Index(), b.input.Index()-1, lpos, cpos)
+ b.EmitToken(eof)
+ return eof
+}
+
+func (b *BaseLexer) GetCharPositionInLine() int {
+ return b.Interpreter.GetCharPositionInLine()
+}
+
+func (b *BaseLexer) GetLine() int {
+ return b.Interpreter.GetLine()
+}
+
+func (b *BaseLexer) GetType() int {
+ return b.thetype
+}
+
+func (b *BaseLexer) SetType(t int) {
+ b.thetype = t
+}
+
+// What is the index of the current character of lookahead?///
+func (b *BaseLexer) GetCharIndex() int {
+ return b.input.Index()
+}
+
+// Return the text Matched so far for the current token, or any text override.
+// SetText (below) sets the complete text of this token; it wipes any previous changes to the text.
+func (b *BaseLexer) GetText() string {
+ if b.text != "" {
+ return b.text
+ }
+
+ return b.Interpreter.GetText(b.input)
+}
+
+func (b *BaseLexer) SetText(text string) {
+ b.text = text
+}
+
+func (b *BaseLexer) GetATN() *ATN {
+ return b.Interpreter.ATN()
+}
+
+// Return a list of all Token objects in input char stream.
+// Forces load of all tokens. Does not include EOF token.
+// /
+func (b *BaseLexer) GetAllTokens() []Token {
+ vl := b.Virt
+ tokens := make([]Token, 0)
+ t := vl.NextToken()
+ for t.GetTokenType() != TokenEOF {
+ tokens = append(tokens, t)
+ t = vl.NextToken()
+ }
+ return tokens
+}
+
+func (b *BaseLexer) notifyListeners(e RecognitionException) {
+ start := b.TokenStartCharIndex
+ stop := b.input.Index()
+ text := b.input.GetTextFromInterval(NewInterval(start, stop))
+ msg := "token recognition error at: '" + text + "'"
+ listener := b.GetErrorListenerDispatch()
+ listener.SyntaxError(b, nil, b.TokenStartLine, b.TokenStartColumn, msg, e)
+}
+
+func (b *BaseLexer) getErrorDisplayForChar(c rune) string {
+ if c == TokenEOF {
+ return ""
+ } else if c == '\n' {
+ return "\\n"
+ } else if c == '\t' {
+ return "\\t"
+ } else if c == '\r' {
+ return "\\r"
+ } else {
+ return string(c)
+ }
+}
+
+func (b *BaseLexer) getCharErrorDisplay(c rune) string {
+ return "'" + b.getErrorDisplayForChar(c) + "'"
+}
+
+// Lexers can normally Match any char in its vocabulary after Matching
+// a token, so do the easy thing and just kill a character and hope
+// it all works out. You can instead use the rule invocation stack
+// to do sophisticated error recovery if you are in a fragment rule.
+// /
+func (b *BaseLexer) Recover(re RecognitionException) {
+ if b.input.LA(1) != TokenEOF {
+ if _, ok := re.(*LexerNoViableAltException); ok {
+ // Skip a char and try again
+ b.Interpreter.Consume(b.input)
+ } else {
+ // TODO: Do we lose character or line position information?
+ b.input.Consume()
+ }
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
new file mode 100644
index 000000000..5a325be13
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action.go
@@ -0,0 +1,430 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "strconv"
+
+const (
+ LexerActionTypeChannel = 0 //The type of a {@link LexerChannelAction} action.
+ LexerActionTypeCustom = 1 //The type of a {@link LexerCustomAction} action.
+ LexerActionTypeMode = 2 //The type of a {@link LexerModeAction} action.
+ LexerActionTypeMore = 3 //The type of a {@link LexerMoreAction} action.
+ LexerActionTypePopMode = 4 //The type of a {@link LexerPopModeAction} action.
+ LexerActionTypePushMode = 5 //The type of a {@link LexerPushModeAction} action.
+ LexerActionTypeSkip = 6 //The type of a {@link LexerSkipAction} action.
+ LexerActionTypeType = 7 //The type of a {@link LexerTypeAction} action.
+)
+
+type LexerAction interface {
+ getActionType() int
+ getIsPositionDependent() bool
+ execute(lexer Lexer)
+ hash() int
+ equals(other LexerAction) bool
+}
+
+type BaseLexerAction struct {
+ actionType int
+ isPositionDependent bool
+}
+
+func NewBaseLexerAction(action int) *BaseLexerAction {
+ la := new(BaseLexerAction)
+
+ la.actionType = action
+ la.isPositionDependent = false
+
+ return la
+}
+
+func (b *BaseLexerAction) execute(lexer Lexer) {
+ panic("Not implemented")
+}
+
+func (b *BaseLexerAction) getActionType() int {
+ return b.actionType
+}
+
+func (b *BaseLexerAction) getIsPositionDependent() bool {
+ return b.isPositionDependent
+}
+
+func (b *BaseLexerAction) hash() int {
+ return b.actionType
+}
+
+func (b *BaseLexerAction) equals(other LexerAction) bool {
+ return b == other
+}
+
+//
+// Implements the {@code Skip} lexer action by calling {@link Lexer//Skip}.
+//
+// The {@code Skip} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+type LexerSkipAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerSkipAction() *LexerSkipAction {
+ la := new(LexerSkipAction)
+ la.BaseLexerAction = NewBaseLexerAction(LexerActionTypeSkip)
+ return la
+}
+
+// Provides a singleton instance of this parameterless lexer action.
+var LexerSkipActionINSTANCE = NewLexerSkipAction()
+
+func (l *LexerSkipAction) execute(lexer Lexer) {
+ lexer.Skip()
+}
+
+func (l *LexerSkipAction) String() string {
+ return "skip"
+}
+
+// Implements the {@code type} lexer action by calling {@link Lexer//setType}
+// with the assigned type.
+type LexerTypeAction struct {
+ *BaseLexerAction
+
+ thetype int
+}
+
+func NewLexerTypeAction(thetype int) *LexerTypeAction {
+ l := new(LexerTypeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeType)
+ l.thetype = thetype
+ return l
+}
+
+func (l *LexerTypeAction) execute(lexer Lexer) {
+ lexer.SetType(l.thetype)
+}
+
+func (l *LexerTypeAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.thetype)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerTypeAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerTypeAction); !ok {
+ return false
+ } else {
+ return l.thetype == other.(*LexerTypeAction).thetype
+ }
+}
+
+func (l *LexerTypeAction) String() string {
+ return "actionType(" + strconv.Itoa(l.thetype) + ")"
+}
+
+// Implements the {@code pushMode} lexer action by calling
+// {@link Lexer//pushMode} with the assigned mode.
+type LexerPushModeAction struct {
+ *BaseLexerAction
+
+ mode int
+}
+
+func NewLexerPushModeAction(mode int) *LexerPushModeAction {
+
+ l := new(LexerPushModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePushMode)
+
+ l.mode = mode
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//pushMode} with the
+// value provided by {@link //getMode}.
+func (l *LexerPushModeAction) execute(lexer Lexer) {
+ lexer.PushMode(l.mode)
+}
+
+func (l *LexerPushModeAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerPushModeAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerPushModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerPushModeAction).mode
+ }
+}
+
+func (l *LexerPushModeAction) String() string {
+ return "pushMode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// Implements the {@code popMode} lexer action by calling {@link Lexer//popMode}.
+//
+// The {@code popMode} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+type LexerPopModeAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerPopModeAction() *LexerPopModeAction {
+
+ l := new(LexerPopModeAction)
+
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypePopMode)
+
+ return l
+}
+
+var LexerPopModeActionINSTANCE = NewLexerPopModeAction()
+
+// This action is implemented by calling {@link Lexer//popMode}.
+func (l *LexerPopModeAction) execute(lexer Lexer) {
+ lexer.PopMode()
+}
+
+func (l *LexerPopModeAction) String() string {
+ return "popMode"
+}
+
+// Implements the {@code more} lexer action by calling {@link Lexer//more}.
+//
+// The {@code more} command does not have any parameters, so this action is
+// implemented as a singleton instance exposed by {@link //INSTANCE}.
+
+type LexerMoreAction struct {
+ *BaseLexerAction
+}
+
+func NewLexerMoreAction() *LexerMoreAction {
+ l := new(LexerMoreAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMore)
+
+ return l
+}
+
+var LexerMoreActionINSTANCE = NewLexerMoreAction()
+
+// This action is implemented by calling {@link Lexer//more}.
+func (l *LexerMoreAction) execute(lexer Lexer) {
+ lexer.More()
+}
+
+func (l *LexerMoreAction) String() string {
+ return "more"
+}
+
+// Implements the {@code mode} lexer action by calling {@link Lexer//mode} with
+// the assigned mode.
+type LexerModeAction struct {
+ *BaseLexerAction
+
+ mode int
+}
+
+func NewLexerModeAction(mode int) *LexerModeAction {
+ l := new(LexerModeAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeMode)
+ l.mode = mode
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//mode} with the
+// value provided by {@link //getMode}.
+func (l *LexerModeAction) execute(lexer Lexer) {
+ lexer.SetMode(l.mode)
+}
+
+func (l *LexerModeAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.mode)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerModeAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerModeAction); !ok {
+ return false
+ } else {
+ return l.mode == other.(*LexerModeAction).mode
+ }
+}
+
+func (l *LexerModeAction) String() string {
+ return "mode(" + strconv.Itoa(l.mode) + ")"
+}
+
+// Executes a custom lexer action by calling {@link Recognizer//action} with the
+// rule and action indexes assigned to the custom action. The implementation of
+// a custom action is added to the generated code for the lexer in an override
+// of {@link Recognizer//action} when the grammar is compiled.
+//
+// This class may represent embedded actions created with the {...}
+// syntax in ANTLR 4, as well as actions created for lexer commands where the
+// command argument could not be evaluated when the grammar was compiled.
+
+// Constructs a custom lexer action with the specified rule and action
+// indexes.
+//
+// @param ruleIndex The rule index to use for calls to
+// {@link Recognizer//action}.
+// @param actionIndex The action index to use for calls to
+// {@link Recognizer//action}.
+
+type LexerCustomAction struct {
+ *BaseLexerAction
+ ruleIndex, actionIndex int
+}
+
+func NewLexerCustomAction(ruleIndex, actionIndex int) *LexerCustomAction {
+ l := new(LexerCustomAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeCustom)
+ l.ruleIndex = ruleIndex
+ l.actionIndex = actionIndex
+ l.isPositionDependent = true
+ return l
+}
+
+// Custom actions are implemented by calling {@link Lexer//action} with the
+// appropriate rule and action indexes.
+func (l *LexerCustomAction) execute(lexer Lexer) {
+ lexer.Action(nil, l.ruleIndex, l.actionIndex)
+}
+
+func (l *LexerCustomAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.ruleIndex)
+ h = murmurUpdate(h, l.actionIndex)
+ return murmurFinish(h, 3)
+}
+
+func (l *LexerCustomAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerCustomAction); !ok {
+ return false
+ } else {
+ return l.ruleIndex == other.(*LexerCustomAction).ruleIndex && l.actionIndex == other.(*LexerCustomAction).actionIndex
+ }
+}
+
+// Implements the {@code channel} lexer action by calling
+// {@link Lexer//setChannel} with the assigned channel.
+// Constructs a new {@code channel} action with the specified channel value.
+// @param channel The channel value to pass to {@link Lexer//setChannel}.
+type LexerChannelAction struct {
+ *BaseLexerAction
+
+ channel int
+}
+
+func NewLexerChannelAction(channel int) *LexerChannelAction {
+ l := new(LexerChannelAction)
+ l.BaseLexerAction = NewBaseLexerAction(LexerActionTypeChannel)
+ l.channel = channel
+ return l
+}
+
+// This action is implemented by calling {@link Lexer//setChannel} with the
+// value provided by {@link //getChannel}.
+func (l *LexerChannelAction) execute(lexer Lexer) {
+ lexer.SetChannel(l.channel)
+}
+
+func (l *LexerChannelAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.actionType)
+ h = murmurUpdate(h, l.channel)
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerChannelAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerChannelAction); !ok {
+ return false
+ } else {
+ return l.channel == other.(*LexerChannelAction).channel
+ }
+}
+
+func (l *LexerChannelAction) String() string {
+ return "channel(" + strconv.Itoa(l.channel) + ")"
+}
+
+// This implementation of {@link LexerAction} is used for tracking input offsets
+// for position-dependent actions within a {@link LexerActionExecutor}.
+//
+// This action is not serialized as part of the ATN, and is only required for
+// position-dependent lexer actions which appear at a location other than the
+// end of a rule. For more information about DFA optimizations employed for
+// lexer actions, see {@link LexerActionExecutor//append} and
+// {@link LexerActionExecutor//fixOffsetBeforeMatch}.
+
+// Constructs a new indexed custom action by associating a character offset
+// with a {@link LexerAction}.
+//
+// Note: This class is only required for lexer actions for which
+// {@link LexerAction//isPositionDependent} returns {@code true}.
+//
+// @param offset The offset into the input {@link CharStream}, relative to
+// the token start index, at which the specified lexer action should be
+// executed.
+// @param action The lexer action to execute at a particular offset in the
+// input {@link CharStream}.
+type LexerIndexedCustomAction struct {
+ *BaseLexerAction
+
+ offset int
+ lexerAction LexerAction
+ isPositionDependent bool
+}
+
+func NewLexerIndexedCustomAction(offset int, lexerAction LexerAction) *LexerIndexedCustomAction {
+
+ l := new(LexerIndexedCustomAction)
+ l.BaseLexerAction = NewBaseLexerAction(lexerAction.getActionType())
+
+ l.offset = offset
+ l.lexerAction = lexerAction
+ l.isPositionDependent = true
+
+ return l
+}
+
+// This method calls {@link //execute} on the result of {@link //getAction}
+// using the provided {@code lexer}.
+func (l *LexerIndexedCustomAction) execute(lexer Lexer) {
+ // assume the input stream position was properly set by the calling code
+ l.lexerAction.execute(lexer)
+}
+
+func (l *LexerIndexedCustomAction) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, l.offset)
+ h = murmurUpdate(h, l.lexerAction.hash())
+ return murmurFinish(h, 2)
+}
+
+func (l *LexerIndexedCustomAction) equals(other LexerAction) bool {
+ if l == other {
+ return true
+ } else if _, ok := other.(*LexerIndexedCustomAction); !ok {
+ return false
+ } else {
+ return l.offset == other.(*LexerIndexedCustomAction).offset && l.lexerAction == other.(*LexerIndexedCustomAction).lexerAction
+ }
+}
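
The file above models lexer commands such as `-> skip`, `-> channel(...)` and `-> pushMode(...)` as small action objects: parameterless commands are shared singletons, while parameterized ones carry their argument and implement value-based hash/equals. The sketch below illustrates that split with toy stand-ins; the names and the lexer state struct are hypothetical, not the vendored types.

package main

import "fmt"

// Toy lexer state the actions mutate.
type lexerState struct {
	channel int
	modes   []int
	skipped bool
}

type action interface{ execute(s *lexerState) }

type skipAction struct{}                 // no parameters -> one shared instance suffices
type channelAction struct{ channel int } // parameterized -> value equality matters
type pushModeAction struct{ mode int }

func (skipAction) execute(s *lexerState)       { s.skipped = true }
func (a channelAction) execute(s *lexerState)  { s.channel = a.channel }
func (a pushModeAction) execute(s *lexerState) { s.modes = append(s.modes, a.mode) }

var skipInstance = skipAction{} // analogous to a shared ...INSTANCE value

func main() {
	st := &lexerState{modes: []int{0}}
	// Roughly what a rule like  OPEN : '"' -> pushMode(1), channel(1) ;  attaches:
	for _, a := range []action{pushModeAction{mode: 1}, channelAction{channel: 1}} {
		a.execute(st)
	}
	skipInstance.execute(st)
	fmt.Printf("%+v\n", *st)
	// Value equality is what the hash/equals pair above provides:
	fmt.Println(channelAction{1} == channelAction{1}) // true
}
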
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
new file mode 100644
index 000000000..056941dd6
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_action_executor.go
@@ -0,0 +1,173 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// Represents an executor for a sequence of lexer actions which were traversed during
+// the Matching operation of a lexer rule (token).
+//
+// The executor tracks position information for position-dependent lexer actions
+// efficiently, ensuring that actions appearing only at the end of the rule do
+// not cause bloating of the {@link DFA} created for the lexer.
+
+type LexerActionExecutor struct {
+ lexerActions []LexerAction
+ cachedHash int
+}
+
+func NewLexerActionExecutor(lexerActions []LexerAction) *LexerActionExecutor {
+
+ if lexerActions == nil {
+ lexerActions = make([]LexerAction, 0)
+ }
+
+ l := new(LexerActionExecutor)
+
+ l.lexerActions = lexerActions
+
+ // Caches the result of {@link //hashCode} since the hash code is an element
+ // of the performance-critical {@link LexerATNConfig//hashCode} operation.
+ l.cachedHash = murmurInit(57)
+ for _, a := range lexerActions {
+ l.cachedHash = murmurUpdate(l.cachedHash, a.hash())
+ }
+
+ return l
+}
+
+// Creates a {@link LexerActionExecutor} which executes the actions for
+// the input {@code lexerActionExecutor} followed by a specified
+// {@code lexerAction}.
+//
+// @param lexerActionExecutor The executor for actions already traversed by
+// the lexer while Matching a token within a particular
+// {@link LexerATNConfig}. If this is {@code nil}, the method behaves as
+// though it were an empty executor.
+// @param lexerAction The lexer action to execute after the actions
+// specified in {@code lexerActionExecutor}.
+//
+// @return A {@link LexerActionExecutor} for executing the combined actions
+// of {@code lexerActionExecutor} and {@code lexerAction}.
+func LexerActionExecutorappend(lexerActionExecutor *LexerActionExecutor, lexerAction LexerAction) *LexerActionExecutor {
+ if lexerActionExecutor == nil {
+ return NewLexerActionExecutor([]LexerAction{lexerAction})
+ }
+
+ return NewLexerActionExecutor(append(lexerActionExecutor.lexerActions, lexerAction))
+}
+
+// Creates a {@link LexerActionExecutor} which encodes the current offset
+// for position-dependent lexer actions.
+//
+// Normally, when the executor encounters lexer actions where
+// {@link LexerAction//isPositionDependent} returns {@code true}, it calls
+// {@link IntStream//seek} on the input {@link CharStream} to set the input
+// position to the end of the current token. This behavior provides
+// for efficient DFA representation of lexer actions which appear at the end
+// of a lexer rule, even when the lexer rule Matches a variable number of
+// characters.
+//
+// Prior to traversing a Match transition in the ATN, the current offset
+// from the token start index is assigned to all position-dependent lexer
+// actions which have not already been assigned a fixed offset. By storing
+// the offsets relative to the token start index, the DFA representation of
+// lexer actions which appear in the middle of tokens remains efficient due
+// to sharing among tokens of the same length, regardless of their absolute
+// position in the input stream.
+//
+// If the current executor already has offsets assigned to all
+// position-dependent lexer actions, the method returns {@code this}.
+//
+// @param offset The current offset to assign to all position-dependent
+// lexer actions which do not already have offsets assigned.
+//
+// @return A {@link LexerActionExecutor} which stores input stream offsets
+// for all position-dependent lexer actions.
+// /
+func (l *LexerActionExecutor) fixOffsetBeforeMatch(offset int) *LexerActionExecutor {
+ var updatedLexerActions []LexerAction
+ for i := 0; i < len(l.lexerActions); i++ {
+ _, ok := l.lexerActions[i].(*LexerIndexedCustomAction)
+ if l.lexerActions[i].getIsPositionDependent() && !ok {
+ if updatedLexerActions == nil {
+ updatedLexerActions = make([]LexerAction, 0)
+
+ for _, a := range l.lexerActions {
+ updatedLexerActions = append(updatedLexerActions, a)
+ }
+ }
+
+ updatedLexerActions[i] = NewLexerIndexedCustomAction(offset, l.lexerActions[i])
+ }
+ }
+ if updatedLexerActions == nil {
+ return l
+ }
+
+ return NewLexerActionExecutor(updatedLexerActions)
+}
+
+// Execute the actions encapsulated by this executor within the context of a
+// particular {@link Lexer}.
+//
+// This method calls {@link IntStream//seek} to set the position of the
+// {@code input} {@link CharStream} prior to calling
+// {@link LexerAction//execute} on a position-dependent action. Before the
+// method returns, the input position will be restored to the same position
+// it was in when the method was invoked.
+//
+// @param lexer The lexer instance.
+// @param input The input stream which is the source for the current token.
+// When this method is called, the current {@link IntStream//index} for
+// {@code input} should be the start of the following token, i.e. 1
+// character past the end of the current token.
+// @param startIndex The token start index. This value may be passed to
+// {@link IntStream//seek} to set the {@code input} position to the beginning
+// of the token.
+// /
+func (l *LexerActionExecutor) execute(lexer Lexer, input CharStream, startIndex int) {
+ requiresSeek := false
+ stopIndex := input.Index()
+
+ defer func() {
+ if requiresSeek {
+ input.Seek(stopIndex)
+ }
+ }()
+
+ for i := 0; i < len(l.lexerActions); i++ {
+ lexerAction := l.lexerActions[i]
+ if la, ok := lexerAction.(*LexerIndexedCustomAction); ok {
+ offset := la.offset
+ input.Seek(startIndex + offset)
+ lexerAction = la.lexerAction
+ requiresSeek = (startIndex + offset) != stopIndex
+ } else if lexerAction.getIsPositionDependent() {
+ input.Seek(stopIndex)
+ requiresSeek = false
+ }
+ lexerAction.execute(lexer)
+ }
+}
+
+func (l *LexerActionExecutor) hash() int {
+ if l == nil {
+ return 61
+ }
+ return l.cachedHash
+}
+
+func (l *LexerActionExecutor) equals(other interface{}) bool {
+ if l == other {
+ return true
+ }
+ othert, ok := other.(*LexerActionExecutor)
+ if !ok {
+ return false
+ }
+ if othert == nil {
+ return false
+ }
+ return l.cachedHash == othert.cachedHash && &l.lexerActions == &othert.lexerActions
+}
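
The fixOffsetBeforeMatch method above copies the action list lazily and wraps only position-dependent actions that do not already carry an offset, so executors that need no change are reused as-is. The sketch below mirrors that copy-on-write idea with toy types; the names are hypothetical, not the vendored API.

package main

import "fmt"

// Toy model of fixOffsetBeforeMatch: lazily copy the action slice and pin an
// offset (relative to the token start) onto position-dependent actions that
// do not have one yet.
type act struct {
	name       string
	positional bool
	offset     int // -1 means "no offset assigned yet"
}

func fixOffsets(actions []act, offset int) []act {
	var updated []act
	for i, a := range actions {
		if a.positional && a.offset < 0 {
			if updated == nil {
				updated = append([]act(nil), actions...) // copy on first change
			}
			updated[i].offset = offset
		}
	}
	if updated == nil {
		return actions // nothing left to fix: reuse the existing slice
	}
	return updated
}

func main() {
	acts := []act{
		{name: "custom{...}", positional: true, offset: -1},
		{name: "skip", positional: false, offset: -1},
	}
	fixed := fixOffsets(acts, 3) // 3 chars past the token start when the action was seen
	fmt.Println(fixed)           // the custom action now carries offset 3
	fmt.Println(&fixed[0] == &fixOffsets(fixed, 9)[0]) // true: already-fixed list returned unchanged
}
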
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
new file mode 100644
index 000000000..dc05153ea
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/lexer_atn_simulator.go
@@ -0,0 +1,679 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ LexerATNSimulatorDebug = false
+ LexerATNSimulatorDFADebug = false
+
+ LexerATNSimulatorMinDFAEdge = 0
+ LexerATNSimulatorMaxDFAEdge = 127 // forces unicode to stay in ATN
+
+ LexerATNSimulatorMatchCalls = 0
+)
+
+type ILexerATNSimulator interface {
+ IATNSimulator
+
+ reset()
+ Match(input CharStream, mode int) int
+ GetCharPositionInLine() int
+ GetLine() int
+ GetText(input CharStream) string
+ Consume(input CharStream)
+}
+
+type LexerATNSimulator struct {
+ *BaseATNSimulator
+
+ recog Lexer
+ predictionMode int
+ mergeCache DoubleDict
+ startIndex int
+ Line int
+ CharPositionInLine int
+ mode int
+ prevAccept *SimState
+ MatchCalls int
+}
+
+func NewLexerATNSimulator(recog Lexer, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *LexerATNSimulator {
+ l := new(LexerATNSimulator)
+
+ l.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
+
+ l.decisionToDFA = decisionToDFA
+ l.recog = recog
+ // The current token's starting index into the character stream.
+ // Shared across DFA to ATN simulation in case the ATN fails and the
+ // DFA did not have a previous accept state. In this case, we use the
+ // ATN-generated exception object.
+ l.startIndex = -1
+ // line number 1..n within the input///
+ l.Line = 1
+ // The index of the character relative to the beginning of the line
+ // 0..n-1///
+ l.CharPositionInLine = 0
+ l.mode = LexerDefaultMode
+ // Used during DFA/ATN exec to record the most recent accept configuration
+ // info
+ l.prevAccept = NewSimState()
+ // done
+ return l
+}
+
+func (l *LexerATNSimulator) copyState(simulator *LexerATNSimulator) {
+ l.CharPositionInLine = simulator.CharPositionInLine
+ l.Line = simulator.Line
+ l.mode = simulator.mode
+ l.startIndex = simulator.startIndex
+}
+
+func (l *LexerATNSimulator) Match(input CharStream, mode int) int {
+ l.MatchCalls++
+ l.mode = mode
+ mark := input.Mark()
+
+ defer func() {
+ input.Release(mark)
+ }()
+
+ l.startIndex = input.Index()
+ l.prevAccept.reset()
+
+ dfa := l.decisionToDFA[mode]
+
+ var s0 *DFAState
+ l.atn.stateMu.RLock()
+ s0 = dfa.getS0()
+ l.atn.stateMu.RUnlock()
+
+ if s0 == nil {
+ return l.MatchATN(input)
+ }
+
+ return l.execATN(input, s0)
+}
+
+func (l *LexerATNSimulator) reset() {
+ l.prevAccept.reset()
+ l.startIndex = -1
+ l.Line = 1
+ l.CharPositionInLine = 0
+ l.mode = LexerDefaultMode
+}
+
+func (l *LexerATNSimulator) MatchATN(input CharStream) int {
+ startState := l.atn.modeToStartState[l.mode]
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("MatchATN mode " + strconv.Itoa(l.mode) + " start: " + startState.String())
+ }
+ oldMode := l.mode
+ s0Closure := l.computeStartState(input, startState)
+ suppressEdge := s0Closure.hasSemanticContext
+ s0Closure.hasSemanticContext = false
+
+ next := l.addDFAState(s0Closure, suppressEdge)
+
+ predict := l.execATN(input, next)
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("DFA after MatchATN: " + l.decisionToDFA[oldMode].ToLexerString())
+ }
+ return predict
+}
+
+func (l *LexerATNSimulator) execATN(input CharStream, ds0 *DFAState) int {
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("start state closure=" + ds0.configs.String())
+ }
+ if ds0.isAcceptState {
+ // allow zero-length tokens
+ l.captureSimState(l.prevAccept, input, ds0)
+ }
+ t := input.LA(1)
+ s := ds0 // s is current/from DFA state
+
+ for { // while more work
+ if LexerATNSimulatorDebug {
+ fmt.Println("execATN loop starting closure: " + s.configs.String())
+ }
+
+ // As we move src->trg, src->trg, we keep track of the previous trg to
+ // avoid looking up the DFA state again, which is expensive.
+ // If the previous target was already part of the DFA, we might
+ // be able to avoid doing a reach operation upon t. If s!=nil,
+ // it means that semantic predicates didn't prevent us from
+ // creating a DFA state. Once we know s!=nil, we check to see if
+ // the DFA state has an edge already for t. If so, we can just reuse
+ // its configuration set; there's no point in re-computing it.
+ // This is kind of like doing DFA simulation within the ATN
+ // simulation because DFA simulation is really just a way to avoid
+ // computing reach/closure sets. Technically, once we know that
+ // we have a previously added DFA state, we could jump over to
+ // the DFA simulator. But, that would mean popping back and forth
+ // a lot and making things more complicated algorithmically.
+ // This optimization makes a lot of sense for loops within DFA.
+ // A character will take us back to an existing DFA state
+ // that already has lots of edges out of it. e.g., .* in comments.
+ target := l.getExistingTargetState(s, t)
+ if target == nil {
+ target = l.computeTargetState(input, s, t)
+ // print("Computed:" + str(target))
+ }
+ if target == ATNSimulatorError {
+ break
+ }
+ // If this is a consumable input element, make sure to consume before
+ // capturing the accept state so the input index, line, and char
+ // position accurately reflect the state of the interpreter at the
+ // end of the token.
+ if t != TokenEOF {
+ l.Consume(input)
+ }
+ if target.isAcceptState {
+ l.captureSimState(l.prevAccept, input, target)
+ if t == TokenEOF {
+ break
+ }
+ }
+ t = input.LA(1)
+ s = target // flip; current DFA target becomes the new src/from state
+ }
+
+ return l.failOrAccept(l.prevAccept, input, s.configs, t)
+}
+
+// Get an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// this method returns {@code nil}.
+//
+// @param s The current DFA state
+// @param t The next input symbol
+// @return The existing target DFA state for the given input symbol
+// {@code t}, or {@code nil} if the target state for this edge is not
+// already cached
+func (l *LexerATNSimulator) getExistingTargetState(s *DFAState, t int) *DFAState {
+ if t < LexerATNSimulatorMinDFAEdge || t > LexerATNSimulatorMaxDFAEdge {
+ return nil
+ }
+
+ l.atn.edgeMu.RLock()
+ defer l.atn.edgeMu.RUnlock()
+ if s.getEdges() == nil {
+ return nil
+ }
+ target := s.getIthEdge(t - LexerATNSimulatorMinDFAEdge)
+ if LexerATNSimulatorDebug && target != nil {
+ fmt.Println("reuse state " + strconv.Itoa(s.stateNumber) + " edge to " + strconv.Itoa(target.stateNumber))
+ }
+ return target
+}
+
+// Compute a target state for an edge in the DFA, and attempt to add the
+// computed state and corresponding edge to the DFA.
+//
+// @param input The input stream
+// @param s The current DFA state
+// @param t The next input symbol
+//
+// @return The computed target DFA state for the given input symbol
+// {@code t}. If {@code t} does not lead to a valid DFA state, this method
+// returns {@link //ERROR}.
+func (l *LexerATNSimulator) computeTargetState(input CharStream, s *DFAState, t int) *DFAState {
+ reach := NewOrderedATNConfigSet()
+
+ // if we don't find an existing DFA state
+ // Fill reach starting from closure, following t transitions
+ l.getReachableConfigSet(input, s.configs, reach.BaseATNConfigSet, t)
+
+ if len(reach.configs) == 0 { // we got nowhere on t from s
+ if !reach.hasSemanticContext {
+ // we got nowhere on t, don't panic; record this knowledge or it'd
+ // cause a failover from the DFA later.
+ l.addDFAEdge(s, t, ATNSimulatorError, nil)
+ }
+ // stop when we can't Match any more char
+ return ATNSimulatorError
+ }
+ // Add an edge from s to target DFA found/created for reach
+ return l.addDFAEdge(s, t, nil, reach.BaseATNConfigSet)
+}
+
+func (l *LexerATNSimulator) failOrAccept(prevAccept *SimState, input CharStream, reach ATNConfigSet, t int) int {
+ if l.prevAccept.dfaState != nil {
+ lexerActionExecutor := prevAccept.dfaState.lexerActionExecutor
+ l.accept(input, lexerActionExecutor, l.startIndex, prevAccept.index, prevAccept.line, prevAccept.column)
+ return prevAccept.dfaState.prediction
+ }
+
+ // if no accept and EOF is first char, return EOF
+ if t == TokenEOF && input.Index() == l.startIndex {
+ return TokenEOF
+ }
+
+ panic(NewLexerNoViableAltException(l.recog, input, l.startIndex, reach))
+}
+
+// Given a starting configuration set, figure out all ATN configurations
+// we can reach upon input {@code t}. Parameter {@code reach} is a return
+// parameter.
+func (l *LexerATNSimulator) getReachableConfigSet(input CharStream, closure ATNConfigSet, reach ATNConfigSet, t int) {
+ // this is used to skip processing for configs which have a lower priority
+ // than a config that already reached an accept state for the same rule
+ SkipAlt := ATNInvalidAltNumber
+
+ for _, cfg := range closure.GetItems() {
+ currentAltReachedAcceptState := (cfg.GetAlt() == SkipAlt)
+ if currentAltReachedAcceptState && cfg.(*LexerATNConfig).passedThroughNonGreedyDecision {
+ continue
+ }
+
+ if LexerATNSimulatorDebug {
+
+ fmt.Printf("testing %s at %s\n", l.GetTokenName(t), cfg.String()) // l.recog, true))
+ }
+
+ for _, trans := range cfg.GetState().GetTransitions() {
+ target := l.getReachableTarget(trans, t)
+ if target != nil {
+ lexerActionExecutor := cfg.(*LexerATNConfig).lexerActionExecutor
+ if lexerActionExecutor != nil {
+ lexerActionExecutor = lexerActionExecutor.fixOffsetBeforeMatch(input.Index() - l.startIndex)
+ }
+ treatEOFAsEpsilon := (t == TokenEOF)
+ config := NewLexerATNConfig3(cfg.(*LexerATNConfig), target, lexerActionExecutor)
+ if l.closure(input, config, reach,
+ currentAltReachedAcceptState, true, treatEOFAsEpsilon) {
+ // any remaining configs for this alt have a lower priority
+ // than the one that just reached an accept state.
+ SkipAlt = cfg.GetAlt()
+ }
+ }
+ }
+ }
+}
+
+func (l *LexerATNSimulator) accept(input CharStream, lexerActionExecutor *LexerActionExecutor, startIndex, index, line, charPos int) {
+ if LexerATNSimulatorDebug {
+ fmt.Printf("ACTION %v\n", lexerActionExecutor)
+ }
+ // seek to after last char in token
+ input.Seek(index)
+ l.Line = line
+ l.CharPositionInLine = charPos
+ if lexerActionExecutor != nil && l.recog != nil {
+ lexerActionExecutor.execute(l.recog, input, startIndex)
+ }
+}
+
+func (l *LexerATNSimulator) getReachableTarget(trans Transition, t int) ATNState {
+ if trans.Matches(t, 0, LexerMaxCharValue) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+func (l *LexerATNSimulator) computeStartState(input CharStream, p ATNState) *OrderedATNConfigSet {
+ configs := NewOrderedATNConfigSet()
+ for i := 0; i < len(p.GetTransitions()); i++ {
+ target := p.GetTransitions()[i].getTarget()
+ cfg := NewLexerATNConfig6(target, i+1, BasePredictionContextEMPTY)
+ l.closure(input, cfg, configs, false, false, false)
+ }
+
+ return configs
+}
+
+// Since the alternatives within any lexer decision are ordered by
+// preference, this method stops pursuing the closure as soon as an accept
+// state is reached. After the first accept state is reached by depth-first
+// search from {@code config}, all other (potentially reachable) states for
+// this rule would have a lower priority.
+//
+// @return {@code true} if an accept state is reached, otherwise
+// {@code false}.
+func (l *LexerATNSimulator) closure(input CharStream, config *LexerATNConfig, configs ATNConfigSet,
+ currentAltReachedAcceptState, speculative, treatEOFAsEpsilon bool) bool {
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("closure(" + config.String() + ")") // config.String(l.recog, true) + ")")
+ }
+
+ _, ok := config.state.(*RuleStopState)
+ if ok {
+
+ if LexerATNSimulatorDebug {
+ if l.recog != nil {
+ fmt.Printf("closure at %s rule stop %s\n", l.recog.GetRuleNames()[config.state.GetRuleIndex()], config)
+ } else {
+ fmt.Printf("closure at rule stop %s\n", config)
+ }
+ }
+
+ if config.context == nil || config.context.hasEmptyPath() {
+ if config.context == nil || config.context.isEmpty() {
+ configs.Add(config, nil)
+ return true
+ }
+
+ configs.Add(NewLexerATNConfig2(config, config.state, BasePredictionContextEMPTY), nil)
+ currentAltReachedAcceptState = true
+ }
+ if config.context != nil && !config.context.isEmpty() {
+ for i := 0; i < config.context.length(); i++ {
+ if config.context.getReturnState(i) != BasePredictionContextEmptyReturnState {
+ newContext := config.context.GetParent(i) // "pop" return state
+ returnState := l.atn.states[config.context.getReturnState(i)]
+ cfg := NewLexerATNConfig2(config, returnState, newContext)
+ currentAltReachedAcceptState = l.closure(input, cfg, configs, currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+ }
+ }
+ }
+ return currentAltReachedAcceptState
+ }
+ // optimization
+ if !config.state.GetEpsilonOnlyTransitions() {
+ if !currentAltReachedAcceptState || !config.passedThroughNonGreedyDecision {
+ configs.Add(config, nil)
+ }
+ }
+ for j := 0; j < len(config.state.GetTransitions()); j++ {
+ trans := config.state.GetTransitions()[j]
+ cfg := l.getEpsilonTarget(input, config, trans, configs, speculative, treatEOFAsEpsilon)
+ if cfg != nil {
+ currentAltReachedAcceptState = l.closure(input, cfg, configs,
+ currentAltReachedAcceptState, speculative, treatEOFAsEpsilon)
+ }
+ }
+ return currentAltReachedAcceptState
+}
+
+// side-effect: can alter configs.hasSemanticContext
+func (l *LexerATNSimulator) getEpsilonTarget(input CharStream, config *LexerATNConfig, trans Transition,
+ configs ATNConfigSet, speculative, treatEOFAsEpsilon bool) *LexerATNConfig {
+
+ var cfg *LexerATNConfig
+
+ if trans.getSerializationType() == TransitionRULE {
+
+ rt := trans.(*RuleTransition)
+ newContext := SingletonBasePredictionContextCreate(config.context, rt.followState.GetStateNumber())
+ cfg = NewLexerATNConfig2(config, trans.getTarget(), newContext)
+
+ } else if trans.getSerializationType() == TransitionPRECEDENCE {
+ panic("Precedence predicates are not supported in lexers.")
+ } else if trans.getSerializationType() == TransitionPREDICATE {
+ // Track traversing semantic predicates. If we traverse,
+ // we cannot add a DFA state for this "reach" computation
+ // because the DFA would not test the predicate again in the
+ // future. Rather than creating collections of semantic predicates
+ // like v3 and testing them on prediction, v4 will test them on the
+ // fly all the time using the ATN not the DFA. This is slower but
+ // semantically it's not used that often. One of the key elements to
+ // this predicate mechanism is not adding DFA states that see
+ // predicates immediately afterwards in the ATN. For example,
+
+ // a : ID {p1}? | ID {p2}?
+
+ // should create the start state for rule 'a' (to save start state
+ // competition), but should not create target of ID state. The
+ // collection of ATN states the following ID references includes
+ // states reached by traversing predicates. Since this is when we
+ // test them, we cannot cache the DFA state target of ID.
+
+ pt := trans.(*PredicateTransition)
+
+ if LexerATNSimulatorDebug {
+ fmt.Println("EVAL rule " + strconv.Itoa(trans.(*PredicateTransition).ruleIndex) + ":" + strconv.Itoa(pt.predIndex))
+ }
+ configs.SetHasSemanticContext(true)
+ if l.evaluatePredicate(input, pt.ruleIndex, pt.predIndex, speculative) {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ } else if trans.getSerializationType() == TransitionACTION {
+ if config.context == nil || config.context.hasEmptyPath() {
+ // execute actions anywhere in the start rule for a token.
+ //
+ // TODO: if the entry rule is invoked recursively, some
+ // actions may be executed during the recursive call. The
+ // problem can appear when hasEmptyPath() is true but
+ // isEmpty() is false. In this case, the config needs to be
+ // split into two contexts - one with just the empty path
+ // and another with everything but the empty path.
+ // Unfortunately, the current algorithm does not allow
+ // getEpsilonTarget to return two configurations, so
+ // additional modifications are needed before we can support
+ // the split operation.
+ lexerActionExecutor := LexerActionExecutorappend(config.lexerActionExecutor, l.atn.lexerActions[trans.(*ActionTransition).actionIndex])
+ cfg = NewLexerATNConfig3(config, trans.getTarget(), lexerActionExecutor)
+ } else {
+ // ignore actions in referenced rules
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ } else if trans.getSerializationType() == TransitionEPSILON {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ } else if trans.getSerializationType() == TransitionATOM ||
+ trans.getSerializationType() == TransitionRANGE ||
+ trans.getSerializationType() == TransitionSET {
+ if treatEOFAsEpsilon {
+ if trans.Matches(TokenEOF, 0, LexerMaxCharValue) {
+ cfg = NewLexerATNConfig4(config, trans.getTarget())
+ }
+ }
+ }
+ return cfg
+}
+
+// Evaluate a predicate specified in the lexer.
+//
+// If {@code speculative} is {@code true}, this method was called before
+// {@link //consume} for the Matched character. This method should call
+// {@link //consume} before evaluating the predicate to ensure position
+// sensitive values, including {@link Lexer//GetText}, {@link Lexer//GetLine},
+// and {@link Lexer//getcolumn}, properly reflect the current
+// lexer state. This method should restore {@code input} and the simulator
+// to the original state before returning (i.e. undo the actions made by the
+// call to {@link //consume}.
+//
+// @param input The input stream.
+// @param ruleIndex The rule containing the predicate.
+// @param predIndex The index of the predicate within the rule.
+// @param speculative {@code true} if the current index in {@code input} is
+// one character before the predicate's location.
+//
+// @return {@code true} if the specified predicate evaluates to
+// {@code true}.
+// /
+func (l *LexerATNSimulator) evaluatePredicate(input CharStream, ruleIndex, predIndex int, speculative bool) bool {
+ // assume true if no recognizer was provided
+ if l.recog == nil {
+ return true
+ }
+ if !speculative {
+ return l.recog.Sempred(nil, ruleIndex, predIndex)
+ }
+ savedcolumn := l.CharPositionInLine
+ savedLine := l.Line
+ index := input.Index()
+ marker := input.Mark()
+
+ defer func() {
+ l.CharPositionInLine = savedcolumn
+ l.Line = savedLine
+ input.Seek(index)
+ input.Release(marker)
+ }()
+
+ l.Consume(input)
+ return l.recog.Sempred(nil, ruleIndex, predIndex)
+}
+
+func (l *LexerATNSimulator) captureSimState(settings *SimState, input CharStream, dfaState *DFAState) {
+ settings.index = input.Index()
+ settings.line = l.Line
+ settings.column = l.CharPositionInLine
+ settings.dfaState = dfaState
+}
+
+func (l *LexerATNSimulator) addDFAEdge(from *DFAState, tk int, to *DFAState, cfgs ATNConfigSet) *DFAState {
+ if to == nil && cfgs != nil {
+ // leading to this call, ATNConfigSet.hasSemanticContext is used as a
+ // marker indicating dynamic predicate evaluation makes this edge
+ // dependent on the specific input sequence, so the static edge in the
+ // DFA should be omitted. The target DFAState is still created since
+ // execATN has the ability to reSynchronize with the DFA state cache
+ // following the predicate evaluation step.
+ //
+ // TJP notes: next time through the DFA, we see a pred again and eval.
+ // If that gets us to a previously created (but dangling) DFA
+ // state, we can continue in pure DFA mode from there.
+ // /
+ suppressEdge := cfgs.HasSemanticContext()
+ cfgs.SetHasSemanticContext(false)
+
+ to = l.addDFAState(cfgs, true)
+
+ if suppressEdge {
+ return to
+ }
+ }
+ // add the edge
+ if tk < LexerATNSimulatorMinDFAEdge || tk > LexerATNSimulatorMaxDFAEdge {
+ // Only track edges within the DFA bounds
+ return to
+ }
+ if LexerATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + strconv.Itoa(tk))
+ }
+ l.atn.edgeMu.Lock()
+ defer l.atn.edgeMu.Unlock()
+ if from.getEdges() == nil {
+ // make room for tokens 1..n and -1 masquerading as index 0
+ from.setEdges(make([]*DFAState, LexerATNSimulatorMaxDFAEdge-LexerATNSimulatorMinDFAEdge+1))
+ }
+ from.setIthEdge(tk-LexerATNSimulatorMinDFAEdge, to) // connect
+
+ return to
+}
+
+// Add a new DFA state if there isn't one with this set of
+// configurations already. This method also detects the first
+// configuration containing an ATN rule stop state. Later, when
+// traversing the DFA, we will know which rule to accept.
+func (l *LexerATNSimulator) addDFAState(configs ATNConfigSet, suppressEdge bool) *DFAState {
+
+ proposed := NewDFAState(-1, configs)
+ var firstConfigWithRuleStopState ATNConfig
+
+ for _, cfg := range configs.GetItems() {
+
+ _, ok := cfg.GetState().(*RuleStopState)
+
+ if ok {
+ firstConfigWithRuleStopState = cfg
+ break
+ }
+ }
+ if firstConfigWithRuleStopState != nil {
+ proposed.isAcceptState = true
+ proposed.lexerActionExecutor = firstConfigWithRuleStopState.(*LexerATNConfig).lexerActionExecutor
+ proposed.setPrediction(l.atn.ruleToTokenType[firstConfigWithRuleStopState.GetState().GetRuleIndex()])
+ }
+ hash := proposed.hash()
+ dfa := l.decisionToDFA[l.mode]
+
+ l.atn.stateMu.Lock()
+ defer l.atn.stateMu.Unlock()
+ existing, ok := dfa.getState(hash)
+ if ok {
+ proposed = existing
+ } else {
+ proposed.stateNumber = dfa.numStates()
+ configs.SetReadOnly(true)
+ proposed.configs = configs
+ dfa.setState(hash, proposed)
+ }
+ if !suppressEdge {
+ dfa.setS0(proposed)
+ }
+ return proposed
+}
+
+func (l *LexerATNSimulator) getDFA(mode int) *DFA {
+ return l.decisionToDFA[mode]
+}
+
+// Get the text Matched so far for the current token.
+func (l *LexerATNSimulator) GetText(input CharStream) string {
+ // index is first lookahead char, don't include.
+ return input.GetTextFromInterval(NewInterval(l.startIndex, input.Index()-1))
+}
+
+func (l *LexerATNSimulator) Consume(input CharStream) {
+ curChar := input.LA(1)
+ if curChar == int('\n') {
+ l.Line++
+ l.CharPositionInLine = 0
+ } else {
+ l.CharPositionInLine++
+ }
+ input.Consume()
+}
+
+func (l *LexerATNSimulator) GetCharPositionInLine() int {
+ return l.CharPositionInLine
+}
+
+func (l *LexerATNSimulator) GetLine() int {
+ return l.Line
+}
+
+func (l *LexerATNSimulator) GetTokenName(tt int) string {
+ if tt == -1 {
+ return "EOF"
+ }
+
+ var sb strings.Builder
+ sb.Grow(6)
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(tt))
+ sb.WriteByte('\'')
+
+ return sb.String()
+}
+
+func resetSimState(sim *SimState) {
+ sim.index = -1
+ sim.line = 0
+ sim.column = -1
+ sim.dfaState = nil
+}
+
+type SimState struct {
+ index int
+ line int
+ column int
+ dfaState *DFAState
+}
+
+func NewSimState() *SimState {
+ s := new(SimState)
+ resetSimState(s)
+ return s
+}
+
+func (s *SimState) reset() {
+ resetSimState(s)
+}
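
A key implementation detail in the simulator above is the bounded DFA edge cache: only symbols in [LexerATNSimulatorMinDFAEdge, LexerATNSimulatorMaxDFAEdge] (0..127) get a slot in a per-state edge array, and anything outside that range always falls back to full ATN simulation. The following standalone sketch shows that indexing scheme with toy types; the names are illustrative, not the vendored API.

package main

import "fmt"

// Toy model of the bounded DFA edge cache: edges for symbols in
// [minEdge, maxEdge] are stored in a fixed array indexed by sym-minEdge;
// out-of-range symbols (e.g. non-ASCII input) are never cached.
const (
	minEdge = 0
	maxEdge = 127
)

type dfaState struct {
	id    int
	edges []*dfaState // lazily allocated on the first cached edge
}

func (s *dfaState) existingTarget(sym int) *dfaState {
	if sym < minEdge || sym > maxEdge || s.edges == nil {
		return nil // not cached: caller recomputes via the ATN
	}
	return s.edges[sym-minEdge]
}

func (s *dfaState) addEdge(sym int, to *dfaState) {
	if sym < minEdge || sym > maxEdge {
		return // outside the tracked range: always recomputed
	}
	if s.edges == nil {
		s.edges = make([]*dfaState, maxEdge-minEdge+1)
	}
	s.edges[sym-minEdge] = to
}

func main() {
	s0, s1 := &dfaState{id: 0}, &dfaState{id: 1}
	s0.addEdge('a', s1)
	fmt.Println(s0.existingTarget('a').id) // 1: cache hit
	fmt.Println(s0.existingTarget('é'))    // <nil>: rune 233 > 127, handled by the ATN instead
}
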
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
new file mode 100644
index 000000000..6ffb37de6
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/ll1_analyzer.go
@@ -0,0 +1,212 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type LL1Analyzer struct {
+ atn *ATN
+}
+
+func NewLL1Analyzer(atn *ATN) *LL1Analyzer {
+ la := new(LL1Analyzer)
+ la.atn = atn
+ return la
+}
+
+//* Special value added to the lookahead sets to indicate that we hit
+// a predicate during analysis if {@code seeThruPreds==false}.
+///
+const (
+ LL1AnalyzerHitPred = TokenInvalidType
+)
+
+//*
+// Calculates the SLL(1) expected lookahead set for each outgoing transition
+// of an {@link ATNState}. The returned array has one element for each
+// outgoing transition in {@code s}. If the closure from transition
+// i leads to a semantic predicate before Matching a symbol, the
+// element at index i of the result will be {@code nil}.
+//
+// @param s the ATN state
+// @return the expected symbols for each outgoing transition of {@code s}.
+func (la *LL1Analyzer) getDecisionLookahead(s ATNState) []*IntervalSet {
+ if s == nil {
+ return nil
+ }
+ count := len(s.GetTransitions())
+ look := make([]*IntervalSet, count)
+ for alt := 0; alt < count; alt++ {
+ look[alt] = NewIntervalSet()
+ lookBusy := newArray2DHashSet(nil, nil)
+ seeThruPreds := false // fail to get lookahead upon pred
+ la.look1(s.GetTransitions()[alt].getTarget(), nil, BasePredictionContextEMPTY, look[alt], lookBusy, NewBitSet(), seeThruPreds, false)
+ // Wipe out lookahead for this alternative if we found nothing
+ // or we had a predicate when we !seeThruPreds
+ if look[alt].length() == 0 || look[alt].contains(LL1AnalyzerHitPred) {
+ look[alt] = nil
+ }
+ }
+ return look
+}
+
+//*
+// Compute set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+//
+// If {@code ctx} is {@code nil} and the end of the rule containing
+// {@code s} is reached, {@link Token//EPSILON} is added to the result set.
+// If {@code ctx} is not {@code nil} and the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
+//
+// @param s the ATN state
+// @param stopState the ATN state to stop at. This can be a
+// {@link BlockEndState} to detect epsilon paths through a closure.
+// @param ctx the complete parser context, or {@code nil} if the context
+// should be ignored
+//
+// @return The set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+///
+func (la *LL1Analyzer) Look(s, stopState ATNState, ctx RuleContext) *IntervalSet {
+ r := NewIntervalSet()
+ seeThruPreds := true // ignore preds get all lookahead
+ var lookContext PredictionContext
+ if ctx != nil {
+ lookContext = predictionContextFromRuleContext(s.GetATN(), ctx)
+ }
+ la.look1(s, stopState, lookContext, r, newArray2DHashSet(nil, nil), NewBitSet(), seeThruPreds, true)
+ return r
+}
+
+//*
+// Compute set of tokens that can follow {@code s} in the ATN in the
+// specified {@code ctx}.
+//
+// If {@code ctx} is {@code nil} and {@code stopState} or the end of the
+// rule containing {@code s} is reached, {@link Token//EPSILON} is added to
+// the result set. If {@code ctx} is not {@code nil} and {@code addEOF} is
+// {@code true} and {@code stopState} or the end of the outermost rule is
+// reached, {@link Token//EOF} is added to the result set.
+//
+// @param s the ATN state.
+// @param stopState the ATN state to stop at. This can be a
+// {@link BlockEndState} to detect epsilon paths through a closure.
+// @param ctx The outer context, or {@code nil} if the outer context should
+// not be used.
+// @param look The result lookahead set.
+// @param lookBusy A set used for preventing epsilon closures in the ATN
+// from causing a stack overflow. Outside code should pass
+// {@code NewSet} for this argument.
+// @param calledRuleStack A set used for preventing left recursion in the
+// ATN from causing a stack overflow. Outside code should pass
+// {@code NewBitSet()} for this argument.
+// @param seeThruPreds {@code true} to treat semantic predicates as
+// implicitly {@code true} and "see through them", otherwise {@code false}
+// to treat semantic predicates as opaque and add {@link //HitPred} to the
+// result if one is encountered.
+// @param addEOF Add {@link Token//EOF} to the result if the end of the
+// outermost context is reached. This parameter has no effect if {@code ctx}
+// is {@code nil}.
+
+func (la *LL1Analyzer) look2(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, i int) {
+
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look1(returnState, stopState, ctx.GetParent(i), look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
+
+func (la *LL1Analyzer) look1(s, stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool) {
+
+ c := NewBaseATNConfig6(s, 0, ctx)
+
+ if lookBusy.Contains(c) {
+ return
+ }
+
+ lookBusy.Add(c)
+
+ if s == stopState {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+ }
+
+ _, ok := s.(*RuleStopState)
+
+ if ok {
+ if ctx == nil {
+ look.addOne(TokenEpsilon)
+ return
+ } else if ctx.isEmpty() && addEOF {
+ look.addOne(TokenEOF)
+ return
+ }
+
+ if ctx != BasePredictionContextEMPTY {
+ removed := calledRuleStack.contains(s.GetRuleIndex())
+ defer func() {
+ if removed {
+ calledRuleStack.add(s.GetRuleIndex())
+ }
+ }()
+ calledRuleStack.remove(s.GetRuleIndex())
+ // run thru all possible stack tops in ctx
+ for i := 0; i < ctx.length(); i++ {
+ returnState := la.atn.states[ctx.getReturnState(i)]
+ la.look2(returnState, stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, i)
+ }
+ return
+ }
+ }
+
+ n := len(s.GetTransitions())
+
+ for i := 0; i < n; i++ {
+ t := s.GetTransitions()[i]
+
+ if t1, ok := t.(*RuleTransition); ok {
+ if calledRuleStack.contains(t1.getTarget().GetRuleIndex()) {
+ continue
+ }
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+ la.look3(stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF, t1)
+ } else if t2, ok := t.(AbstractPredicateTransition); ok {
+ if seeThruPreds {
+ la.look1(t2.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else {
+ look.addOne(LL1AnalyzerHitPred)
+ }
+ } else if t.getIsEpsilon() {
+ la.look1(t.getTarget(), stopState, ctx, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+ } else if _, ok := t.(*WildcardTransition); ok {
+ look.addRange(TokenMinUserTokenType, la.atn.maxTokenType)
+ } else {
+ set := t.getLabel()
+ if set != nil {
+ if _, ok := t.(*NotSetTransition); ok {
+ set = set.complement(TokenMinUserTokenType, la.atn.maxTokenType)
+ }
+ look.addSet(set)
+ }
+ }
+ }
+}
+
+func (la *LL1Analyzer) look3(stopState ATNState, ctx PredictionContext, look *IntervalSet, lookBusy Set, calledRuleStack *BitSet, seeThruPreds, addEOF bool, t1 *RuleTransition) {
+
+ newContext := SingletonBasePredictionContextCreate(ctx, t1.followState.GetStateNumber())
+
+ defer func() {
+ calledRuleStack.remove(t1.getTarget().GetRuleIndex())
+ }()
+
+ calledRuleStack.add(t1.getTarget().GetRuleIndex())
+ la.look1(t1.getTarget(), stopState, newContext, look, lookBusy, calledRuleStack, seeThruPreds, addEOF)
+
+}
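
The Look/look1 walk above computes the set of tokens that can appear next from an ATN state by following epsilon transitions transitively, collecting terminal transitions, and using a busy set so epsilon loops terminate. The standalone sketch below captures that core shape on a hypothetical state graph; it is a simplification (no rule-call stack or EOF handling), not the vendored algorithm.

package main

import "fmt"

// Toy lookahead walk: follow epsilon edges, collect terminal labels, and use
// a "busy" set so cycles of epsilon edges do not recurse forever.
type edge struct {
	epsilon bool
	label   string // terminal label when epsilon == false
	to      int
}

func look(states map[int][]edge, s int, busy map[int]bool, out map[string]bool) {
	if busy[s] {
		return
	}
	busy[s] = true
	for _, e := range states[s] {
		if e.epsilon {
			look(states, e.to, busy, out) // see through epsilon transitions
		} else {
			out[e.label] = true // a token that can appear next
		}
	}
}

func main() {
	// 0 -ε-> 1, 0 -ε-> 2, 1 -'ID'-> 3, 2 -'INT'-> 3, 2 -ε-> 0 (an epsilon loop)
	states := map[int][]edge{
		0: {{epsilon: true, to: 1}, {epsilon: true, to: 2}},
		1: {{label: "ID", to: 3}},
		2: {{label: "INT", to: 3}, {epsilon: true, to: 0}},
	}
	out := map[string]bool{}
	look(states, 0, map[int]bool{}, out)
	fmt.Println(out) // map[ID:true INT:true]
}
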
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
new file mode 100644
index 000000000..2ab2f5605
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser.go
@@ -0,0 +1,718 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+type Parser interface {
+ Recognizer
+
+ GetInterpreter() *ParserATNSimulator
+
+ GetTokenStream() TokenStream
+ GetTokenFactory() TokenFactory
+ GetParserRuleContext() ParserRuleContext
+ SetParserRuleContext(ParserRuleContext)
+ Consume() Token
+ GetParseListeners() []ParseTreeListener
+
+ GetErrorHandler() ErrorStrategy
+ SetErrorHandler(ErrorStrategy)
+ GetInputStream() IntStream
+ GetCurrentToken() Token
+ GetExpectedTokens() *IntervalSet
+ NotifyErrorListeners(string, Token, RecognitionException)
+ IsExpectedToken(int) bool
+ GetPrecedence() int
+ GetRuleInvocationStack(ParserRuleContext) []string
+}
+
+type BaseParser struct {
+ *BaseRecognizer
+
+ Interpreter *ParserATNSimulator
+ BuildParseTrees bool
+
+ input TokenStream
+ errHandler ErrorStrategy
+ precedenceStack IntStack
+ ctx ParserRuleContext
+
+ tracer *TraceListener
+ parseListeners []ParseTreeListener
+ _SyntaxErrors int
+}
+
+// This is all the parsing support code; essentially, most of it is error
+// recovery stuff.//
+func NewBaseParser(input TokenStream) *BaseParser {
+
+ p := new(BaseParser)
+
+ p.BaseRecognizer = NewBaseRecognizer()
+
+ // The input stream.
+ p.input = nil
+ // The error handling strategy for the parser. The default value is a new
+ // instance of {@link DefaultErrorStrategy}.
+ p.errHandler = NewDefaultErrorStrategy()
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+ // The {@link ParserRuleContext} object for the currently executing rule.
+ // This is always non-nil during the parsing process.
+ p.ctx = nil
+ // Specifies whether or not the parser should construct a parse tree during
+ // the parsing process. The default value is {@code true}.
+ p.BuildParseTrees = true
+ // When {@link //setTrace}{@code (true)} is called, a reference to the
+ // {@link TraceListener} is stored here so it can be easily removed in a
+ // later call to {@link //setTrace}{@code (false)}. The listener itself is
+ // implemented as a parser listener so this field is not directly used by
+ // other parser methods.
+ p.tracer = nil
+ // The list of {@link ParseTreeListener} listeners registered to receive
+ // events during the parse.
+ p.parseListeners = nil
+ // The number of syntax errors Reported during parsing. p.value is
+ // incremented each time {@link //NotifyErrorListeners} is called.
+ p._SyntaxErrors = 0
+ p.SetInputStream(input)
+
+ return p
+}
+
+// This field maps from the serialized ATN string to the deserialized
+// {@link ATN} with
+// bypass alternatives.
+//
+// @see ATNDeserializationOptions//isGenerateRuleBypassTransitions()
+//
+var bypassAltsAtnCache = make(map[string]int)
+
+// reset the parser's state//
+func (p *BaseParser) reset() {
+ if p.input != nil {
+ p.input.Seek(0)
+ }
+ p.errHandler.reset(p)
+ p.ctx = nil
+ p._SyntaxErrors = 0
+ p.SetTrace(nil)
+ p.precedenceStack = make([]int, 0)
+ p.precedenceStack.Push(0)
+ if p.Interpreter != nil {
+ p.Interpreter.reset()
+ }
+}
+
+func (p *BaseParser) GetErrorHandler() ErrorStrategy {
+ return p.errHandler
+}
+
+func (p *BaseParser) SetErrorHandler(e ErrorStrategy) {
+ p.errHandler = e
+}
+
+// Match current input symbol against {@code ttype}. If the symbol type
+// Matches, {@link ANTLRErrorStrategy//ReportMatch} and {@link //consume} are
+// called to complete the Match process.
+//
+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+//
+// @param ttype the token type to Match
+// @return the Matched symbol
+// @panics RecognitionException if the current input symbol did not Match
+// {@code ttype} and the error strategy could not recover from the
+// mismatched symbol
+
+func (p *BaseParser) Match(ttype int) Token {
+
+ t := p.GetCurrentToken()
+
+ if t.GetTokenType() == ttype {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+ // we must have conjured up a new token during single token
+ // insertion
+ // if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+
+ return t
+}
+
+// Match current input symbol as a wildcard. If the symbol type Matches
+// (i.e. has a value greater than 0), {@link ANTLRErrorStrategy//ReportMatch}
+// and {@link //consume} are called to complete the Match process.
+//
+// If the symbol type does not Match,
+// {@link ANTLRErrorStrategy//recoverInline} is called on the current error
+// strategy to attempt recovery. If {@link //getBuildParseTree} is
+// {@code true} and the token index of the symbol returned by
+// {@link ANTLRErrorStrategy//recoverInline} is -1, the symbol is added to
+// the parse tree by calling {@link ParserRuleContext//addErrorNode}.
+//
+// @return the Matched symbol
+// @panics RecognitionException if the current input symbol did not Match
+// a wildcard and the error strategy could not recover from the mismatched
+// symbol
+
+func (p *BaseParser) MatchWildcard() Token {
+ t := p.GetCurrentToken()
+ if t.GetTokenType() > 0 {
+ p.errHandler.ReportMatch(p)
+ p.Consume()
+ } else {
+ t = p.errHandler.RecoverInline(p)
+ if p.BuildParseTrees && t.GetTokenIndex() == -1 {
+ // we must have conjured up a new token during single token
+ // insertion
+ // if it's not the current symbol
+ p.ctx.AddErrorNode(t)
+ }
+ }
+ return t
+}
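
Match and MatchWildcard above either consume the current token when its type fits, or hand control to the error strategy, which may "conjure up" a missing token so parsing can continue without consuming input. The standalone sketch below shows that flow in miniature; the parser and token types are hypothetical stand-ins, not the vendored Parser API.

package main

import "fmt"

// Toy model of the Match flow: consume on a type match, otherwise fall back
// to a single-token-insertion style recovery that fabricates a placeholder.
type token struct {
	typ      int
	text     string
	conjured bool
}

type parser struct {
	tokens []token
	pos    int
}

func (p *parser) current() token { return p.tokens[p.pos] }

func (p *parser) match(ttype int) token {
	t := p.current()
	if t.typ == ttype {
		p.pos++ // normal path: report the match and consume
		return t
	}
	// error path: pretend the missing token was there; input is not consumed
	return token{typ: ttype, text: "<missing>", conjured: true}
}

func main() {
	p := &parser{tokens: []token{{typ: 1, text: "x"}, {typ: 3, text: ";"}}}
	fmt.Println(p.match(1)) // {1 x false}
	fmt.Println(p.match(2)) // {2 <missing> true} – recovered inline
	fmt.Println(p.match(3)) // {3 ; false}
}
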
+
+func (p *BaseParser) GetParserRuleContext() ParserRuleContext {
+ return p.ctx
+}
+
+func (p *BaseParser) SetParserRuleContext(v ParserRuleContext) {
+ p.ctx = v
+}
+
+func (p *BaseParser) GetParseListeners() []ParseTreeListener {
+ if p.parseListeners == nil {
+ return make([]ParseTreeListener, 0)
+ }
+ return p.parseListeners
+}
+
+// Registers {@code listener} to receive events during the parsing process.
+//
+// To support output-preserving grammar transformations (including but not
+// limited to left-recursion removal, automated left-factoring, and
+// optimized code generation), calls to listener methods during the parse
+// may differ substantially from calls made by
+// {@link ParseTreeWalker//DEFAULT} used after the parse is complete. In
+// particular, rule entry and exit events may occur in a different order
+// during the parse than after the parser. In addition, calls to certain
+// rule entry methods may be omitted.
+//
+// With the following specific exceptions, calls to listener events are
+// deterministic, i.e. for identical input the calls to listener
+// methods will be the same.
+//
+//
+// - Alterations to the grammar used to generate code may change the
+// behavior of the listener calls.
+// - Alterations to the command line options passed to ANTLR 4 when
+// generating the parser may change the behavior of the listener calls.
+// - Changing the version of the ANTLR Tool used to generate the parser
+// may change the behavior of the listener calls.
+//
+//
+// @param listener the listener to add
+//
+// @panics nilPointerException if {@code listener} is {@code nil}
+//
+func (p *BaseParser) AddParseListener(listener ParseTreeListener) {
+ if listener == nil {
+ panic("listener")
+ }
+ if p.parseListeners == nil {
+ p.parseListeners = make([]ParseTreeListener, 0)
+ }
+ p.parseListeners = append(p.parseListeners, listener)
+}
+
+//
+// Remove {@code listener} from the list of parse listeners.
+//
+// If {@code listener} is {@code nil} or has not been added as a parse
+// listener, this method does nothing.
+// @param listener the listener to remove
+//
+func (p *BaseParser) RemoveParseListener(listener ParseTreeListener) {
+
+ if p.parseListeners != nil {
+
+ idx := -1
+ for i, v := range p.parseListeners {
+ if v == listener {
+ idx = i
+ break
+ }
+ }
+
+ if idx == -1 {
+ return
+ }
+
+ // remove the listener from the slice
+ p.parseListeners = append(p.parseListeners[0:idx], p.parseListeners[idx+1:]...)
+
+ if len(p.parseListeners) == 0 {
+ p.parseListeners = nil
+ }
+ }
+}
+
+// Remove all parse listeners.
+func (p *BaseParser) removeParseListeners() {
+ p.parseListeners = nil
+}
+
+// Notify any parse listeners of an enter rule event.
+func (p *BaseParser) TriggerEnterRuleEvent() {
+ if p.parseListeners != nil {
+ ctx := p.ctx
+ for _, listener := range p.parseListeners {
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+ }
+ }
+}
+
+//
+// Notify any parse listeners of an exit rule event.
+//
+// @see //addParseListener
+//
+func (p *BaseParser) TriggerExitRuleEvent() {
+ if p.parseListeners != nil {
+ // reverse order walk of listeners
+ ctx := p.ctx
+ l := len(p.parseListeners) - 1
+
+ for i := range p.parseListeners {
+ listener := p.parseListeners[l-i]
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+ }
+ }
+}
+
+func (p *BaseParser) GetInterpreter() *ParserATNSimulator {
+ return p.Interpreter
+}
+
+func (p *BaseParser) GetATN() *ATN {
+ return p.Interpreter.atn
+}
+
+func (p *BaseParser) GetTokenFactory() TokenFactory {
+ return p.input.GetTokenSource().GetTokenFactory()
+}
+
+// Tell our token source and error strategy about a new way to create tokens.//
+func (p *BaseParser) setTokenFactory(factory TokenFactory) {
+ p.input.GetTokenSource().setTokenFactory(factory)
+}
+
+// The ATN with bypass alternatives is expensive to create so we create it
+// lazily.
+//
+// @panics UnsupportedOperationException if the current parser does not
+// implement the {@link //getSerializedATN()} method.
+//
+func (p *BaseParser) GetATNWithBypassAlts() {
+
+ // TODO
+ panic("Not implemented!")
+
+ // serializedAtn := p.getSerializedATN()
+ // if (serializedAtn == nil) {
+ // panic("The current parser does not support an ATN with bypass alternatives.")
+ // }
+ // result := p.bypassAltsAtnCache[serializedAtn]
+ // if (result == nil) {
+ // deserializationOptions := NewATNDeserializationOptions(nil)
+ // deserializationOptions.generateRuleBypassTransitions = true
+ // result = NewATNDeserializer(deserializationOptions).deserialize(serializedAtn)
+ // p.bypassAltsAtnCache[serializedAtn] = result
+ // }
+ // return result
+}
+
+// The preferred method of getting a tree pattern. For example, here's a
+// sample use:
+//
+//
+// ParseTree t = parser.expr()
+// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0",
+// MyParser.RULE_expr)
+// ParseTreeMatch m = p.Match(t)
+// String id = m.Get("ID")
+//
+
+func (p *BaseParser) compileParseTreePattern(pattern, patternRuleIndex, lexer Lexer) {
+
+ panic("NewParseTreePatternMatcher not implemented!")
+ //
+ // if (lexer == nil) {
+ // if (p.GetTokenStream() != nil) {
+ // tokenSource := p.GetTokenStream().GetTokenSource()
+ // if _, ok := tokenSource.(ILexer); ok {
+ // lexer = tokenSource
+ // }
+ // }
+ // }
+ // if (lexer == nil) {
+ // panic("Parser can't discover a lexer to use")
+ // }
+
+ // m := NewParseTreePatternMatcher(lexer, p)
+ // return m.compile(pattern, patternRuleIndex)
+}
+
+func (p *BaseParser) GetInputStream() IntStream {
+ return p.GetTokenStream()
+}
+
+func (p *BaseParser) SetInputStream(input TokenStream) {
+ p.SetTokenStream(input)
+}
+
+func (p *BaseParser) GetTokenStream() TokenStream {
+ return p.input
+}
+
+// Set the token stream and reset the parser.//
+func (p *BaseParser) SetTokenStream(input TokenStream) {
+ p.input = nil
+ p.reset()
+ p.input = input
+}
+
+// Match needs to return the current input symbol, which gets put
+// into the label for the associated token ref e.g., x=ID.
+//
+func (p *BaseParser) GetCurrentToken() Token {
+ return p.input.LT(1)
+}
+
+func (p *BaseParser) NotifyErrorListeners(msg string, offendingToken Token, err RecognitionException) {
+ if offendingToken == nil {
+ offendingToken = p.GetCurrentToken()
+ }
+ p._SyntaxErrors++
+ line := offendingToken.GetLine()
+ column := offendingToken.GetColumn()
+ listener := p.GetErrorListenerDispatch()
+ listener.SyntaxError(p, offendingToken, line, column, msg, err)
+}
+
+func (p *BaseParser) Consume() Token {
+ o := p.GetCurrentToken()
+ if o.GetTokenType() != TokenEOF {
+ p.GetInputStream().Consume()
+ }
+ hasListener := p.parseListeners != nil && len(p.parseListeners) > 0
+ if p.BuildParseTrees || hasListener {
+ if p.errHandler.InErrorRecoveryMode(p) {
+ node := p.ctx.AddErrorNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitErrorNode(node)
+ }
+ }
+
+ } else {
+ node := p.ctx.AddTokenNode(o)
+ if p.parseListeners != nil {
+ for _, l := range p.parseListeners {
+ l.VisitTerminal(node)
+ }
+ }
+ }
+ // node.invokingState = p.state
+ }
+
+ return o
+}
+
+func (p *BaseParser) addContextToParseTree() {
+ // add current context to parent if we have a parent
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).AddChild(p.ctx)
+ }
+}
+
+func (p *BaseParser) EnterRule(localctx ParserRuleContext, state, ruleIndex int) {
+ p.SetState(state)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.BuildParseTrees {
+ p.addContextToParseTree()
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent()
+ }
+}
+
+func (p *BaseParser) ExitRule() {
+ p.ctx.SetStop(p.input.LT(-1))
+ // trigger event on ctx, before it reverts to parent
+ if p.parseListeners != nil {
+ p.TriggerExitRuleEvent()
+ }
+ p.SetState(p.ctx.GetInvokingState())
+ if p.ctx.GetParent() != nil {
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ } else {
+ p.ctx = nil
+ }
+}
+
+func (p *BaseParser) EnterOuterAlt(localctx ParserRuleContext, altNum int) {
+ localctx.SetAltNumber(altNum)
+	// if we have a new localctx, make sure we replace existing ctx
+ // that is previous child of parse tree
+ if p.BuildParseTrees && p.ctx != localctx {
+ if p.ctx.GetParent() != nil {
+ p.ctx.GetParent().(ParserRuleContext).RemoveLastChild()
+ p.ctx.GetParent().(ParserRuleContext).AddChild(localctx)
+ }
+ }
+ p.ctx = localctx
+}
+
+// Get the precedence level for the top-most precedence rule.
+//
+// @return The precedence level for the top-most precedence rule, or -1 if
+// the parser context is not nested within a precedence rule.
+
+func (p *BaseParser) GetPrecedence() int {
+ if len(p.precedenceStack) == 0 {
+ return -1
+ }
+
+ return p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+func (p *BaseParser) EnterRecursionRule(localctx ParserRuleContext, state, ruleIndex, precedence int) {
+ p.SetState(state)
+ p.precedenceStack.Push(precedence)
+ p.ctx = localctx
+ p.ctx.SetStart(p.input.LT(1))
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+//
+// Like {@link //EnterRule} but for recursive rules.
+
+func (p *BaseParser) PushNewRecursionContext(localctx ParserRuleContext, state, ruleIndex int) {
+ previous := p.ctx
+ previous.SetParent(localctx)
+ previous.SetInvokingState(state)
+ previous.SetStop(p.input.LT(-1))
+
+ p.ctx = localctx
+ p.ctx.SetStart(previous.GetStart())
+ if p.BuildParseTrees {
+ p.ctx.AddChild(previous)
+ }
+ if p.parseListeners != nil {
+ p.TriggerEnterRuleEvent() // simulates rule entry for
+ // left-recursive rules
+ }
+}
+
+func (p *BaseParser) UnrollRecursionContexts(parentCtx ParserRuleContext) {
+ p.precedenceStack.Pop()
+ p.ctx.SetStop(p.input.LT(-1))
+ retCtx := p.ctx // save current ctx (return value)
+ // unroll so ctx is as it was before call to recursive method
+ if p.parseListeners != nil {
+ for p.ctx != parentCtx {
+ p.TriggerExitRuleEvent()
+ p.ctx = p.ctx.GetParent().(ParserRuleContext)
+ }
+ } else {
+ p.ctx = parentCtx
+ }
+ // hook into tree
+ retCtx.SetParent(parentCtx)
+ if p.BuildParseTrees && parentCtx != nil {
+ // add return ctx into invoking rule's tree
+ parentCtx.AddChild(retCtx)
+ }
+}
+
+func (p *BaseParser) GetInvokingContext(ruleIndex int) ParserRuleContext {
+ ctx := p.ctx
+ for ctx != nil {
+ if ctx.GetRuleIndex() == ruleIndex {
+ return ctx
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ return nil
+}
+
+func (p *BaseParser) Precpred(localctx RuleContext, precedence int) bool {
+ return precedence >= p.precedenceStack[len(p.precedenceStack)-1]
+}
+
+func (p *BaseParser) inContext(context ParserRuleContext) bool {
+ // TODO: useful in parser?
+ return false
+}
+
+//
+// Checks whether or not {@code symbol} can follow the current state in the
+// ATN. The behavior of this method is equivalent to the following, but is
+// implemented such that the complete context-sensitive follow set does not
+// need to be explicitly constructed.
+//
+//
+// return getExpectedTokens().contains(symbol)
+//
+//
+// @param symbol the symbol type to check
+// @return {@code true} if {@code symbol} can follow the current state in
+// the ATN, otherwise {@code false}.
+
+func (p *BaseParser) IsExpectedToken(symbol int) bool {
+ atn := p.Interpreter.atn
+ ctx := p.ctx
+ s := atn.states[p.state]
+ following := atn.NextTokens(s, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ if !following.contains(TokenEpsilon) {
+ return false
+ }
+ for ctx != nil && ctx.GetInvokingState() >= 0 && following.contains(TokenEpsilon) {
+ invokingState := atn.states[ctx.GetInvokingState()]
+ rt := invokingState.GetTransitions()[0]
+ following = atn.NextTokens(rt.(*RuleTransition).followState, nil)
+ if following.contains(symbol) {
+ return true
+ }
+ ctx = ctx.GetParent().(ParserRuleContext)
+ }
+ if following.contains(TokenEpsilon) && symbol == TokenEOF {
+ return true
+ }
+
+ return false
+}
+
+// Computes the set of input symbols which could follow the current parser
+// state and context, as given by {@link //GetState} and {@link //GetContext},
+// respectively.
+//
+// @see ATN//getExpectedTokens(int, RuleContext)
+//
+func (p *BaseParser) GetExpectedTokens() *IntervalSet {
+ return p.Interpreter.atn.getExpectedTokens(p.state, p.ctx)
+}
+
+func (p *BaseParser) GetExpectedTokensWithinCurrentRule() *IntervalSet {
+ atn := p.Interpreter.atn
+ s := atn.states[p.state]
+ return atn.NextTokens(s, nil)
+}
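+
+// Illustrative sketch, not part of the runtime: the expected-token sets
+// above are useful for building friendlier error messages. Assuming a
+// generated parser p embedding BaseParser:
+//
+//	expected := p.GetExpectedTokens()
+//	fmt.Println("expected one of: " + expected.String())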
+
+// Get a rule's index (i.e., {@code RULE_ruleName} field) or -1 if not found.//
+func (p *BaseParser) GetRuleIndex(ruleName string) int {
+ var ruleIndex, ok = p.GetRuleIndexMap()[ruleName]
+ if ok {
+ return ruleIndex
+ }
+
+ return -1
+}
+
+// Return a list of the rule names in your parser instance
+// leading up to a call to the current rule. You could override if
+// you want more details such as the file/line info of where
+// in the ATN a rule is invoked.
+//
+// This is very useful for error messages.
+
+func (p *BaseParser) GetRuleInvocationStack(c ParserRuleContext) []string {
+ if c == nil {
+ c = p.ctx
+ }
+ stack := make([]string, 0)
+ for c != nil {
+ // compute what follows who invoked us
+ ruleIndex := c.GetRuleIndex()
+ if ruleIndex < 0 {
+ stack = append(stack, "n/a")
+ } else {
+ stack = append(stack, p.GetRuleNames()[ruleIndex])
+ }
+
+ vp := c.GetParent()
+
+ if vp == nil {
+ break
+ }
+
+ c = vp.(ParserRuleContext)
+ }
+ return stack
+}
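+
+// Illustrative sketch, not part of the runtime: the invocation stack is
+// mostly used when reporting errors from a generated parser p:
+//
+//	stack := p.GetRuleInvocationStack(nil)
+//	fmt.Printf("rule stack: %v\n", stack)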
+
+// For debugging and other purposes.//
+func (p *BaseParser) GetDFAStrings() string {
+ return fmt.Sprint(p.Interpreter.decisionToDFA)
+}
+
+// For debugging and other purposes.//
+func (p *BaseParser) DumpDFA() {
+ seenOne := false
+ for _, dfa := range p.Interpreter.decisionToDFA {
+ if dfa.numStates() > 0 {
+ if seenOne {
+ fmt.Println()
+ }
+ fmt.Println("Decision " + strconv.Itoa(dfa.decision) + ":")
+ fmt.Print(dfa.String(p.LiteralNames, p.SymbolicNames))
+ seenOne = true
+ }
+ }
+}
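+
+// Illustrative sketch, not part of the runtime: DumpDFA is normally
+// called after parsing to see which decision DFAs were populated
+// (Expr is a hypothetical generated rule method):
+//
+//	tree := p.Expr()
+//	p.DumpDFA()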
+
+func (p *BaseParser) GetSourceName() string {
+ return p.GrammarFileName
+}
+
+// During a parse it is sometimes useful to listen in on the rule entry and exit
+// events as well as token matches. This is for quick-and-dirty debugging.
+//
+func (p *BaseParser) SetTrace(trace *TraceListener) {
+ if trace == nil {
+ p.RemoveParseListener(p.tracer)
+ p.tracer = nil
+ } else {
+ if p.tracer != nil {
+ p.RemoveParseListener(p.tracer)
+ }
+ p.tracer = NewTraceListener(p)
+ p.AddParseListener(p.tracer)
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
new file mode 100644
index 000000000..888d51297
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_atn_simulator.go
@@ -0,0 +1,1544 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+var (
+ ParserATNSimulatorDebug = false
+ ParserATNSimulatorListATNDecisions = false
+ ParserATNSimulatorDFADebug = false
+ ParserATNSimulatorRetryDebug = false
+ TurnOffLRLoopEntryBranchOpt = false
+)
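+
+// Illustrative sketch, not part of the runtime's behavior: these
+// package-level flags can be toggled from client code to trace
+// prediction on stdout, e.g.
+//
+//	antlr.ParserATNSimulatorDebug = true
+//	antlr.ParserATNSimulatorDFADebug = true
+//	// ... run the parse, then turn the flags back off ...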
+
+type ParserATNSimulator struct {
+ *BaseATNSimulator
+
+ parser Parser
+ predictionMode int
+ input TokenStream
+ startIndex int
+ dfa *DFA
+ mergeCache *DoubleDict
+ outerContext ParserRuleContext
+}
+
+func NewParserATNSimulator(parser Parser, atn *ATN, decisionToDFA []*DFA, sharedContextCache *PredictionContextCache) *ParserATNSimulator {
+
+ p := new(ParserATNSimulator)
+
+ p.BaseATNSimulator = NewBaseATNSimulator(atn, sharedContextCache)
+
+ p.parser = parser
+ p.decisionToDFA = decisionToDFA
+ // SLL, LL, or LL + exact ambig detection?//
+ p.predictionMode = PredictionModeLL
+ // LAME globals to avoid parameters!!!!! I need these down deep in predTransition
+ p.input = nil
+ p.startIndex = 0
+ p.outerContext = nil
+ p.dfa = nil
+ // Each prediction operation uses a cache for merge of prediction contexts.
+ // Don't keep around as it wastes huge amounts of memory. DoubleKeyMap
+ // isn't Synchronized but we're ok since two threads shouldn't reuse same
+ // parser/atnsim object because it can only handle one input at a time.
+	// This maps graphs a and b to merged result c. (a,b)→c. We can avoid
+	// the merge if we ever see a and b again. Note that (b,a)→c should
+ // also be examined during cache lookup.
+ //
+ p.mergeCache = nil
+
+ return p
+}
+
+func (p *ParserATNSimulator) GetPredictionMode() int {
+ return p.predictionMode
+}
+
+func (p *ParserATNSimulator) SetPredictionMode(v int) {
+ p.predictionMode = v
+}
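+
+// Illustrative sketch, not part of the runtime: a common client-side
+// pattern is the two-stage parse, which tries the faster SLL prediction
+// mode first and falls back to full LL only when SLL fails. The parser
+// type and rule method (ExprParser, Expr) are hypothetical; the bail-out
+// strategy reports failure by panicking, so stage one is normally
+// wrapped in a recover.
+//
+//	p.GetInterpreter().SetPredictionMode(antlr.PredictionModeSLL)
+//	p.SetErrorHandler(antlr.NewBailErrorStrategy())
+//	tree := p.Expr()
+//	// on panic: rewind the token stream, restore
+//	// antlr.NewDefaultErrorStrategy(), switch the interpreter to
+//	// antlr.PredictionModeLL, and parse again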
+
+func (p *ParserATNSimulator) reset() {
+}
+
+func (p *ParserATNSimulator) AdaptivePredict(input TokenStream, decision int, outerContext ParserRuleContext) int {
+ if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ fmt.Println("AdaptivePredict decision " + strconv.Itoa(decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" +
+ strconv.Itoa(input.LT(1).GetColumn()))
+ }
+
+ p.input = input
+ p.startIndex = input.Index()
+ p.outerContext = outerContext
+
+ dfa := p.decisionToDFA[decision]
+ p.dfa = dfa
+ m := input.Mark()
+ index := input.Index()
+
+ defer func() {
+ p.dfa = nil
+ p.mergeCache = nil // wack cache after each prediction
+ input.Seek(index)
+ input.Release(m)
+ }()
+
+ // Now we are certain to have a specific decision's DFA
+ // But, do we still need an initial state?
+ var s0 *DFAState
+ p.atn.stateMu.RLock()
+ if dfa.getPrecedenceDfa() {
+ p.atn.edgeMu.RLock()
+ // the start state for a precedence DFA depends on the current
+ // parser precedence, and is provided by a DFA method.
+ s0 = dfa.getPrecedenceStartState(p.parser.GetPrecedence())
+ p.atn.edgeMu.RUnlock()
+ } else {
+ // the start state for a "regular" DFA is just s0
+ s0 = dfa.getS0()
+ }
+ p.atn.stateMu.RUnlock()
+
+ if s0 == nil {
+ if outerContext == nil {
+ outerContext = RuleContextEmpty
+ }
+ if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ fmt.Println("predictATN decision " + strconv.Itoa(dfa.decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ ", outerContext=" + outerContext.String(p.parser.GetRuleNames(), nil))
+ }
+ fullCtx := false
+ s0Closure := p.computeStartState(dfa.atnStartState, RuleContextEmpty, fullCtx)
+
+ p.atn.stateMu.Lock()
+ if dfa.getPrecedenceDfa() {
+ // If p is a precedence DFA, we use applyPrecedenceFilter
+ // to convert the computed start state to a precedence start
+ // state. We then use DFA.setPrecedenceStartState to set the
+ // appropriate start state for the precedence level rather
+ // than simply setting DFA.s0.
+ //
+ dfa.s0.configs = s0Closure
+ s0Closure = p.applyPrecedenceFilter(s0Closure)
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ p.atn.edgeMu.Lock()
+ dfa.setPrecedenceStartState(p.parser.GetPrecedence(), s0)
+ p.atn.edgeMu.Unlock()
+ } else {
+ s0 = p.addDFAState(dfa, NewDFAState(-1, s0Closure))
+ dfa.setS0(s0)
+ }
+ p.atn.stateMu.Unlock()
+ }
+
+ alt := p.execATN(dfa, s0, input, index, outerContext)
+ if ParserATNSimulatorDebug {
+ fmt.Println("DFA after predictATN: " + dfa.String(p.parser.GetLiteralNames(), nil))
+ }
+ return alt
+
+}
+
+// Performs ATN simulation to compute a predicted alternative based
+// upon the remaining input, but also updates the DFA cache to avoid
+// having to traverse the ATN again for the same input sequence.
+
+// There are some key conditions we're looking for after computing a new
+// set of ATN configs (proposed DFA state):
+// if the set is empty, there is no viable alternative for current symbol
+// does the state uniquely predict an alternative?
+// does the state have a conflict that would prevent us from
+// putting it on the work list?
+
+// We also have some key operations to do:
+// add an edge from previous DFA state to a potentially new DFA state, D,
+// upon current symbol but only if adding to work list, which means in all
+// cases except no viable alternative (and possibly non-greedy decisions?)
+// collecting predicates and adding semantic context to DFA accept states
+// adding rule context to context-sensitive DFA accept states
+// consuming an input symbol
+// Reporting a conflict
+// Reporting an ambiguity
+// Reporting a context sensitivity
+// Reporting insufficient predicates
+
+// cover these cases:
+// dead end
+// single alt
+// single alt + preds
+// conflict
+// conflict + preds
+//
+func (p *ParserATNSimulator) execATN(dfa *DFA, s0 *DFAState, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
+
+ if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ fmt.Println("execATN decision " + strconv.Itoa(dfa.decision) +
+ " exec LA(1)==" + p.getLookaheadName(input) +
+ " line " + strconv.Itoa(input.LT(1).GetLine()) + ":" + strconv.Itoa(input.LT(1).GetColumn()))
+ }
+
+ previousD := s0
+
+ if ParserATNSimulatorDebug {
+ fmt.Println("s0 = " + s0.String())
+ }
+ t := input.LA(1)
+ for { // for more work
+ D := p.getExistingTargetState(previousD, t)
+ if D == nil {
+ D = p.computeTargetState(dfa, previousD, t)
+ }
+ if D == ATNSimulatorError {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for SLL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previousD.configs, startIndex)
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD.configs, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt
+ }
+
+ panic(e)
+ }
+ if D.requiresFullContext && p.predictionMode != PredictionModeSLL {
+ // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
+ conflictingAlts := D.configs.GetConflictingAlts()
+ if D.predicates != nil {
+ if ParserATNSimulatorDebug {
+ fmt.Println("DFA state has preds in DFA sim LL failover")
+ }
+ conflictIndex := input.Index()
+ if conflictIndex != startIndex {
+ input.Seek(startIndex)
+ }
+ conflictingAlts = p.evalSemanticContext(D.predicates, outerContext, true)
+ if conflictingAlts.length() == 1 {
+ if ParserATNSimulatorDebug {
+ fmt.Println("Full LL avoided")
+ }
+ return conflictingAlts.minValue()
+ }
+ if conflictIndex != startIndex {
+ // restore the index so Reporting the fallback to full
+ // context occurs with the index at the correct spot
+ input.Seek(conflictIndex)
+ }
+ }
+ if ParserATNSimulatorDFADebug {
+ fmt.Println("ctx sensitive state " + outerContext.String(nil, nil) + " in " + D.String())
+ }
+ fullCtx := true
+ s0Closure := p.computeStartState(dfa.atnStartState, outerContext, fullCtx)
+ p.ReportAttemptingFullContext(dfa, conflictingAlts, D.configs, startIndex, input.Index())
+ alt := p.execATNWithFullContext(dfa, D, s0Closure, input, startIndex, outerContext)
+ return alt
+ }
+ if D.isAcceptState {
+ if D.predicates == nil {
+ return D.prediction
+ }
+ stopIndex := input.Index()
+ input.Seek(startIndex)
+ alts := p.evalSemanticContext(D.predicates, outerContext, true)
+
+ switch alts.length() {
+ case 0:
+ panic(p.noViableAlt(input, outerContext, D.configs, startIndex))
+ case 1:
+ return alts.minValue()
+ default:
+ // Report ambiguity after predicate evaluation to make sure the correct set of ambig alts is Reported.
+ p.ReportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D.configs)
+ return alts.minValue()
+ }
+ }
+ previousD = D
+
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+
+	panic("Should not have reached this state")
+}
+
+// Get an existing target state for an edge in the DFA. If the target state
+// for the edge has not yet been computed or is otherwise not available,
+// this method returns {@code nil}.
+//
+// @param previousD The current DFA state
+// @param t The next input symbol
+// @return The existing target DFA state for the given input symbol
+// {@code t}, or {@code nil} if the target state for this edge is not
+// already cached
+
+func (p *ParserATNSimulator) getExistingTargetState(previousD *DFAState, t int) *DFAState {
+ if t+1 < 0 {
+ return nil
+ }
+
+ p.atn.edgeMu.RLock()
+ defer p.atn.edgeMu.RUnlock()
+ edges := previousD.getEdges()
+ if edges == nil || t+1 >= len(edges) {
+ return nil
+ }
+ return previousD.getIthEdge(t + 1)
+}
+
+// Compute a target state for an edge in the DFA, and attempt to add the
+// computed state and corresponding edge to the DFA.
+//
+// @param dfa The DFA
+// @param previousD The current DFA state
+// @param t The next input symbol
+//
+// @return The computed target DFA state for the given input symbol
+// {@code t}. If {@code t} does not lead to a valid DFA state, this method
+// returns {@link //ERROR}.
+
+func (p *ParserATNSimulator) computeTargetState(dfa *DFA, previousD *DFAState, t int) *DFAState {
+ reach := p.computeReachSet(previousD.configs, t, false)
+
+ if reach == nil {
+ p.addDFAEdge(dfa, previousD, t, ATNSimulatorError)
+ return ATNSimulatorError
+ }
+	// create new target state we'll add to DFA after it's complete
+ D := NewDFAState(-1, reach)
+
+ predictedAlt := p.getUniqueAlt(reach)
+
+ if ParserATNSimulatorDebug {
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ fmt.Println("SLL altSubSets=" + fmt.Sprint(altSubSets) +
+ ", previous=" + previousD.configs.String() +
+ ", configs=" + reach.String() +
+ ", predict=" + strconv.Itoa(predictedAlt) +
+ ", allSubsetsConflict=" +
+ fmt.Sprint(PredictionModeallSubsetsConflict(altSubSets)) +
+ ", conflictingAlts=" + p.getConflictingAlts(reach).String())
+ }
+ if predictedAlt != ATNInvalidAltNumber {
+ // NO CONFLICT, UNIQUELY PREDICTED ALT
+ D.isAcceptState = true
+ D.configs.SetUniqueAlt(predictedAlt)
+ D.setPrediction(predictedAlt)
+ } else if PredictionModehasSLLConflictTerminatingPrediction(p.predictionMode, reach) {
+ // MORE THAN ONE VIABLE ALTERNATIVE
+ D.configs.SetConflictingAlts(p.getConflictingAlts(reach))
+ D.requiresFullContext = true
+		// in SLL-only mode, we will stop at this state and return the minimum alt
+ D.isAcceptState = true
+ D.setPrediction(D.configs.GetConflictingAlts().minValue())
+ }
+ if D.isAcceptState && D.configs.HasSemanticContext() {
+ p.predicateDFAState(D, p.atn.getDecisionState(dfa.decision))
+ if D.predicates != nil {
+ D.setPrediction(ATNInvalidAltNumber)
+ }
+ }
+ // all adds to dfa are done after we've created full D state
+ D = p.addDFAEdge(dfa, previousD, t, D)
+ return D
+}
+
+func (p *ParserATNSimulator) predicateDFAState(dfaState *DFAState, decisionState DecisionState) {
+ // We need to test all predicates, even in DFA states that
+ // uniquely predict alternative.
+ nalts := len(decisionState.GetTransitions())
+ // Update DFA so reach becomes accept state with (predicate,alt)
+ // pairs if preds found for conflicting alts
+ altsToCollectPredsFrom := p.getConflictingAltsOrUniqueAlt(dfaState.configs)
+ altToPred := p.getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState.configs, nalts)
+ if altToPred != nil {
+ dfaState.predicates = p.getPredicatePredictions(altsToCollectPredsFrom, altToPred)
+ dfaState.setPrediction(ATNInvalidAltNumber) // make sure we use preds
+ } else {
+ // There are preds in configs but they might go away
+ // when OR'd together like {p}? || NONE == NONE. If neither
+ // alt has preds, resolve to min alt
+ dfaState.setPrediction(altsToCollectPredsFrom.minValue())
+ }
+}
+
+// comes back with reach.uniqueAlt set to a valid alt
+func (p *ParserATNSimulator) execATNWithFullContext(dfa *DFA, D *DFAState, s0 ATNConfigSet, input TokenStream, startIndex int, outerContext ParserRuleContext) int {
+
+ if ParserATNSimulatorDebug || ParserATNSimulatorListATNDecisions {
+ fmt.Println("execATNWithFullContext " + s0.String())
+ }
+
+ fullCtx := true
+ foundExactAmbig := false
+ var reach ATNConfigSet
+ previous := s0
+ input.Seek(startIndex)
+ t := input.LA(1)
+ predictedAlt := -1
+
+ for { // for more work
+ reach = p.computeReachSet(previous, t, fullCtx)
+ if reach == nil {
+ // if any configs in previous dipped into outer context, that
+ // means that input up to t actually finished entry rule
+ // at least for LL decision. Full LL doesn't dip into outer
+ // so don't need special case.
+ // We will get an error no matter what so delay until after
+ // decision better error message. Also, no reachable target
+ // ATN states in SLL implies LL will also get nowhere.
+ // If conflict in states that dip out, choose min since we
+ // will get error no matter what.
+ e := p.noViableAlt(input, outerContext, previous, startIndex)
+ input.Seek(startIndex)
+ alt := p.getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext)
+ if alt != ATNInvalidAltNumber {
+ return alt
+ }
+
+ panic(e)
+ }
+ altSubSets := PredictionModegetConflictingAltSubsets(reach)
+ if ParserATNSimulatorDebug {
+ fmt.Println("LL altSubSets=" + fmt.Sprint(altSubSets) + ", predict=" +
+ strconv.Itoa(PredictionModegetUniqueAlt(altSubSets)) + ", resolvesToJustOneViableAlt=" +
+ fmt.Sprint(PredictionModeresolvesToJustOneViableAlt(altSubSets)))
+ }
+ reach.SetUniqueAlt(p.getUniqueAlt(reach))
+ // unique prediction?
+ if reach.GetUniqueAlt() != ATNInvalidAltNumber {
+ predictedAlt = reach.GetUniqueAlt()
+ break
+ }
+ if p.predictionMode != PredictionModeLLExactAmbigDetection {
+ predictedAlt = PredictionModeresolvesToJustOneViableAlt(altSubSets)
+ if predictedAlt != ATNInvalidAltNumber {
+ break
+ }
+ } else {
+ // In exact ambiguity mode, we never try to terminate early.
+ // Just keeps scarfing until we know what the conflict is
+ if PredictionModeallSubsetsConflict(altSubSets) && PredictionModeallSubsetsEqual(altSubSets) {
+ foundExactAmbig = true
+ predictedAlt = PredictionModegetSingleViableAlt(altSubSets)
+ break
+ }
+ // else there are multiple non-conflicting subsets or
+ // we're not sure what the ambiguity is yet.
+ // So, keep going.
+ }
+ previous = reach
+ if t != TokenEOF {
+ input.Consume()
+ t = input.LA(1)
+ }
+ }
+ // If the configuration set uniquely predicts an alternative,
+ // without conflict, then we know that it's a full LL decision
+ // not SLL.
+ if reach.GetUniqueAlt() != ATNInvalidAltNumber {
+ p.ReportContextSensitivity(dfa, predictedAlt, reach, startIndex, input.Index())
+ return predictedAlt
+ }
+ // We do not check predicates here because we have checked them
+ // on-the-fly when doing full context prediction.
+
+ //
+ // In non-exact ambiguity detection mode, we might actually be able to
+ // detect an exact ambiguity, but I'm not going to spend the cycles
+ // needed to check. We only emit ambiguity warnings in exact ambiguity
+ // mode.
+ //
+ // For example, we might know that we have conflicting configurations.
+ // But, that does not mean that there is no way forward without a
+ // conflict. It's possible to have nonconflicting alt subsets as in:
+
+ // altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
+
+ // from
+ //
+ // [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
+ // (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
+ //
+	// In this case, (17,1,[5 $]) indicates there is some next sequence that
+ // would resolve p without conflict to alternative 1. Any other viable
+ // next sequence, however, is associated with a conflict. We stop
+ // looking for input because no amount of further lookahead will alter
+ // the fact that we should predict alternative 1. We just can't say for
+ // sure that there is an ambiguity without looking further.
+
+ p.ReportAmbiguity(dfa, D, startIndex, input.Index(), foundExactAmbig, reach.Alts(), reach)
+
+ return predictedAlt
+}
+
+func (p *ParserATNSimulator) computeReachSet(closure ATNConfigSet, t int, fullCtx bool) ATNConfigSet {
+ if ParserATNSimulatorDebug {
+ fmt.Println("in computeReachSet, starting closure: " + closure.String())
+ }
+ if p.mergeCache == nil {
+ p.mergeCache = NewDoubleDict()
+ }
+ intermediate := NewBaseATNConfigSet(fullCtx)
+
+ // Configurations already in a rule stop state indicate reaching the end
+ // of the decision rule (local context) or end of the start rule (full
+ // context). Once reached, these configurations are never updated by a
+ // closure operation, so they are handled separately for the performance
+ // advantage of having a smaller intermediate set when calling closure.
+ //
+ // For full-context reach operations, separate handling is required to
+ // ensure that the alternative Matching the longest overall sequence is
+ // chosen when multiple such configurations can Match the input.
+
+ var skippedStopStates []*BaseATNConfig
+
+ // First figure out where we can reach on input t
+ for _, c := range closure.GetItems() {
+ if ParserATNSimulatorDebug {
+ fmt.Println("testing " + p.GetTokenName(t) + " at " + c.String())
+ }
+
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ if fullCtx || t == TokenEOF {
+ skippedStopStates = append(skippedStopStates, c.(*BaseATNConfig))
+ if ParserATNSimulatorDebug {
+ fmt.Println("added " + c.String() + " to SkippedStopStates")
+ }
+ }
+ continue
+ }
+
+ for _, trans := range c.GetState().GetTransitions() {
+ target := p.getReachableTarget(trans, t)
+ if target != nil {
+ cfg := NewBaseATNConfig4(c, target)
+ intermediate.Add(cfg, p.mergeCache)
+ if ParserATNSimulatorDebug {
+ fmt.Println("added " + cfg.String() + " to intermediate")
+ }
+ }
+ }
+ }
+
+ // Now figure out where the reach operation can take us...
+ var reach ATNConfigSet
+
+ // This block optimizes the reach operation for intermediate sets which
+ // trivially indicate a termination state for the overall
+ // AdaptivePredict operation.
+ //
+ // The conditions assume that intermediate
+	// contains all configurations relevant to the reach set, but this
+ // condition is not true when one or more configurations have been
+ // withheld in SkippedStopStates, or when the current symbol is EOF.
+ //
+ if skippedStopStates == nil && t != TokenEOF {
+ if len(intermediate.configs) == 1 {
+ // Don't pursue the closure if there is just one state.
+ // It can only have one alternative just add to result
+ // Also don't pursue the closure if there is unique alternative
+ // among the configurations.
+ reach = intermediate
+ } else if p.getUniqueAlt(intermediate) != ATNInvalidAltNumber {
+ // Also don't pursue the closure if there is unique alternative
+ // among the configurations.
+ reach = intermediate
+ }
+ }
+ // If the reach set could not be trivially determined, perform a closure
+ // operation on the intermediate set to compute its initial value.
+ //
+ if reach == nil {
+ reach = NewBaseATNConfigSet(fullCtx)
+ closureBusy := newArray2DHashSet(nil, nil)
+ treatEOFAsEpsilon := t == TokenEOF
+ amount := len(intermediate.configs)
+ for k := 0; k < amount; k++ {
+ p.closure(intermediate.configs[k], reach, closureBusy, false, fullCtx, treatEOFAsEpsilon)
+ }
+ }
+ if t == TokenEOF {
+ // After consuming EOF no additional input is possible, so we are
+ // only interested in configurations which reached the end of the
+ // decision rule (local context) or end of the start rule (full
+ // context). Update reach to contain only these configurations. This
+ // handles both explicit EOF transitions in the grammar and implicit
+ // EOF transitions following the end of the decision or start rule.
+ //
+ // When reach==intermediate, no closure operation was performed. In
+		// this case, removeAllConfigsNotInRuleStopState needs to check for
+ // reachable rule stop states as well as configurations already in
+ // a rule stop state.
+ //
+ // This is handled before the configurations in SkippedStopStates,
+ // because any configurations potentially added from that list are
+		// already guaranteed to meet this condition whether or not it's
+ // required.
+ //
+ reach = p.removeAllConfigsNotInRuleStopState(reach, reach == intermediate)
+ }
+ // If SkippedStopStates!=nil, then it contains at least one
+ // configuration. For full-context reach operations, these
+ // configurations reached the end of the start rule, in which case we
+ // only add them back to reach if no configuration during the current
+ // closure operation reached such a state. This ensures AdaptivePredict
+ // chooses an alternative Matching the longest overall sequence when
+ // multiple alternatives are viable.
+ //
+ if skippedStopStates != nil && ((!fullCtx) || (!PredictionModehasConfigInRuleStopState(reach))) {
+ for l := 0; l < len(skippedStopStates); l++ {
+ reach.Add(skippedStopStates[l], p.mergeCache)
+ }
+ }
+ if len(reach.GetItems()) == 0 {
+ return nil
+ }
+
+ return reach
+}
+
+//
+// Return a configuration set containing only the configurations from
+// {@code configs} which are in a {@link RuleStopState}. If all
+// configurations in {@code configs} are already in a rule stop state, this
+// method simply returns {@code configs}.
+//
+// When {@code lookToEndOfRule} is true, this method uses
+// {@link ATN//NextTokens} for each configuration in {@code configs} which is
+// not already in a rule stop state to see if a rule stop state is reachable
+// from the configuration via epsilon-only transitions.
+//
+// @param configs the configuration set to update
+// @param lookToEndOfRule when true, this method checks for rule stop states
+// reachable by epsilon-only transitions from each configuration in
+// {@code configs}.
+//
+// @return {@code configs} if all configurations in {@code configs} are in a
+// rule stop state, otherwise return a new configuration set containing only
+// the configurations from {@code configs} which are in a rule stop state
+//
+func (p *ParserATNSimulator) removeAllConfigsNotInRuleStopState(configs ATNConfigSet, lookToEndOfRule bool) ATNConfigSet {
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return configs
+ }
+ result := NewBaseATNConfigSet(configs.FullContext())
+ for _, config := range configs.GetItems() {
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ result.Add(config, p.mergeCache)
+ continue
+ }
+ if lookToEndOfRule && config.GetState().GetEpsilonOnlyTransitions() {
+ NextTokens := p.atn.NextTokens(config.GetState(), nil)
+ if NextTokens.contains(TokenEpsilon) {
+ endOfRuleState := p.atn.ruleToStopState[config.GetState().GetRuleIndex()]
+ result.Add(NewBaseATNConfig4(config, endOfRuleState), p.mergeCache)
+ }
+ }
+ }
+ return result
+}
+
+func (p *ParserATNSimulator) computeStartState(a ATNState, ctx RuleContext, fullCtx bool) ATNConfigSet {
+ // always at least the implicit call to start rule
+ initialContext := predictionContextFromRuleContext(p.atn, ctx)
+ configs := NewBaseATNConfigSet(fullCtx)
+ for i := 0; i < len(a.GetTransitions()); i++ {
+ target := a.GetTransitions()[i].getTarget()
+ c := NewBaseATNConfig6(target, i+1, initialContext)
+ closureBusy := newArray2DHashSet(nil, nil)
+ p.closure(c, configs, closureBusy, true, fullCtx, false)
+ }
+ return configs
+}
+
+//
+// This method transforms the start state computed by
+// {@link //computeStartState} to the special start state used by a
+// precedence DFA for a particular precedence value. The transformation
+// process applies the following changes to the start state's configuration
+// set.
+//
+//
+// - Evaluate the precedence predicates for each configuration using
+// {@link SemanticContext//evalPrecedence}.
+// - Remove all configurations which predict an alternative greater than
+// 1, for which another configuration that predicts alternative 1 is in the
+// same ATN state with the same prediction context. This transformation is
+// valid for the following reasons:
+//
+// - The closure block cannot contain any epsilon transitions which bypass
+// the body of the closure, so all states reachable via alternative 1 are
+// part of the precedence alternatives of the transformed left-recursive
+// rule.
+// - The "primary" portion of a left recursive rule cannot contain an
+// epsilon transition, so the only way an alternative other than 1 can exist
+// in a state that is also reachable via alternative 1 is by nesting calls
+// to the left-recursive rule, with the outer calls not being at the
+// preferred precedence level.
+//
+//
+//
+//
+//
+// The prediction context must be considered by this filter to address
+// situations like the following.
+//
+//
+//
+// grammar TA
+// prog: statement* EOF
+// statement: letterA | statement letterA 'b'
+// letterA: 'a'
+//
+//
+//
+// In the above grammar, the ATN state immediately before the token
+// reference {@code 'a'} in {@code letterA} is reachable from the left edge
+// of both the primary and closure blocks of the left-recursive rule
+// {@code statement}. The prediction context associated with each of these
+// configurations distinguishes between them, and prevents the alternative
+// which stepped out to {@code prog} (and then back in to {@code statement})
+// from being eliminated by the filter.
+//
+//
+// @param configs The configuration set computed by
+// {@link //computeStartState} as the start state for the DFA.
+// @return The transformed configuration set representing the start state
+// for a precedence DFA at a particular precedence level (determined by
+// calling {@link Parser//getPrecedence}).
+//
+func (p *ParserATNSimulator) applyPrecedenceFilter(configs ATNConfigSet) ATNConfigSet {
+
+ statesFromAlt1 := make(map[int]PredictionContext)
+ configSet := NewBaseATNConfigSet(configs.FullContext())
+
+ for _, config := range configs.GetItems() {
+ // handle alt 1 first
+ if config.GetAlt() != 1 {
+ continue
+ }
+ updatedContext := config.GetSemanticContext().evalPrecedence(p.parser, p.outerContext)
+ if updatedContext == nil {
+ // the configuration was eliminated
+ continue
+ }
+ statesFromAlt1[config.GetState().GetStateNumber()] = config.GetContext()
+ if updatedContext != config.GetSemanticContext() {
+ configSet.Add(NewBaseATNConfig2(config, updatedContext), p.mergeCache)
+ } else {
+ configSet.Add(config, p.mergeCache)
+ }
+ }
+ for _, config := range configs.GetItems() {
+
+ if config.GetAlt() == 1 {
+ // already handled
+ continue
+ }
+		// In the future, this elimination step could be updated to also
+ // filter the prediction context for alternatives predicting alt>1
+ // (basically a graph subtraction algorithm).
+ if !config.getPrecedenceFilterSuppressed() {
+ context := statesFromAlt1[config.GetState().GetStateNumber()]
+ if context != nil && context.equals(config.GetContext()) {
+ // eliminated
+ continue
+ }
+ }
+ configSet.Add(config, p.mergeCache)
+ }
+ return configSet
+}
+
+func (p *ParserATNSimulator) getReachableTarget(trans Transition, ttype int) ATNState {
+ if trans.Matches(ttype, 0, p.atn.maxTokenType) {
+ return trans.getTarget()
+ }
+
+ return nil
+}
+
+func (p *ParserATNSimulator) getPredsForAmbigAlts(ambigAlts *BitSet, configs ATNConfigSet, nalts int) []SemanticContext {
+
+ altToPred := make([]SemanticContext, nalts+1)
+ for _, c := range configs.GetItems() {
+ if ambigAlts.contains(c.GetAlt()) {
+ altToPred[c.GetAlt()] = SemanticContextorContext(altToPred[c.GetAlt()], c.GetSemanticContext())
+ }
+ }
+ nPredAlts := 0
+ for i := 1; i <= nalts; i++ {
+ pred := altToPred[i]
+ if pred == nil {
+ altToPred[i] = SemanticContextNone
+ } else if pred != SemanticContextNone {
+ nPredAlts++
+ }
+ }
+ // nonambig alts are nil in altToPred
+ if nPredAlts == 0 {
+ altToPred = nil
+ }
+ if ParserATNSimulatorDebug {
+ fmt.Println("getPredsForAmbigAlts result " + fmt.Sprint(altToPred))
+ }
+ return altToPred
+}
+
+func (p *ParserATNSimulator) getPredicatePredictions(ambigAlts *BitSet, altToPred []SemanticContext) []*PredPrediction {
+ pairs := make([]*PredPrediction, 0)
+ containsPredicate := false
+ for i := 1; i < len(altToPred); i++ {
+ pred := altToPred[i]
+ // unpredicated is indicated by SemanticContextNONE
+ if ambigAlts != nil && ambigAlts.contains(i) {
+ pairs = append(pairs, NewPredPrediction(pred, i))
+ }
+ if pred != SemanticContextNone {
+ containsPredicate = true
+ }
+ }
+ if !containsPredicate {
+ return nil
+ }
+ return pairs
+}
+
+//
+// This method is used to improve the localization of error messages by
+// choosing an alternative rather than panicking with a
+// {@link NoViableAltException} in particular prediction scenarios where the
+// {@link //ERROR} state was reached during ATN simulation.
+//
+//
+// The default implementation of this method uses the following
+// algorithm to identify an ATN configuration which successfully parsed the
+// decision entry rule. Choosing such an alternative ensures that the
+// {@link ParserRuleContext} returned by the calling rule will be complete
+// and valid, and the syntax error will be Reported later at a more
+// localized location.
+//
+//
+// - If a syntactically valid path or paths reach the end of the decision rule and
+// they are semantically valid if predicated, return the min associated alt.
+// - Else, if a semantically invalid but syntactically valid path or paths
+// exist, return the minimum associated alt.
+//
+// - Otherwise, return {@link ATN//INVALID_ALT_NUMBER}.
+//
+//
+//
+// In some scenarios, the algorithm described above could predict an
+// alternative which will result in a {@link FailedPredicateException} in
+// the parser. Specifically, this could occur if the only configuration
+// capable of successfully parsing to the end of the decision rule is
+// blocked by a semantic predicate. By choosing this alternative within
+// {@link //AdaptivePredict} instead of panicking with a
+// {@link NoViableAltException}, the resulting
+// {@link FailedPredicateException} in the parser will identify the specific
+// predicate which is preventing the parser from successfully parsing the
+// decision rule, which helps developers identify and correct logic errors
+// in semantic predicates.
+//
+//
+// @param configs The ATN configurations which were valid immediately before
+// the {@link //ERROR} state was reached
+// @param outerContext The \gamma_0 initial parser context from the paper
+// or the parser stack at the instant before prediction commences.
+//
+// @return The value to return from {@link //AdaptivePredict}, or
+// {@link ATN//INVALID_ALT_NUMBER} if a suitable alternative was not
+// identified and {@link //AdaptivePredict} should Report an error instead.
+//
+func (p *ParserATNSimulator) getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(configs ATNConfigSet, outerContext ParserRuleContext) int {
+ cfgs := p.splitAccordingToSemanticValidity(configs, outerContext)
+ semValidConfigs := cfgs[0]
+ semInvalidConfigs := cfgs[1]
+ alt := p.GetAltThatFinishedDecisionEntryRule(semValidConfigs)
+ if alt != ATNInvalidAltNumber { // semantically/syntactically viable path exists
+ return alt
+ }
+ // Is there a syntactically valid path with a failed pred?
+ if len(semInvalidConfigs.GetItems()) > 0 {
+ alt = p.GetAltThatFinishedDecisionEntryRule(semInvalidConfigs)
+ if alt != ATNInvalidAltNumber { // syntactically viable path exists
+ return alt
+ }
+ }
+ return ATNInvalidAltNumber
+}
+
+func (p *ParserATNSimulator) GetAltThatFinishedDecisionEntryRule(configs ATNConfigSet) int {
+ alts := NewIntervalSet()
+
+ for _, c := range configs.GetItems() {
+ _, ok := c.GetState().(*RuleStopState)
+
+ if c.GetReachesIntoOuterContext() > 0 || (ok && c.GetContext().hasEmptyPath()) {
+ alts.addOne(c.GetAlt())
+ }
+ }
+ if alts.length() == 0 {
+ return ATNInvalidAltNumber
+ }
+
+ return alts.first()
+}
+
+// Walk the list of configurations and split them according to
+// those that have preds evaluating to true/false. If no pred, assume
+// true pred and include in succeeded set. Returns Pair of sets.
+//
+// Create a NewSet so as not to alter the incoming parameter.
+//
+// Assumption: the input stream has been restored to the starting point of
+// prediction, which is where predicates need to be evaluated.
+
+type ATNConfigSetPair struct {
+ item0, item1 ATNConfigSet
+}
+
+func (p *ParserATNSimulator) splitAccordingToSemanticValidity(configs ATNConfigSet, outerContext ParserRuleContext) []ATNConfigSet {
+ succeeded := NewBaseATNConfigSet(configs.FullContext())
+ failed := NewBaseATNConfigSet(configs.FullContext())
+
+ for _, c := range configs.GetItems() {
+ if c.GetSemanticContext() != SemanticContextNone {
+ predicateEvaluationResult := c.GetSemanticContext().evaluate(p.parser, outerContext)
+ if predicateEvaluationResult {
+ succeeded.Add(c, nil)
+ } else {
+ failed.Add(c, nil)
+ }
+ } else {
+ succeeded.Add(c, nil)
+ }
+ }
+ return []ATNConfigSet{succeeded, failed}
+}
+
+// Look through a list of predicate/alt pairs, returning alts for the
+// pairs that win. A {@code NONE} predicate indicates an alt containing an
+// unpredicated config which behaves as "always true." If !complete
+// then we stop at the first predicate that evaluates to true. This
+// includes pairs with nil predicates.
+//
+func (p *ParserATNSimulator) evalSemanticContext(predPredictions []*PredPrediction, outerContext ParserRuleContext, complete bool) *BitSet {
+ predictions := NewBitSet()
+ for i := 0; i < len(predPredictions); i++ {
+ pair := predPredictions[i]
+ if pair.pred == SemanticContextNone {
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ continue
+ }
+
+ predicateEvaluationResult := pair.pred.evaluate(p.parser, outerContext)
+ if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
+ fmt.Println("eval pred " + pair.String() + "=" + fmt.Sprint(predicateEvaluationResult))
+ }
+ if predicateEvaluationResult {
+ if ParserATNSimulatorDebug || ParserATNSimulatorDFADebug {
+ fmt.Println("PREDICT " + fmt.Sprint(pair.alt))
+ }
+ predictions.add(pair.alt)
+ if !complete {
+ break
+ }
+ }
+ }
+ return predictions
+}
+
+func (p *ParserATNSimulator) closure(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx, treatEOFAsEpsilon bool) {
+ initialDepth := 0
+ p.closureCheckingStopState(config, configs, closureBusy, collectPredicates,
+ fullCtx, initialDepth, treatEOFAsEpsilon)
+}
+
+func (p *ParserATNSimulator) closureCheckingStopState(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ if ParserATNSimulatorDebug {
+ fmt.Println("closure(" + config.String() + ")")
+ fmt.Println("configs(" + configs.String() + ")")
+ if config.GetReachesIntoOuterContext() > 50 {
+ panic("problem")
+ }
+ }
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+ // We hit rule end. If we have context info, use it
+ // run thru all possible stack tops in ctx
+ if !config.GetContext().isEmpty() {
+ for i := 0; i < config.GetContext().length(); i++ {
+ if config.GetContext().getReturnState(i) == BasePredictionContextEmptyReturnState {
+ if fullCtx {
+ configs.Add(NewBaseATNConfig1(config, config.GetState(), BasePredictionContextEMPTY), p.mergeCache)
+ continue
+ } else {
+ // we have no context info, just chase follow links (if greedy)
+ if ParserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+ }
+ continue
+ }
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ newContext := config.GetContext().GetParent(i) // "pop" return state
+
+ c := NewBaseATNConfig5(returnState, config.GetAlt(), newContext, config.GetSemanticContext())
+ // While we have context to pop back from, we may have
+ // gotten that context AFTER having falling off a rule.
+ // Make sure we track that we are now out of context.
+ c.SetReachesIntoOuterContext(config.GetReachesIntoOuterContext())
+ p.closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth-1, treatEOFAsEpsilon)
+ }
+ return
+ } else if fullCtx {
+ // reached end of start rule
+ configs.Add(config, p.mergeCache)
+ return
+ } else {
+ // else if we have no context info, just chase follow links (if greedy)
+ if ParserATNSimulatorDebug {
+ fmt.Println("FALLING off rule " + p.getRuleName(config.GetState().GetRuleIndex()))
+ }
+ }
+ }
+ p.closureWork(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEOFAsEpsilon)
+}
+
+// Do the actual work of walking epsilon edges//
+func (p *ParserATNSimulator) closureWork(config ATNConfig, configs ATNConfigSet, closureBusy Set, collectPredicates, fullCtx bool, depth int, treatEOFAsEpsilon bool) {
+ state := config.GetState()
+ // optimization
+ if !state.GetEpsilonOnlyTransitions() {
+ configs.Add(config, p.mergeCache)
+ // make sure to not return here, because EOF transitions can act as
+ // both epsilon transitions and non-epsilon transitions.
+ }
+ for i := 0; i < len(state.GetTransitions()); i++ {
+ if i == 0 && p.canDropLoopEntryEdgeInLeftRecursiveRule(config) {
+ continue
+ }
+
+ t := state.GetTransitions()[i]
+ _, ok := t.(*ActionTransition)
+ continueCollecting := collectPredicates && !ok
+ c := p.getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEOFAsEpsilon)
+ if ci, ok := c.(*BaseATNConfig); ok && ci != nil {
+ newDepth := depth
+
+ if _, ok := config.GetState().(*RuleStopState); ok {
+				// target fell off end of rule; mark resulting c as having dipped into outer context.
+				// We can't get here if incoming config was rule stop and we had context.
+				// Track how far we dip into outer context. Might
+				// come in handy and we avoid evaluating context dependent
+				// preds if this count is > 0.
+
+ if p.dfa != nil && p.dfa.getPrecedenceDfa() {
+ if t.(*EpsilonTransition).outermostPrecedenceReturn == p.dfa.atnStartState.GetRuleIndex() {
+ c.setPrecedenceFilterSuppressed(true)
+ }
+ }
+
+ c.SetReachesIntoOuterContext(c.GetReachesIntoOuterContext() + 1)
+
+ if closureBusy.Add(c) != c {
+ // avoid infinite recursion for right-recursive rules
+ continue
+ }
+
+				configs.SetDipsIntoOuterContext(true) // TODO: can remove? only care when we add to set per middle of this method
+ newDepth--
+ if ParserATNSimulatorDebug {
+ fmt.Println("dips into outer ctx: " + c.String())
+ }
+ } else {
+ if !t.getIsEpsilon() && closureBusy.Add(c) != c {
+ // avoid infinite recursion for EOF* and EOF+
+ continue
+ }
+ if _, ok := t.(*RuleTransition); ok {
+ // latch when newDepth goes negative - once we step out of the entry context we can't return
+ if newDepth >= 0 {
+ newDepth++
+ }
+ }
+ }
+ p.closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEOFAsEpsilon)
+ }
+ }
+}
+
+func (p *ParserATNSimulator) canDropLoopEntryEdgeInLeftRecursiveRule(config ATNConfig) bool {
+ if TurnOffLRLoopEntryBranchOpt {
+ return false
+ }
+
+ _p := config.GetState()
+
+ // First check to see if we are in StarLoopEntryState generated during
+ // left-recursion elimination. For efficiency, also check if
+ // the context has an empty stack case. If so, it would mean
+ // global FOLLOW so we can't perform optimization
+	if startLoop, ok := _p.(*StarLoopEntryState); !ok || !startLoop.precedenceRuleDecision || config.GetContext().isEmpty() || config.GetContext().hasEmptyPath() {
+ return false
+ }
+
+ // Require all return states to return back to the same rule
+	// that this state is in.
+ numCtxs := config.GetContext().length()
+ for i := 0; i < numCtxs; i++ {
+ returnState := p.atn.states[config.GetContext().getReturnState(i)]
+ if returnState.GetRuleIndex() != _p.GetRuleIndex() {
+ return false
+ }
+ }
+
+ decisionStartState := _p.(BlockStartState).GetTransitions()[0].getTarget().(BlockStartState)
+ blockEndStateNum := decisionStartState.getEndState().stateNumber
+ blockEndState := p.atn.states[blockEndStateNum].(*BlockEndState)
+
+ // Verify that the top of each stack context leads to loop entry/exit
+ // state through epsilon edges and w/o leaving rule.
+
+ for i := 0; i < numCtxs; i++ { // for each stack context
+ returnStateNumber := config.GetContext().getReturnState(i)
+ returnState := p.atn.states[returnStateNumber]
+
+ // all states must have single outgoing epsilon edge
+ if len(returnState.GetTransitions()) != 1 || !returnState.GetTransitions()[0].getIsEpsilon() {
+ return false
+ }
+
+ // Look for prefix op case like 'not expr', (' type ')' expr
+ returnStateTarget := returnState.GetTransitions()[0].getTarget()
+ if returnState.GetStateType() == ATNStateBlockEnd && returnStateTarget == _p {
+ continue
+ }
+
+ // Look for 'expr op expr' or case where expr's return state is block end
+ // of (...)* internal block; the block end points to loop back
+		// which points to this state, but we don't need to check that
+ if returnState == blockEndState {
+ continue
+ }
+
+ // Look for ternary expr ? expr : expr. The return state points at block end,
+ // which points at loop entry state
+ if returnStateTarget == blockEndState {
+ continue
+ }
+
+ // Look for complex prefix 'between expr and expr' case where 2nd expr's
+ // return state points at block end state of (...)* internal block
+ if returnStateTarget.GetStateType() == ATNStateBlockEnd &&
+ len(returnStateTarget.GetTransitions()) == 1 &&
+ returnStateTarget.GetTransitions()[0].getIsEpsilon() &&
+ returnStateTarget.GetTransitions()[0].getTarget() == _p {
+ continue
+ }
+
+ // anything else ain't conforming
+ return false
+ }
+
+ return true
+}
+
+func (p *ParserATNSimulator) getRuleName(index int) string {
+ if p.parser != nil && index >= 0 {
+ return p.parser.GetRuleNames()[index]
+ }
+ var sb strings.Builder
+ sb.Grow(32)
+
+	sb.WriteString("<rule-")
+	sb.WriteString(strconv.FormatInt(int64(index), 10))
+	sb.WriteString(">")
+ return sb.String()
+}
+
+func (p *ParserATNSimulator) getEpsilonTarget(config ATNConfig, t Transition, collectPredicates, inContext, fullCtx, treatEOFAsEpsilon bool) ATNConfig {
+
+ switch t.getSerializationType() {
+ case TransitionRULE:
+ return p.ruleTransition(config, t.(*RuleTransition))
+ case TransitionPRECEDENCE:
+ return p.precedenceTransition(config, t.(*PrecedencePredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionPREDICATE:
+ return p.predTransition(config, t.(*PredicateTransition), collectPredicates, inContext, fullCtx)
+ case TransitionACTION:
+ return p.actionTransition(config, t.(*ActionTransition))
+ case TransitionEPSILON:
+ return NewBaseATNConfig4(config, t.getTarget())
+ case TransitionATOM, TransitionRANGE, TransitionSET:
+ // EOF transitions act like epsilon transitions after the first EOF
+ // transition is traversed
+ if treatEOFAsEpsilon {
+ if t.Matches(TokenEOF, 0, 1) {
+ return NewBaseATNConfig4(config, t.getTarget())
+ }
+ }
+ return nil
+ default:
+ return nil
+ }
+}
+
+func (p *ParserATNSimulator) actionTransition(config ATNConfig, t *ActionTransition) *BaseATNConfig {
+ if ParserATNSimulatorDebug {
+ fmt.Println("ACTION edge " + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex))
+ }
+ return NewBaseATNConfig4(config, t.getTarget())
+}
+
+func (p *ParserATNSimulator) precedenceTransition(config ATNConfig,
+ pt *PrecedencePredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
+
+ if ParserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " +
+ strconv.Itoa(pt.precedence) + ">=_p, ctx dependent=true")
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *BaseATNConfig
+ if collectPredicates && inContext {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+ // the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewBaseATNConfig4(config, pt.getTarget())
+ }
+ if ParserATNSimulatorDebug {
+ fmt.Println("config from pred transition=" + c.String())
+ }
+ return c
+}
+
+func (p *ParserATNSimulator) predTransition(config ATNConfig, pt *PredicateTransition, collectPredicates, inContext, fullCtx bool) *BaseATNConfig {
+
+ if ParserATNSimulatorDebug {
+ fmt.Println("PRED (collectPredicates=" + fmt.Sprint(collectPredicates) + ") " + strconv.Itoa(pt.ruleIndex) +
+ ":" + strconv.Itoa(pt.predIndex) + ", ctx dependent=" + fmt.Sprint(pt.isCtxDependent))
+ if p.parser != nil {
+ fmt.Println("context surrounding pred is " + fmt.Sprint(p.parser.GetRuleInvocationStack(nil)))
+ }
+ }
+ var c *BaseATNConfig
+ if collectPredicates && (!pt.isCtxDependent || inContext) {
+ if fullCtx {
+ // In full context mode, we can evaluate predicates on-the-fly
+ // during closure, which dramatically reduces the size of
+ // the config sets. It also obviates the need to test predicates
+ // later during conflict resolution.
+ currentPosition := p.input.Index()
+ p.input.Seek(p.startIndex)
+ predSucceeds := pt.getPredicate().evaluate(p.parser, p.outerContext)
+ p.input.Seek(currentPosition)
+ if predSucceeds {
+ c = NewBaseATNConfig4(config, pt.getTarget()) // no pred context
+ }
+ } else {
+ newSemCtx := SemanticContextandContext(config.GetSemanticContext(), pt.getPredicate())
+ c = NewBaseATNConfig3(config, pt.getTarget(), newSemCtx)
+ }
+ } else {
+ c = NewBaseATNConfig4(config, pt.getTarget())
+ }
+ if ParserATNSimulatorDebug {
+ fmt.Println("config from pred transition=" + c.String())
+ }
+ return c
+}
+
+func (p *ParserATNSimulator) ruleTransition(config ATNConfig, t *RuleTransition) *BaseATNConfig {
+ if ParserATNSimulatorDebug {
+ fmt.Println("CALL rule " + p.getRuleName(t.getTarget().GetRuleIndex()) + ", ctx=" + config.GetContext().String())
+ }
+ returnState := t.followState
+ newContext := SingletonBasePredictionContextCreate(config.GetContext(), returnState.GetStateNumber())
+ return NewBaseATNConfig1(config, t.getTarget(), newContext)
+}
+
+func (p *ParserATNSimulator) getConflictingAlts(configs ATNConfigSet) *BitSet {
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModeGetAlts(altsets)
+}
+
+// Sam pointed out a problem with the previous definition, v3, of
+// ambiguous states. If we have another state associated with conflicting
+// alternatives, we should keep going. For example, the following grammar
+//
+// s : (ID | ID ID?) ';'
+//
+// When the ATN simulation reaches the state before ';', it has a DFA
+// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
+// 12|1|[] and 12|2|[] conflict, but we cannot stop processing p node
+// because alternative two has another way to continue, via [6|2|[]].
+// The key is that we have a single state that has config's only associated
+// with a single alternative, 2, and crucially the state transitions
+// among the configurations are all non-epsilon transitions. That means
+// we don't consider any conflicts that include alternative 2. So, we
+// ignore the conflict between alts 1 and 2. We ignore a set of
+// conflicting alts when there is an intersection with an alternative
+// associated with a single alt state in the state->config-list map.
+//
+// It's also the case that we might have two conflicting configurations but
+// also a 3rd nonconflicting configuration for a different alternative:
+// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
+//
+// a : A | A | A B
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue and so we do not
+// stop working on p state. In the previous example, we're concerned
+// with states associated with the conflicting alternatives. Here alt
+// 3 is not associated with the conflicting configs, but since we can continue
+// looking for input reasonably, I don't declare the state done. We
+// ignore a set of conflicting alts when we have an alternative
+// that we still need to pursue.
+//
+
+func (p *ParserATNSimulator) getConflictingAltsOrUniqueAlt(configs ATNConfigSet) *BitSet {
+ var conflictingAlts *BitSet
+ if configs.GetUniqueAlt() != ATNInvalidAltNumber {
+ conflictingAlts = NewBitSet()
+ conflictingAlts.add(configs.GetUniqueAlt())
+ } else {
+ conflictingAlts = configs.GetConflictingAlts()
+ }
+ return conflictingAlts
+}
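+
+// Editorial sketch (not part of the upstream file): how the two branches above
+// behave in practice. If SLL analysis already proved a single viable
+// alternative (say configs.GetUniqueAlt() == 2), the result is the singleton
+// BitSet {2}; otherwise the BitSet previously recorded via
+// configs.GetConflictingAlts() is returned unchanged.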
+
+func (p *ParserATNSimulator) GetTokenName(t int) string {
+ if t == TokenEOF {
+ return "EOF"
+ }
+
+ if p.parser != nil && p.parser.GetLiteralNames() != nil {
+ if t >= len(p.parser.GetLiteralNames()) {
+ fmt.Println(strconv.Itoa(t) + " ttype out of range: " + strings.Join(p.parser.GetLiteralNames(), ","))
+ // fmt.Println(p.parser.GetInputStream().(TokenStream).GetAllText()) // p seems incorrect
+ } else {
+ return p.parser.GetLiteralNames()[t] + "<" + strconv.Itoa(t) + ">"
+ }
+ }
+
+ return strconv.Itoa(t)
+}
+
+func (p *ParserATNSimulator) getLookaheadName(input TokenStream) string {
+ return p.GetTokenName(input.LA(1))
+}
+
+// Used for debugging in AdaptivePredict around execATN but I cut
+// it out for clarity now that alg. works well. We can leave p
+// "dead" code for a bit.
+//
+func (p *ParserATNSimulator) dumpDeadEndConfigs(nvae *NoViableAltException) {
+
+ panic("Not implemented")
+
+ // fmt.Println("dead end configs: ")
+ // var decs = nvae.deadEndConfigs
+ //
+ // for i:=0; i<len(decs); i++ {
+ // c := decs[i]
+ // var trans = "no edges"
+ // if (len(c.state.GetTransitions())>0) {
+ // var t = c.state.GetTransitions()[0]
+ // if t2, ok := t.(*AtomTransition); ok {
+ // trans = "Atom "+ p.GetTokenName(t2.label)
+ // } else if t3, ok := t.(SetTransition); ok {
+ // _, ok := t.(*NotSetTransition)
+ //
+ // var s string
+ // if (ok){
+ // s = "~"
+ // }
+ //
+ // trans = s + "Set " + t3.set
+ // }
+ // }
+ // fmt.Errorf(c.String(p.parser, true) + ":" + trans)
+ // }
+}
+
+func (p *ParserATNSimulator) noViableAlt(input TokenStream, outerContext ParserRuleContext, configs ATNConfigSet, startIndex int) *NoViableAltException {
+ return NewNoViableAltException(p.parser, input, input.Get(startIndex), input.LT(1), configs, outerContext)
+}
+
+func (p *ParserATNSimulator) getUniqueAlt(configs ATNConfigSet) int {
+ alt := ATNInvalidAltNumber
+ for _, c := range configs.GetItems() {
+ if alt == ATNInvalidAltNumber {
+ alt = c.GetAlt() // found first alt
+ } else if c.GetAlt() != alt {
+ return ATNInvalidAltNumber
+ }
+ }
+ return alt
+}
+
+//
+// Add an edge to the DFA, if possible. This method calls
+// {@link //addDFAState} to ensure the {@code to} state is present in the
+// DFA. If {@code from} is {@code nil}, or if {@code t} is outside the
+// range of edges that can be represented in the DFA tables, p method
+// returns without adding the edge to the DFA.
+//
+// If {@code to} is {@code nil}, p method returns {@code nil}.
+// Otherwise, p method returns the {@link DFAState} returned by calling
+// {@link //addDFAState} for the {@code to} state.
+//
+// @param dfa The DFA
+// @param from The source state for the edge
+// @param t The input symbol
+// @param to The target state for the edge
+//
+// @return If {@code to} is {@code nil}, p method returns {@code nil}
+// otherwise p method returns the result of calling {@link //addDFAState}
+// on {@code to}
+//
+func (p *ParserATNSimulator) addDFAEdge(dfa *DFA, from *DFAState, t int, to *DFAState) *DFAState {
+ if ParserATNSimulatorDebug {
+ fmt.Println("EDGE " + from.String() + " -> " + to.String() + " upon " + p.GetTokenName(t))
+ }
+ if to == nil {
+ return nil
+ }
+ p.atn.stateMu.Lock()
+ to = p.addDFAState(dfa, to) // used existing if possible not incoming
+ p.atn.stateMu.Unlock()
+ if from == nil || t < -1 || t > p.atn.maxTokenType {
+ return to
+ }
+ p.atn.edgeMu.Lock()
+ if from.getEdges() == nil {
+ from.setEdges(make([]*DFAState, p.atn.maxTokenType+1+1))
+ }
+ from.setIthEdge(t+1, to) // connect
+ p.atn.edgeMu.Unlock()
+
+ if ParserATNSimulatorDebug {
+ var names []string
+ if p.parser != nil {
+ names = p.parser.GetLiteralNames()
+ }
+
+ fmt.Println("DFA=\n" + dfa.String(names, nil))
+ }
+ return to
+}
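+
+// Editorial note (not part of the upstream file): the edge table above is
+// indexed by t+1, so t == -1 (EOF) lands in slot 0 and maxTokenType lands in
+// the last slot. That is why the slice is sized maxTokenType+1+1 and why
+// symbols outside [-1, maxTokenType] are rejected before indexing.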
+
+//
+// Add state {@code D} to the DFA if it is not already present, and return
+// the actual instance stored in the DFA. If a state equivalent to {@code D}
+// is already in the DFA, the existing state is returned. Otherwise p
+// method returns {@code D} after adding it to the DFA.
+//
+// If {@code D} is {@link //ERROR}, p method returns {@link //ERROR} and
+// does not change the DFA.
+//
+// @param dfa The dfa
+// @param D The DFA state to add
+// @return The state stored in the DFA. This will be either the existing
+// state if {@code D} is already in the DFA, or {@code D} itself if the
+// state was not already present.
+//
+func (p *ParserATNSimulator) addDFAState(dfa *DFA, d *DFAState) *DFAState {
+ if d == ATNSimulatorError {
+ return d
+ }
+ hash := d.hash()
+ existing, ok := dfa.getState(hash)
+ if ok {
+ return existing
+ }
+ d.stateNumber = dfa.numStates()
+ if !d.configs.ReadOnly() {
+ d.configs.OptimizeConfigs(p.BaseATNSimulator)
+ d.configs.SetReadOnly(true)
+ }
+ dfa.setState(hash, d)
+ if ParserATNSimulatorDebug {
+ fmt.Println("adding NewDFA state: " + d.String())
+ }
+ return d
+}
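+
+// Editorial sketch (not part of the upstream file): addDFAState is an
+// interning step. Callers such as addDFAEdge write
+//
+//	to = p.addDFAState(dfa, to) // reuse an equivalent existing state
+//
+// so two lookups that build equal DFA states end up sharing one instance (and
+// one stateNumber) inside the DFA.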
+
+func (p *ParserATNSimulator) ReportAttemptingFullContext(dfa *DFA, conflictingAlts *BitSet, configs ATNConfigSet, startIndex, stopIndex int) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAttemptingFullContext decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAttemptingFullContext(p.parser, dfa, startIndex, stopIndex, conflictingAlts, configs)
+ }
+}
+
+func (p *ParserATNSimulator) ReportContextSensitivity(dfa *DFA, prediction int, configs ATNConfigSet, startIndex, stopIndex int) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportContextSensitivity decision=" + strconv.Itoa(dfa.decision) + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportContextSensitivity(p.parser, dfa, startIndex, stopIndex, prediction, configs)
+ }
+}
+
+// If context sensitive parsing, we know it's ambiguity not conflict.
+func (p *ParserATNSimulator) ReportAmbiguity(dfa *DFA, D *DFAState, startIndex, stopIndex int,
+ exact bool, ambigAlts *BitSet, configs ATNConfigSet) {
+ if ParserATNSimulatorDebug || ParserATNSimulatorRetryDebug {
+ interval := NewInterval(startIndex, stopIndex+1)
+ fmt.Println("ReportAmbiguity " + ambigAlts.String() + ":" + configs.String() +
+ ", input=" + p.parser.GetTokenStream().GetTextFromInterval(interval))
+ }
+ if p.parser != nil {
+ p.parser.GetErrorListenerDispatch().ReportAmbiguity(p.parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs)
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
new file mode 100644
index 000000000..49cd10c5f
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/parser_rule_context.go
@@ -0,0 +1,362 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "reflect"
+ "strconv"
+)
+
+type ParserRuleContext interface {
+ RuleContext
+
+ SetException(RecognitionException)
+
+ AddTokenNode(token Token) *TerminalNodeImpl
+ AddErrorNode(badToken Token) *ErrorNodeImpl
+
+ EnterRule(listener ParseTreeListener)
+ ExitRule(listener ParseTreeListener)
+
+ SetStart(Token)
+ GetStart() Token
+
+ SetStop(Token)
+ GetStop() Token
+
+ AddChild(child RuleContext) RuleContext
+ RemoveLastChild()
+}
+
+type BaseParserRuleContext struct {
+ *BaseRuleContext
+
+ start, stop Token
+ exception RecognitionException
+ children []Tree
+}
+
+func NewBaseParserRuleContext(parent ParserRuleContext, invokingStateNumber int) *BaseParserRuleContext {
+ prc := new(BaseParserRuleContext)
+
+ prc.BaseRuleContext = NewBaseRuleContext(parent, invokingStateNumber)
+
+ prc.RuleIndex = -1
+ // * If we are debugging or building a parse tree for a Visitor,
+ // we need to track all of the tokens and rule invocations associated
+ // with prc rule's context. This is empty for parsing w/o tree constr.
+ // operation because we don't the need to track the details about
+ // how we parse prc rule.
+ // /
+ prc.children = nil
+ prc.start = nil
+ prc.stop = nil
+ // The exception that forced prc rule to return. If the rule successfully
+ // completed, prc is {@code nil}.
+ prc.exception = nil
+
+ return prc
+}
+
+func (prc *BaseParserRuleContext) SetException(e RecognitionException) {
+ prc.exception = e
+}
+
+func (prc *BaseParserRuleContext) GetChildren() []Tree {
+ return prc.children
+}
+
+func (prc *BaseParserRuleContext) CopyFrom(ctx *BaseParserRuleContext) {
+ // from RuleContext
+ prc.parentCtx = ctx.parentCtx
+ prc.invokingState = ctx.invokingState
+ prc.children = nil
+ prc.start = ctx.start
+ prc.stop = ctx.stop
+}
+
+func (prc *BaseParserRuleContext) GetText() string {
+ if prc.GetChildCount() == 0 {
+ return ""
+ }
+
+ var s string
+ for _, child := range prc.children {
+ s += child.(ParseTree).GetText()
+ }
+
+ return s
+}
+
+// Double dispatch methods for listeners
+func (prc *BaseParserRuleContext) EnterRule(listener ParseTreeListener) {
+}
+
+func (prc *BaseParserRuleContext) ExitRule(listener ParseTreeListener) {
+}
+
+// * Does not set parent link other add methods do that///
+func (prc *BaseParserRuleContext) addTerminalNodeChild(child TerminalNode) TerminalNode {
+ if prc.children == nil {
+ prc.children = make([]Tree, 0)
+ }
+ if child == nil {
+ panic("Child may not be null")
+ }
+ prc.children = append(prc.children, child)
+ return child
+}
+
+func (prc *BaseParserRuleContext) AddChild(child RuleContext) RuleContext {
+ if prc.children == nil {
+ prc.children = make([]Tree, 0)
+ }
+ if child == nil {
+ panic("Child may not be null")
+ }
+ prc.children = append(prc.children, child)
+ return child
+}
+
+// * Used by EnterOuterAlt to toss out a RuleContext previously added as
+// we entered a rule. If we have // label, we will need to remove
+// generic ruleContext object.
+// /
+func (prc *BaseParserRuleContext) RemoveLastChild() {
+ if prc.children != nil && len(prc.children) > 0 {
+ prc.children = prc.children[0 : len(prc.children)-1]
+ }
+}
+
+func (prc *BaseParserRuleContext) AddTokenNode(token Token) *TerminalNodeImpl {
+
+ node := NewTerminalNodeImpl(token)
+ prc.addTerminalNodeChild(node)
+ node.parentCtx = prc
+ return node
+
+}
+
+func (prc *BaseParserRuleContext) AddErrorNode(badToken Token) *ErrorNodeImpl {
+ node := NewErrorNodeImpl(badToken)
+ prc.addTerminalNodeChild(node)
+ node.parentCtx = prc
+ return node
+}
+
+func (prc *BaseParserRuleContext) GetChild(i int) Tree {
+ if prc.children != nil && len(prc.children) >= i {
+ return prc.children[i]
+ }
+
+ return nil
+}
+
+func (prc *BaseParserRuleContext) GetChildOfType(i int, childType reflect.Type) RuleContext {
+ if childType == nil {
+ return prc.GetChild(i).(RuleContext)
+ }
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if reflect.TypeOf(child) == childType {
+ if i == 0 {
+ return child.(RuleContext)
+ }
+
+ i--
+ }
+ }
+
+ return nil
+}
+
+func (prc *BaseParserRuleContext) ToStringTree(ruleNames []string, recog Recognizer) string {
+ return TreesStringTree(prc, ruleNames, recog)
+}
+
+func (prc *BaseParserRuleContext) GetRuleContext() RuleContext {
+ return prc
+}
+
+func (prc *BaseParserRuleContext) Accept(visitor ParseTreeVisitor) interface{} {
+ return visitor.VisitChildren(prc)
+}
+
+func (prc *BaseParserRuleContext) SetStart(t Token) {
+ prc.start = t
+}
+
+func (prc *BaseParserRuleContext) GetStart() Token {
+ return prc.start
+}
+
+func (prc *BaseParserRuleContext) SetStop(t Token) {
+ prc.stop = t
+}
+
+func (prc *BaseParserRuleContext) GetStop() Token {
+ return prc.stop
+}
+
+func (prc *BaseParserRuleContext) GetToken(ttype int, i int) TerminalNode {
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if c2, ok := child.(TerminalNode); ok {
+ if c2.GetSymbol().GetTokenType() == ttype {
+ if i == 0 {
+ return c2
+ }
+
+ i--
+ }
+ }
+ }
+ return nil
+}
+
+func (prc *BaseParserRuleContext) GetTokens(ttype int) []TerminalNode {
+ if prc.children == nil {
+ return make([]TerminalNode, 0)
+ }
+
+ tokens := make([]TerminalNode, 0)
+
+ for j := 0; j < len(prc.children); j++ {
+ child := prc.children[j]
+ if tchild, ok := child.(TerminalNode); ok {
+ if tchild.GetSymbol().GetTokenType() == ttype {
+ tokens = append(tokens, tchild)
+ }
+ }
+ }
+
+ return tokens
+}
+
+func (prc *BaseParserRuleContext) GetPayload() interface{} {
+ return prc
+}
+
+func (prc *BaseParserRuleContext) getChild(ctxType reflect.Type, i int) RuleContext {
+ if prc.children == nil || i < 0 || i >= len(prc.children) {
+ return nil
+ }
+
+ j := -1 // what element have we found with ctxType?
+ for _, o := range prc.children {
+
+ childType := reflect.TypeOf(o)
+
+ if childType.Implements(ctxType) {
+ j++
+ if j == i {
+ return o.(RuleContext)
+ }
+ }
+ }
+ return nil
+}
+
+// Go lacks generics, so it's not possible for us to return the child with the correct type, but we do
+// check for convertibility
+
+func (prc *BaseParserRuleContext) GetTypedRuleContext(ctxType reflect.Type, i int) RuleContext {
+ return prc.getChild(ctxType, i)
+}
+
+func (prc *BaseParserRuleContext) GetTypedRuleContexts(ctxType reflect.Type) []RuleContext {
+ if prc.children == nil {
+ return make([]RuleContext, 0)
+ }
+
+ contexts := make([]RuleContext, 0)
+
+ for _, child := range prc.children {
+ childType := reflect.TypeOf(child)
+
+ if childType.ConvertibleTo(ctxType) {
+ contexts = append(contexts, child.(RuleContext))
+ }
+ }
+ return contexts
+}
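+
+// Editorial sketch (not part of the upstream file): generated parsers pass an
+// interface type obtained through reflection, e.g. (IExprContext is a
+// hypothetical generated context interface):
+//
+//	ctx.GetTypedRuleContext(reflect.TypeOf((*IExprContext)(nil)).Elem(), 0)
+//	ctx.GetTypedRuleContexts(reflect.TypeOf((*IExprContext)(nil)).Elem())
+//
+// and type-assert the returned RuleContext back to the concrete context type.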
+
+func (prc *BaseParserRuleContext) GetChildCount() int {
+ if prc.children == nil {
+ return 0
+ }
+
+ return len(prc.children)
+}
+
+func (prc *BaseParserRuleContext) GetSourceInterval() *Interval {
+ if prc.start == nil || prc.stop == nil {
+ return TreeInvalidInterval
+ }
+
+ return NewInterval(prc.start.GetTokenIndex(), prc.stop.GetTokenIndex())
+}
+
+//need to manage circular dependencies, so export now
+
+// Print out a whole tree, not just a node, in LISP format
+// (root child1 .. childN). Print just a node if b is a leaf.
+//
+
+func (prc *BaseParserRuleContext) String(ruleNames []string, stop RuleContext) string {
+
+ var p ParserRuleContext = prc
+ s := "["
+ for p != nil && p != stop {
+ if ruleNames == nil {
+ if !p.IsEmpty() {
+ s += strconv.Itoa(p.GetInvokingState())
+ }
+ } else {
+ ri := p.GetRuleIndex()
+ var ruleName string
+ if ri >= 0 && ri < len(ruleNames) {
+ ruleName = ruleNames[ri]
+ } else {
+ ruleName = strconv.Itoa(ri)
+ }
+ s += ruleName
+ }
+ if p.GetParent() != nil && (ruleNames != nil || !p.GetParent().(ParserRuleContext).IsEmpty()) {
+ s += " "
+ }
+ pi := p.GetParent()
+ if pi != nil {
+ p = pi.(ParserRuleContext)
+ } else {
+ p = nil
+ }
+ }
+ s += "]"
+ return s
+}
+
+var RuleContextEmpty = NewBaseParserRuleContext(nil, -1)
+
+type InterpreterRuleContext interface {
+ ParserRuleContext
+}
+
+type BaseInterpreterRuleContext struct {
+ *BaseParserRuleContext
+}
+
+func NewBaseInterpreterRuleContext(parent BaseInterpreterRuleContext, invokingStateNumber, ruleIndex int) *BaseInterpreterRuleContext {
+
+ prc := new(BaseInterpreterRuleContext)
+
+ prc.BaseParserRuleContext = NewBaseParserRuleContext(parent, invokingStateNumber)
+
+ prc.RuleIndex = ruleIndex
+
+ return prc
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
new file mode 100644
index 000000000..9fdfd52b2
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_context.go
@@ -0,0 +1,751 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+)
+
+// Represents {@code $} in local context prediction, which means wildcard.
+// {@code//+x =//}.
+// /
+const (
+ BasePredictionContextEmptyReturnState = 0x7FFFFFFF
+)
+
+// Represents {@code $} in an array in full context mode, when {@code $}
+// doesn't mean wildcard: {@code $ + x = [$,x]}. Here,
+// {@code $} = {@link //EmptyReturnState}.
+// /
+
+var (
+ BasePredictionContextglobalNodeCount = 1
+ BasePredictionContextid = BasePredictionContextglobalNodeCount
+)
+
+type PredictionContext interface {
+ hash() int
+ GetParent(int) PredictionContext
+ getReturnState(int) int
+ equals(PredictionContext) bool
+ length() int
+ isEmpty() bool
+ hasEmptyPath() bool
+ String() string
+}
+
+type BasePredictionContext struct {
+ cachedHash int
+}
+
+func NewBasePredictionContext(cachedHash int) *BasePredictionContext {
+ pc := new(BasePredictionContext)
+ pc.cachedHash = cachedHash
+
+ return pc
+}
+
+func (b *BasePredictionContext) isEmpty() bool {
+ return false
+}
+
+func calculateHash(parent PredictionContext, returnState int) int {
+ h := murmurInit(1)
+ h = murmurUpdate(h, parent.hash())
+ h = murmurUpdate(h, returnState)
+ return murmurFinish(h, 2)
+}
+
+var _emptyPredictionContextHash int
+
+func init() {
+ _emptyPredictionContextHash = murmurInit(1)
+ _emptyPredictionContextHash = murmurFinish(_emptyPredictionContextHash, 0)
+}
+
+func calculateEmptyHash() int {
+ return _emptyPredictionContextHash
+}
+
+// Used to cache {@link BasePredictionContext} objects. It's used for the shared
+// context cache associated with contexts in DFA states. This cache
+// can be used for both lexers and parsers.
+
+type PredictionContextCache struct {
+ cache map[PredictionContext]PredictionContext
+}
+
+func NewPredictionContextCache() *PredictionContextCache {
+ t := new(PredictionContextCache)
+ t.cache = make(map[PredictionContext]PredictionContext)
+ return t
+}
+
+// Add a context to the cache and return it. If the context already exists,
+// return that one instead and do not add a Newcontext to the cache.
+// Protect shared cache from unsafe thread access.
+//
+func (p *PredictionContextCache) add(ctx PredictionContext) PredictionContext {
+ if ctx == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY
+ }
+ existing := p.cache[ctx]
+ if existing != nil {
+ return existing
+ }
+ p.cache[ctx] = ctx
+ return ctx
+}
+
+func (p *PredictionContextCache) Get(ctx PredictionContext) PredictionContext {
+ return p.cache[ctx]
+}
+
+func (p *PredictionContextCache) length() int {
+ return len(p.cache)
+}
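+
+// Editorial sketch (not part of the upstream file): a single cache instance is
+// shared by the simulators, roughly
+//
+//	cache := NewPredictionContextCache()
+//	ctx = cache.add(ctx) // returns the canonical entry, adding ctx if it is new
+//
+// so a context that is added again later comes back as the same instance.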
+
+type SingletonPredictionContext interface {
+ PredictionContext
+}
+
+type BaseSingletonPredictionContext struct {
+ *BasePredictionContext
+
+ parentCtx PredictionContext
+ returnState int
+}
+
+func NewBaseSingletonPredictionContext(parent PredictionContext, returnState int) *BaseSingletonPredictionContext {
+ var cachedHash int
+ if parent != nil {
+ cachedHash = calculateHash(parent, returnState)
+ } else {
+ cachedHash = calculateEmptyHash()
+ }
+
+ s := new(BaseSingletonPredictionContext)
+ s.BasePredictionContext = NewBasePredictionContext(cachedHash)
+
+ s.parentCtx = parent
+ s.returnState = returnState
+
+ return s
+}
+
+func SingletonBasePredictionContextCreate(parent PredictionContext, returnState int) PredictionContext {
+ if returnState == BasePredictionContextEmptyReturnState && parent == nil {
+ // someone can pass in the bits of an array ctx that mean $
+ return BasePredictionContextEMPTY
+ }
+
+ return NewBaseSingletonPredictionContext(parent, returnState)
+}
+
+func (b *BaseSingletonPredictionContext) length() int {
+ return 1
+}
+
+func (b *BaseSingletonPredictionContext) GetParent(index int) PredictionContext {
+ return b.parentCtx
+}
+
+func (b *BaseSingletonPredictionContext) getReturnState(index int) int {
+ return b.returnState
+}
+
+func (b *BaseSingletonPredictionContext) hasEmptyPath() bool {
+ return b.returnState == BasePredictionContextEmptyReturnState
+}
+
+func (b *BaseSingletonPredictionContext) equals(other PredictionContext) bool {
+ if b == other {
+ return true
+ } else if _, ok := other.(*BaseSingletonPredictionContext); !ok {
+ return false
+ } else if b.hash() != other.hash() {
+ return false // can't be same if hash is different
+ }
+
+ otherP := other.(*BaseSingletonPredictionContext)
+
+ if b.returnState != other.getReturnState(0) {
+ return false
+ } else if b.parentCtx == nil {
+ return otherP.parentCtx == nil
+ }
+
+ return b.parentCtx.equals(otherP.parentCtx)
+}
+
+func (b *BaseSingletonPredictionContext) hash() int {
+ return b.cachedHash
+}
+
+func (b *BaseSingletonPredictionContext) String() string {
+ var up string
+
+ if b.parentCtx == nil {
+ up = ""
+ } else {
+ up = b.parentCtx.String()
+ }
+
+ if len(up) == 0 {
+ if b.returnState == BasePredictionContextEmptyReturnState {
+ return "$"
+ }
+
+ return strconv.Itoa(b.returnState)
+ }
+
+ return strconv.Itoa(b.returnState) + " " + up
+}
+
+var BasePredictionContextEMPTY = NewEmptyPredictionContext()
+
+type EmptyPredictionContext struct {
+ *BaseSingletonPredictionContext
+}
+
+func NewEmptyPredictionContext() *EmptyPredictionContext {
+
+ p := new(EmptyPredictionContext)
+
+ p.BaseSingletonPredictionContext = NewBaseSingletonPredictionContext(nil, BasePredictionContextEmptyReturnState)
+
+ return p
+}
+
+func (e *EmptyPredictionContext) isEmpty() bool {
+ return true
+}
+
+func (e *EmptyPredictionContext) GetParent(index int) PredictionContext {
+ return nil
+}
+
+func (e *EmptyPredictionContext) getReturnState(index int) int {
+ return e.returnState
+}
+
+func (e *EmptyPredictionContext) equals(other PredictionContext) bool {
+ return e == other
+}
+
+func (e *EmptyPredictionContext) String() string {
+ return "$"
+}
+
+type ArrayPredictionContext struct {
+ *BasePredictionContext
+
+ parents []PredictionContext
+ returnStates []int
+}
+
+func NewArrayPredictionContext(parents []PredictionContext, returnStates []int) *ArrayPredictionContext {
+ // Parent can be nil only if full ctx mode and we make an array
+ // from {@link //EMPTY} and non-empty. We merge {@link //EMPTY} by using
+ // nil parent and
+ // returnState == {@link //EmptyReturnState}.
+ hash := murmurInit(1)
+
+ for _, parent := range parents {
+ hash = murmurUpdate(hash, parent.hash())
+ }
+
+ for _, returnState := range returnStates {
+ hash = murmurUpdate(hash, returnState)
+ }
+
+ hash = murmurFinish(hash, len(parents)<<1)
+
+ c := new(ArrayPredictionContext)
+ c.BasePredictionContext = NewBasePredictionContext(hash)
+
+ c.parents = parents
+ c.returnStates = returnStates
+
+ return c
+}
+
+func (a *ArrayPredictionContext) GetReturnStates() []int {
+ return a.returnStates
+}
+
+func (a *ArrayPredictionContext) hasEmptyPath() bool {
+ return a.getReturnState(a.length()-1) == BasePredictionContextEmptyReturnState
+}
+
+func (a *ArrayPredictionContext) isEmpty() bool {
+ // since EmptyReturnState can only appear in the last position, we
+ // don't need to verify that size==1
+ return a.returnStates[0] == BasePredictionContextEmptyReturnState
+}
+
+func (a *ArrayPredictionContext) length() int {
+ return len(a.returnStates)
+}
+
+func (a *ArrayPredictionContext) GetParent(index int) PredictionContext {
+ return a.parents[index]
+}
+
+func (a *ArrayPredictionContext) getReturnState(index int) int {
+ return a.returnStates[index]
+}
+
+func (a *ArrayPredictionContext) equals(other PredictionContext) bool {
+ if _, ok := other.(*ArrayPredictionContext); !ok {
+ return false
+ } else if a.cachedHash != other.hash() {
+ return false // can't be same if hash is different
+ } else {
+ otherP := other.(*ArrayPredictionContext)
+ return &a.returnStates == &otherP.returnStates && &a.parents == &otherP.parents
+ }
+}
+
+func (a *ArrayPredictionContext) hash() int {
+ return a.BasePredictionContext.cachedHash
+}
+
+func (a *ArrayPredictionContext) String() string {
+ if a.isEmpty() {
+ return "[]"
+ }
+
+ s := "["
+ for i := 0; i < len(a.returnStates); i++ {
+ if i > 0 {
+ s = s + ", "
+ }
+ if a.returnStates[i] == BasePredictionContextEmptyReturnState {
+ s = s + "$"
+ continue
+ }
+ s = s + strconv.Itoa(a.returnStates[i])
+ if a.parents[i] != nil {
+ s = s + " " + a.parents[i].String()
+ } else {
+ s = s + "nil"
+ }
+ }
+
+ return s + "]"
+}
+
+// Convert a {@link RuleContext} tree to a {@link BasePredictionContext} graph.
+// Return {@link //EMPTY} if {@code outerContext} is empty or nil.
+// /
+func predictionContextFromRuleContext(a *ATN, outerContext RuleContext) PredictionContext {
+ if outerContext == nil {
+ outerContext = RuleContextEmpty
+ }
+ // if we are in RuleContext of start rule, s, then BasePredictionContext
+ // is EMPTY. Nobody called us. (if we are empty, return empty)
+ if outerContext.GetParent() == nil || outerContext == RuleContextEmpty {
+ return BasePredictionContextEMPTY
+ }
+ // If we have a parent, convert it to a BasePredictionContext graph
+ parent := predictionContextFromRuleContext(a, outerContext.GetParent().(RuleContext))
+ state := a.states[outerContext.GetInvokingState()]
+ transition := state.GetTransitions()[0]
+
+ return SingletonBasePredictionContextCreate(parent, transition.(*RuleTransition).followState.GetStateNumber())
+}
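+
+// Editorial sketch (not part of the upstream file): for an invocation chain
+// start -> a -> b (with b the current rule), the recursion above produces a
+// graph equivalent to
+//
+//	SingletonBasePredictionContextCreate(
+//	    SingletonBasePredictionContextCreate(BasePredictionContextEMPTY, followOfA),
+//	    followOfB)
+//
+// where followOfA and followOfB stand for the follow-state numbers of the two
+// rule invocations (the names are illustrative only).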
+
+func merge(a, b PredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+ // share same graph if both same
+ if a == b {
+ return a
+ }
+
+ ac, ok1 := a.(*BaseSingletonPredictionContext)
+ bc, ok2 := b.(*BaseSingletonPredictionContext)
+
+ if ok1 && ok2 {
+ return mergeSingletons(ac, bc, rootIsWildcard, mergeCache)
+ }
+ // At least one of a or b is array
+ // If one is $ and rootIsWildcard, return $ as// wildcard
+ if rootIsWildcard {
+ if _, ok := a.(*EmptyPredictionContext); ok {
+ return a
+ }
+ if _, ok := b.(*EmptyPredictionContext); ok {
+ return b
+ }
+ }
+ // convert singleton so both are arrays to normalize
+ if _, ok := a.(*BaseSingletonPredictionContext); ok {
+ a = NewArrayPredictionContext([]PredictionContext{a.GetParent(0)}, []int{a.getReturnState(0)})
+ }
+ if _, ok := b.(*BaseSingletonPredictionContext); ok {
+ b = NewArrayPredictionContext([]PredictionContext{b.GetParent(0)}, []int{b.getReturnState(0)})
+ }
+ return mergeArrays(a.(*ArrayPredictionContext), b.(*ArrayPredictionContext), rootIsWildcard, mergeCache)
+}
+
+//
+// Merge two {@link SingletonBasePredictionContext} instances.
+//
+// Stack tops equal, parents merge is same return left graph.
+//
+//
+// Same stack top, parents differ merge parents giving array node, then
+// remainders of those graphs. A Newroot node is created to point to the
+// merged parents.
+//
+//
+// Different stack tops pointing to same parent. Make array node for the
+// root where both element in the root point to the same (original)
+// parent.
+//
+//
+// Different stack tops pointing to different parents. Make array node for
+// the root where each element points to the corresponding original
+// parent.
+//
+//
+// @param a the first {@link SingletonBasePredictionContext}
+// @param b the second {@link SingletonBasePredictionContext}
+// @param rootIsWildcard {@code true} if this is a local-context merge,
+// otherwise false to indicate a full-context merge
+// @param mergeCache
+// /
+func mergeSingletons(a, b *BaseSingletonPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+ if mergeCache != nil {
+ previous := mergeCache.Get(a.hash(), b.hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ previous = mergeCache.Get(b.hash(), a.hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ }
+
+ rootMerge := mergeRoot(a, b, rootIsWildcard)
+ if rootMerge != nil {
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), rootMerge)
+ }
+ return rootMerge
+ }
+ if a.returnState == b.returnState {
+ parent := merge(a.parentCtx, b.parentCtx, rootIsWildcard, mergeCache)
+ // if parent is same as existing a or b parent or reduced to a parent,
+ // return it
+ if parent == a.parentCtx {
+ return a // ax + bx = ax, if a=b
+ }
+ if parent == b.parentCtx {
+ return b // ax + bx = bx, if a=b
+ }
+ // else: ax + ay = a'[x,y]
+ // merge parents x and y, giving array node with x,y then remainders
+ // of those graphs. dup a, a' points at merged array
+ // Newjoined parent so create Newsingleton pointing to it, a'
+ spc := SingletonBasePredictionContextCreate(parent, a.returnState)
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), spc)
+ }
+ return spc
+ }
+ // a != b payloads differ
+ // see if we can collapse parents due to $+x parents if local ctx
+ var singleParent PredictionContext
+ if a == b || (a.parentCtx != nil && a.parentCtx == b.parentCtx) { // ax +
+ // bx =
+ // [a,b]x
+ singleParent = a.parentCtx
+ }
+ if singleParent != nil { // parents are same
+ // sort payloads and use same parent
+ payloads := []int{a.returnState, b.returnState}
+ if a.returnState > b.returnState {
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ }
+ parents := []PredictionContext{singleParent, singleParent}
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), apc)
+ }
+ return apc
+ }
+ // parents differ and can't merge them. Just pack together
+ // into array can't merge.
+ // ax + by = [ax,by]
+ payloads := []int{a.returnState, b.returnState}
+ parents := []PredictionContext{a.parentCtx, b.parentCtx}
+ if a.returnState > b.returnState { // sort by payload
+ payloads[0] = b.returnState
+ payloads[1] = a.returnState
+ parents = []PredictionContext{b.parentCtx, a.parentCtx}
+ }
+ apc := NewArrayPredictionContext(parents, payloads)
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), apc)
+ }
+ return apc
+}
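+
+// Editorial sketch (not part of the upstream file): the cases handled above,
+// restated with x, y as parent graphs and a, b as return states:
+//
+//	ax + ax -> ax        (identical tops and parents)
+//	ax + ay -> a'[x,y]   (same top; parents merged, new singleton a' points at them)
+//	ax + bx -> [a,b]x    (different tops sharing one parent)
+//	ax + by -> [ax,by]   (nothing shared; packed into an array context)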
+
+//
+// Handle case where at least one of {@code a} or {@code b} is
+// {@link //EMPTY}. In the following diagrams, the symbol {@code $} is used
+// to represent {@link //EMPTY}.
+//
+// Local-Context Merges
+//
+// These local-context merge operations are used when {@code rootIsWildcard}
+// is true.
+//
+// {@link //EMPTY} is superset of any graph return {@link //EMPTY}.
+//
+//
+// {@link //EMPTY} and anything is {@code //EMPTY}, so merged parent is
+// {@code //EMPTY} return left graph.
+//
+//
+// Special case of last merge if local context.
+//
+//
+// Full-Context Merges
+//
+// These full-context merge operations are used when {@code rootIsWildcard}
+// is false.
+//
+//
+//
+// Must keep all contexts; {@link //EMPTY} in array is a special value (and
+// nil parent).
+//
+//
+//
+//
+// @param a the first {@link SingletonBasePredictionContext}
+// @param b the second {@link SingletonBasePredictionContext}
+// @param rootIsWildcard {@code true} if this is a local-context merge,
+// otherwise false to indicate a full-context merge
+// /
+func mergeRoot(a, b SingletonPredictionContext, rootIsWildcard bool) PredictionContext {
+ if rootIsWildcard {
+ if a == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY // // + b =//
+ }
+ if b == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY // a +// =//
+ }
+ } else {
+ if a == BasePredictionContextEMPTY && b == BasePredictionContextEMPTY {
+ return BasePredictionContextEMPTY // $ + $ = $
+ } else if a == BasePredictionContextEMPTY { // $ + x = [$,x]
+ payloads := []int{b.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []PredictionContext{b.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ } else if b == BasePredictionContextEMPTY { // x + $ = [$,x] ($ is always first if present)
+ payloads := []int{a.getReturnState(-1), BasePredictionContextEmptyReturnState}
+ parents := []PredictionContext{a.GetParent(-1), nil}
+ return NewArrayPredictionContext(parents, payloads)
+ }
+ }
+ return nil
+}
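+
+// Editorial sketch (not part of the upstream file): the root cases above, with
+// $ standing for EMPTY:
+//
+//	rootIsWildcard (local context): $ + b = $,  a + $ = $
+//	full context:                   $ + $ = $,  $ + x = [$,x],  x + $ = [$,x]
+//
+// Every other combination falls through and returns nil so mergeSingletons can
+// proceed with a normal merge.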
+
+//
+// Merge two {@link ArrayBasePredictionContext} instances.
+//
+// Different tops, different parents.
+//
+//
+// Shared top, same parents.
+//
+//
+// Shared top, different parents.
+//
+//
+// Shared top, all shared parents.
+//
+//
+// Equal tops, merge parents and reduce top to
+// {@link SingletonBasePredictionContext}.
+//
+// /
+func mergeArrays(a, b *ArrayPredictionContext, rootIsWildcard bool, mergeCache *DoubleDict) PredictionContext {
+ if mergeCache != nil {
+ previous := mergeCache.Get(a.hash(), b.hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ previous = mergeCache.Get(b.hash(), a.hash())
+ if previous != nil {
+ return previous.(PredictionContext)
+ }
+ }
+ // merge sorted payloads a + b => M
+ i := 0 // walks a
+ j := 0 // walks b
+ k := 0 // walks target M array
+
+ mergedReturnStates := make([]int, len(a.returnStates)+len(b.returnStates))
+ mergedParents := make([]PredictionContext, len(a.returnStates)+len(b.returnStates))
+ // walk and merge to yield mergedParents, mergedReturnStates
+ for i < len(a.returnStates) && j < len(b.returnStates) {
+ aParent := a.parents[i]
+ bParent := b.parents[j]
+ if a.returnStates[i] == b.returnStates[j] {
+ // same payload (stack tops are equal), must yield merged singleton
+ payload := a.returnStates[i]
+ // $+$ = $
+ bothDollars := payload == BasePredictionContextEmptyReturnState && aParent == nil && bParent == nil
+ axAX := (aParent != nil && bParent != nil && aParent == bParent) // ax+ax
+ // ->
+ // ax
+ if bothDollars || axAX {
+ mergedParents[k] = aParent // choose left
+ mergedReturnStates[k] = payload
+ } else { // ax+ay -> a'[x,y]
+ mergedParent := merge(aParent, bParent, rootIsWildcard, mergeCache)
+ mergedParents[k] = mergedParent
+ mergedReturnStates[k] = payload
+ }
+ i++ // hop over left one as usual
+ j++ // but also Skip one in right side since we merge
+ } else if a.returnStates[i] < b.returnStates[j] { // copy a[i] to M
+ mergedParents[k] = aParent
+ mergedReturnStates[k] = a.returnStates[i]
+ i++
+ } else { // b > a, copy b[j] to M
+ mergedParents[k] = bParent
+ mergedReturnStates[k] = b.returnStates[j]
+ j++
+ }
+ k++
+ }
+ // copy over any payloads remaining in either array
+ if i < len(a.returnStates) {
+ for p := i; p < len(a.returnStates); p++ {
+ mergedParents[k] = a.parents[p]
+ mergedReturnStates[k] = a.returnStates[p]
+ k++
+ }
+ } else {
+ for p := j; p < len(b.returnStates); p++ {
+ mergedParents[k] = b.parents[p]
+ mergedReturnStates[k] = b.returnStates[p]
+ k++
+ }
+ }
+ // trim merged if we combined a few that had same stack tops
+ if k < len(mergedParents) { // write index < last position trim
+ if k == 1 { // for just one merged element, return singleton top
+ pc := SingletonBasePredictionContextCreate(mergedParents[0], mergedReturnStates[0])
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), pc)
+ }
+ return pc
+ }
+ mergedParents = mergedParents[0:k]
+ mergedReturnStates = mergedReturnStates[0:k]
+ }
+
+ M := NewArrayPredictionContext(mergedParents, mergedReturnStates)
+
+ // if we created same array as a or b, return that instead
+ // TODO: track whether this is possible above during merge sort for speed
+ if M == a {
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), a)
+ }
+ return a
+ }
+ if M == b {
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), b)
+ }
+ return b
+ }
+ combineCommonParents(mergedParents)
+
+ if mergeCache != nil {
+ mergeCache.set(a.hash(), b.hash(), M)
+ }
+ return M
+}
+
+//
+// Make pass over all M {@code parents} merge any {@code equals()}
+// ones.
+// /
+func combineCommonParents(parents []PredictionContext) {
+ uniqueParents := make(map[PredictionContext]PredictionContext)
+
+ for p := 0; p < len(parents); p++ {
+ parent := parents[p]
+ if uniqueParents[parent] == nil {
+ uniqueParents[parent] = parent
+ }
+ }
+ for q := 0; q < len(parents); q++ {
+ parents[q] = uniqueParents[parents[q]]
+ }
+}
+
+func getCachedBasePredictionContext(context PredictionContext, contextCache *PredictionContextCache, visited map[PredictionContext]PredictionContext) PredictionContext {
+
+ if context.isEmpty() {
+ return context
+ }
+ existing := visited[context]
+ if existing != nil {
+ return existing
+ }
+ existing = contextCache.Get(context)
+ if existing != nil {
+ visited[context] = existing
+ return existing
+ }
+ changed := false
+ parents := make([]PredictionContext, context.length())
+ for i := 0; i < len(parents); i++ {
+ parent := getCachedBasePredictionContext(context.GetParent(i), contextCache, visited)
+ if changed || parent != context.GetParent(i) {
+ if !changed {
+ parents = make([]PredictionContext, context.length())
+ for j := 0; j < context.length(); j++ {
+ parents[j] = context.GetParent(j)
+ }
+ changed = true
+ }
+ parents[i] = parent
+ }
+ }
+ if !changed {
+ contextCache.add(context)
+ visited[context] = context
+ return context
+ }
+ var updated PredictionContext
+ if len(parents) == 0 {
+ updated = BasePredictionContextEMPTY
+ } else if len(parents) == 1 {
+ updated = SingletonBasePredictionContextCreate(parents[0], context.getReturnState(0))
+ } else {
+ updated = NewArrayPredictionContext(parents, context.(*ArrayPredictionContext).GetReturnStates())
+ }
+ contextCache.add(updated)
+ visited[updated] = updated
+ visited[context] = updated
+
+ return updated
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
new file mode 100644
index 000000000..15718f912
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/prediction_mode.go
@@ -0,0 +1,553 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// This enumeration defines the prediction modes available in ANTLR 4 along with
+// utility methods for analyzing configuration sets for conflicts and/or
+// ambiguities.
+
+const (
+ //
+ // The SLL(*) prediction mode. This prediction mode ignores the current
+ // parser context when making predictions. This is the fastest prediction
+ // mode, and provides correct results for many grammars. This prediction
+ // mode is more powerful than the prediction mode provided by ANTLR 3, but
+ // may result in syntax errors for grammar and input combinations which are
+ // not SLL.
+ //
+ //
+ // When using this prediction mode, the parser will either return a correct
+ // parse tree (i.e. the same parse tree that would be returned with the
+ // {@link //LL} prediction mode), or it will Report a syntax error. If a
+ // syntax error is encountered when using the {@link //SLL} prediction mode,
+ // it may be due to either an actual syntax error in the input or indicate
+ // that the particular combination of grammar and input requires the more
+ // powerful {@link //LL} prediction abilities to complete successfully.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeSLL = 0
+ //
+ // The LL(*) prediction mode. This prediction mode allows the current parser
+ // context to be used for resolving SLL conflicts that occur during
+ // prediction. This is the fastest prediction mode that guarantees correct
+ // parse results for all combinations of grammars with syntactically correct
+ // inputs.
+ //
+ //
+ // When using this prediction mode, the parser will make correct decisions
+ // for all syntactically-correct grammar and input combinations. However, in
+ // cases where the grammar is truly ambiguous this prediction mode might not
+ // Report a precise answer for exactly which alternatives are
+ // ambiguous.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLL = 1
+ //
+ // The LL(*) prediction mode with exact ambiguity detection. In addition to
+ // the correctness guarantees provided by the {@link //LL} prediction mode,
+ // this prediction mode instructs the prediction algorithm to determine the
+ // complete and exact set of ambiguous alternatives for every ambiguous
+ // decision encountered while parsing.
+ //
+ //
+ // This prediction mode may be used for diagnosing ambiguities during
+ // grammar development. Due to the performance overhead of calculating sets
+ // of ambiguous alternatives, this prediction mode should be avoided when
+ // the exact results are not necessary.
+ //
+ //
+ // This prediction mode does not provide any guarantees for prediction
+ // behavior for syntactically-incorrect inputs.
+ //
+ PredictionModeLLExactAmbigDetection = 2
+)
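+
+// Editorial sketch (not part of the upstream file): a parser chooses one of
+// these modes on its ATN simulator before parsing, typically something like
+//
+//	parser.GetInterpreter().SetPredictionMode(PredictionModeSLL)
+//
+// (assuming the usual GetInterpreter/SetPredictionMode accessors of this
+// runtime; the exact call site lives in user code, not in this file).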
+
+//
+// Computes the SLL prediction termination condition.
+//
+//
+// This method computes the SLL prediction termination condition for both of
+// the following cases.
+//
+//
+// - The usual SLL+LL fallback upon SLL conflict
+// - Pure SLL without LL fallback
+//
+//
+// COMBINED SLL+LL PARSING
+//
+// When LL-fallback is enabled upon SLL conflict, correct predictions are
+// ensured regardless of how the termination condition is computed by this
+// method. Due to the substantially higher cost of LL prediction, the
+// prediction should only fall back to LL when the additional lookahead
+// cannot lead to a unique SLL prediction.
+//
+// Assuming combined SLL+LL parsing, an SLL configuration set with only
+// conflicting subsets should fall back to full LL, even if the
+// configuration sets don't resolve to the same alternative (e.g.
+// {@code {1,2}} and {@code {3,4}}. If there is at least one non-conflicting
+// configuration, SLL could continue with the hopes that more lookahead will
+// resolve via one of those non-conflicting configurations.
+//
+// Here's the prediction termination rule then: SLL (for SLL+LL parsing)
+// stops when it sees only conflicting configuration subsets. In contrast,
+// full LL keeps going when there is uncertainty.
+//
+// HEURISTIC
+//
+// As a heuristic, we stop prediction when we see any conflicting subset
+// unless we see a state that only has one alternative associated with it.
+// The single-alt-state thing lets prediction continue upon rules like
+// (otherwise, it would admit defeat too soon):
+//
+// {@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' }
+//
+// When the ATN simulation reaches the state before {@code ';'}, it has a
+// DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
+// {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
+// processing this node because alternative two has another way to continue,
+// via {@code [6|2|[]]}.
+//
+// It also lets us continue for this rule:
+//
+// {@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B }
+//
+// After Matching input A, we reach the stop state for rule A, state 1.
+// State 8 is the state right before B. Clearly alternatives 1 and 2
+// conflict and no amount of further lookahead will separate the two.
+// However, alternative 3 will be able to continue and so we do not stop
+// working on this state. In the previous example, we're concerned with
+// states associated with the conflicting alternatives. Here alt 3 is not
+// associated with the conflicting configs, but since we can continue
+// looking for input reasonably, don't declare the state done.
+//
+// PURE SLL PARSING
+//
+// To handle pure SLL parsing, all we have to do is make sure that we
+// combine stack contexts for configurations that differ only by semantic
+// predicate. From there, we can do the usual SLL termination heuristic.
+//
+// PREDICATES IN SLL+LL PARSING
+//
+// SLL decisions don't evaluate predicates until after they reach DFA stop
+// states because they need to create the DFA cache that works in all
+// semantic situations. In contrast, full LL evaluates predicates collected
+// during start state computation so it can ignore predicates thereafter.
+// This means that SLL termination detection can totally ignore semantic
+// predicates.
+//
+// Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
+// semantic predicate contexts so we might see two configurations like the
+// following.
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p})}
+//
+// Before testing these configurations against others, we have to merge
+// {@code x} and {@code x'} (without modifying the existing configurations).
+// For example, we test {@code (x+x')==x''} when looking for conflicts in
+// the following configurations.
+//
+// {@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}
+//
+// If the configuration set has predicates (as indicated by
+// {@link ATNConfigSet//hasSemanticContext}), this algorithm makes a copy of
+// the configurations to strip out all of the predicates so that a standard
+// {@link ATNConfigSet} will merge everything ignoring predicates.
+//
+func PredictionModehasSLLConflictTerminatingPrediction(mode int, configs ATNConfigSet) bool {
+ // Configs in rule stop states indicate reaching the end of the decision
+ // rule (local context) or end of start rule (full context). If all
+ // configs meet this condition, then none of the configurations is able
+ // to Match additional input so we terminate prediction.
+ //
+ if PredictionModeallConfigsInRuleStopStates(configs) {
+ return true
+ }
+ // pure SLL mode parsing
+ if mode == PredictionModeSLL {
+ // Don't bother with combining configs from different semantic
+ // contexts if we can fail over to full LL costs more time
+ // since we'll often fail over anyway.
+ if configs.HasSemanticContext() {
+ // dup configs, tossing out semantic predicates
+ dup := NewBaseATNConfigSet(false)
+ for _, c := range configs.GetItems() {
+
+ // NewBaseATNConfig({semanticContext:}, c)
+ c = NewBaseATNConfig2(c, SemanticContextNone)
+ dup.Add(c, nil)
+ }
+ configs = dup
+ }
+ // now we have combined contexts for configs with dissimilar preds
+ }
+ // pure SLL or combined SLL+LL mode parsing
+ altsets := PredictionModegetConflictingAltSubsets(configs)
+ return PredictionModehasConflictingAltSet(altsets) && !PredictionModehasStateAssociatedWithOneAlt(configs)
+}
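+
+// Editorial sketch (not part of the upstream file): the function above returns
+// true, i.e. SLL must stop (or fall back to full LL), as soon as some
+// (state, context) subset carries more than one alternative, unless at least
+// one ATN state is still associated with exactly one alternative, in which
+// case SLL keeps consuming lookahead.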
+
+// Checks if any configuration in {@code configs} is in a
+// {@link RuleStopState}. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// @param configs the configuration set to test
+// @return {@code true} if any configuration in {@code configs} is in a
+// {@link RuleStopState}, otherwise {@code false}
+func PredictionModehasConfigInRuleStopState(configs ATNConfigSet) bool {
+ for _, c := range configs.GetItems() {
+ if _, ok := c.GetState().(*RuleStopState); ok {
+ return true
+ }
+ }
+ return false
+}
+
+// Checks if all configurations in {@code configs} are in a
+// {@link RuleStopState}. Configurations meeting this condition have reached
+// the end of the decision rule (local context) or end of start rule (full
+// context).
+//
+// @param configs the configuration set to test
+// @return {@code true} if all configurations in {@code configs} are in a
+// {@link RuleStopState}, otherwise {@code false}
+func PredictionModeallConfigsInRuleStopStates(configs ATNConfigSet) bool {
+
+ for _, c := range configs.GetItems() {
+ if _, ok := c.GetState().(*RuleStopState); !ok {
+ return false
+ }
+ }
+ return true
+}
+
+//
+// Full LL prediction termination.
+//
+// Can we stop looking ahead during ATN simulation or is there some
+// uncertainty as to which alternative we will ultimately pick, after
+// consuming more input? Even if there are partial conflicts, we might know
+// that everything is going to resolve to the same minimum alternative. That
+// means we can stop since no more lookahead will change that fact. On the
+// other hand, there might be multiple conflicts that resolve to different
+// minimums. That means we need more look ahead to decide which of those
+// alternatives we should predict.
+//
+// The basic idea is to split the set of configurations {@code C}, into
+// conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
+// non-conflicting configurations. Two configurations conflict if they have
+// identical {@link ATNConfig//state} and {@link ATNConfig//context} values
+// but different {@link ATNConfig//alt} value, e.g. {@code (s, i, ctx, _)}
+// and {@code (s, j, ctx, _)} for {@code i!=j}.
+//
+// Reduce these configuration subsets to the set of possible alternatives.
+// You can compute the alternative subsets in one pass as follows:
+//
+// {@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
+// {@code C} holding {@code s} and {@code ctx} fixed.
+//
+// Or in pseudo-code, for each configuration {@code c} in {@code C}:
+//
+//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+//
+//
+// The values in {@code map} are the set of {@code A_s,ctx} sets.
+//
+// If {@code |A_s,ctx|=1} then there is no conflict associated with
+// {@code s} and {@code ctx}.
+//
+// Reduce the subsets to singletons by choosing a minimum of each subset. If
+// the union of these alternative subsets is a singleton, then no amount of
+// more lookahead will help us. We will always pick that alternative. If,
+// however, there is more than one alternative, then we are uncertain which
+// alternative to predict and must continue looking for resolution. We may
+// or may not discover an ambiguity in the future, even if there are no
+// conflicting subsets this round.
+//
+// The biggest sin is to terminate early because it means we've made a
+// decision but were uncertain as to the eventual outcome. We haven't used
+// enough lookahead. On the other hand, announcing a conflict too late is no
+// big deal you will still have the conflict. It's just inefficient. It
+// might even look until the end of file.
+//
+// No special consideration for semantic predicates is required because
+// predicates are evaluated on-the-fly for full LL prediction, ensuring that
+// no configuration contains a semantic context during the termination
+// check.
+//
+// CONFLICTING CONFIGS
+//
+// Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
+// when {@code i!=j} but {@code x=x'}. Because we merge all
+// {@code (s, i, _)} configurations together, that means that there are at
+// most {@code n} configurations associated with state {@code s} for
+// {@code n} possible alternatives in the decision. The merged stacks
+// complicate the comparison of configuration contexts {@code x} and
+// {@code x'}. Sam checks to see if one is a subset of the other by calling
+// merge and checking to see if the merged result is either {@code x} or
+// {@code x'}. If the {@code x} associated with lowest alternative {@code i}
+// is the superset, then {@code i} is the only possible prediction since the
+// others resolve to {@code min(i)} as well. However, if {@code x} is
+// associated with {@code j>i} then at least one stack configuration for
+// {@code j} is not in conflict with alternative {@code i}. The algorithm
+// should keep going, looking for more lookahead due to the uncertainty.
+//
+// For simplicity, I'm doing an equality check between {@code x} and
+// {@code x'} that lets the algorithm continue to consume lookahead longer
+// than necessary. The reason I like the equality is of course the
+// simplicity but also because that is the test you need to detect the
+// alternatives that are actually in conflict.
+//
+// CONTINUE/STOP RULE
+//
+// Continue if union of resolved alternative sets from non-conflicting and
+// conflicting alternative subsets has more than one alternative. We are
+// uncertain about which alternative to predict.
+//
+// The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
+// alternatives are still in the running for the amount of input we've
+// consumed at this point. The conflicting sets let us strip away
+// configurations that won't lead to more states because we resolve
+// conflicts to the configuration with a minimum alternate for the
+// conflicting set.
+//
+// CASES
+//
+//
+//
+// - no conflicts and more than 1 alternative in set => continue
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
+// {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
+// {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
+// {@code {1,3}} => continue
+//
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+// {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
+// {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
+// {@code {1}} => stop and predict 1
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
+// {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {1}} = {@code {1}} => stop and predict 1, can announce
+// ambiguity {@code {1,2}}
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
+// {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {2}} = {@code {1,2}} => continue
+//
+// - {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
+// {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
+// {@code {3}} = {@code {1,3}} => continue
+//
+//
+//
+// EXACT AMBIGUITY DETECTION
+//
+// If all states Report the same conflicting set of alternatives, then we
+// know we have the exact ambiguity set.
+//
+// |A_i|>1 and
+// A_i = A_j for all i, j.
+//
+// In other words, we continue examining lookahead until all {@code A_i}
+// have more than one alternative and all {@code A_i} are the same. If
+// {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
+// because the resolved set is {@code {1}}. To determine what the real
+// ambiguity is, we have to know whether the ambiguity is between one and
+// two or one and three so we keep going. We can only stop prediction when
+// we need exact ambiguity detection when the sets look like
+// {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...
+//
+func PredictionModeresolvesToJustOneViableAlt(altsets []*BitSet) int {
+ return PredictionModegetSingleViableAlt(altsets)
+}
+
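
Editor's note: the continue/stop rule described in the comment above is easier to see in code. A minimal sketch follows (not part of the vendored file; the helper name is hypothetical and it assumes placement in package antlr):

// stoppedLookingAhead is a hypothetical helper illustrating the stop rule:
// prediction may stop consuming lookahead once every conflicting
// (state, context) subset resolves to the same single alternative.
func stoppedLookingAhead(configs ATNConfigSet) bool {
    altsets := PredictionModegetConflictingAltSubsets(configs)
    return PredictionModeresolvesToJustOneViableAlt(altsets) != ATNInvalidAltNumber
}
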
+//
+// Determines if every alternative subset in {@code altsets} contains more
+// than one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if every {@link BitSet} in {@code altsets} has
+// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
+//
+func PredictionModeallSubsetsConflict(altsets []*BitSet) bool {
+ return !PredictionModehasNonConflictingAltSet(altsets)
+}
+
+//
+// Determines if any single alternative subset in {@code altsets} contains
+// exactly one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if {@code altsets} contains a {@link BitSet} with
+// {@link BitSet//cardinality cardinality} 1, otherwise {@code false}
+//
+func PredictionModehasNonConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+//
+// Determines if any single alternative subset in {@code altsets} contains
+// more than one alternative.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if {@code altsets} contains a {@link BitSet} with
+// {@link BitSet//cardinality cardinality} > 1, otherwise {@code false}
+//
+func PredictionModehasConflictingAltSet(altsets []*BitSet) bool {
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if alts.length() > 1 {
+ return true
+ }
+ }
+ return false
+}
+
+//
+// Determines if every alternative subset in {@code altsets} is equivalent.
+//
+// @param altsets a collection of alternative subsets
+// @return {@code true} if every member of {@code altsets} is equal to the
+// others, otherwise {@code false}
+//
+func PredictionModeallSubsetsEqual(altsets []*BitSet) bool {
+ var first *BitSet
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ if first == nil {
+ first = alts
+ } else if alts != first {
+ return false
+ }
+ }
+
+ return true
+}
+
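
Editor's note: a short sketch (not part of the vendored file; helper name hypothetical) of the exact-ambiguity condition from the comment above, combining the two checks defined here:

// isExactAmbiguity illustrates the |A_i|>1 and A_i = A_j rule: every
// conflicting alt subset has more than one alternative and all subsets agree.
func isExactAmbiguity(altsets []*BitSet) bool {
    return PredictionModeallSubsetsConflict(altsets) &&
        PredictionModeallSubsetsEqual(altsets)
}
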
+//
+// Returns the unique alternative predicted by all alternative subsets in
+// {@code altsets}. If no such alternative exists, this method returns
+// {@link ATN//INVALID_ALT_NUMBER}.
+//
+// @param altsets a collection of alternative subsets
+//
+func PredictionModegetUniqueAlt(altsets []*BitSet) int {
+ all := PredictionModeGetAlts(altsets)
+ if all.length() == 1 {
+ return all.minValue()
+ }
+
+ return ATNInvalidAltNumber
+}
+
+// Gets the complete set of represented alternatives for a collection of
+// alternative subsets. This method returns the union of each {@link BitSet}
+// in {@code altsets}.
+//
+// @param altsets a collection of alternative subsets
+// @return the set of represented alternatives in {@code altsets}
+//
+func PredictionModeGetAlts(altsets []*BitSet) *BitSet {
+ all := NewBitSet()
+ for _, alts := range altsets {
+ all.or(alts)
+ }
+ return all
+}
+
+//
+// This func gets the conflicting alt subsets from a configuration set.
+// For each configuration {@code c} in {@code configs}:
+//
+//
+// map[c] U= c.{@link ATNConfig//alt alt} // map hash/equals uses s and x, not
+// alt and not pred
+//
+//
+func PredictionModegetConflictingAltSubsets(configs ATNConfigSet) []*BitSet {
+ configToAlts := make(map[int]*BitSet)
+
+ for _, c := range configs.GetItems() {
+ key := 31 * c.GetState().GetStateNumber() + c.GetContext().hash()
+
+ alts, ok := configToAlts[key]
+ if !ok {
+ alts = NewBitSet()
+ configToAlts[key] = alts
+ }
+ alts.add(c.GetAlt())
+ }
+
+ values := make([]*BitSet, 0, 10)
+ for _, v := range configToAlts {
+ values = append(values, v)
+ }
+ return values
+}
+
+//
+// Get a map from state to alt subset from a configuration set. For each
+// configuration {@code c} in {@code configs}:
+//
+//
+// map[c.{@link ATNConfig//state state}] U= c.{@link ATNConfig//alt alt}
+//
+//
+func PredictionModeGetStateToAltMap(configs ATNConfigSet) *AltDict {
+ m := NewAltDict()
+
+ for _, c := range configs.GetItems() {
+ alts := m.Get(c.GetState().String())
+ if alts == nil {
+ alts = NewBitSet()
+ m.put(c.GetState().String(), alts)
+ }
+ alts.(*BitSet).add(c.GetAlt())
+ }
+ return m
+}
+
+func PredictionModehasStateAssociatedWithOneAlt(configs ATNConfigSet) bool {
+ values := PredictionModeGetStateToAltMap(configs).values()
+ for i := 0; i < len(values); i++ {
+ if values[i].(*BitSet).length() == 1 {
+ return true
+ }
+ }
+ return false
+}
+
+func PredictionModegetSingleViableAlt(altsets []*BitSet) int {
+ result := ATNInvalidAltNumber
+
+ for i := 0; i < len(altsets); i++ {
+ alts := altsets[i]
+ minAlt := alts.minValue()
+ if result == ATNInvalidAltNumber {
+ result = minAlt
+ } else if result != minAlt { // more than 1 viable alt
+ return ATNInvalidAltNumber
+ }
+ }
+ return result
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
new file mode 100644
index 000000000..93efcf355
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/recognizer.go
@@ -0,0 +1,217 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strings"
+
+ "strconv"
+)
+
+type Recognizer interface {
+ GetLiteralNames() []string
+ GetSymbolicNames() []string
+ GetRuleNames() []string
+
+ Sempred(RuleContext, int, int) bool
+ Precpred(RuleContext, int) bool
+
+ GetState() int
+ SetState(int)
+ Action(RuleContext, int, int)
+ AddErrorListener(ErrorListener)
+ RemoveErrorListeners()
+ GetATN() *ATN
+ GetErrorListenerDispatch() ErrorListener
+}
+
+type BaseRecognizer struct {
+ listeners []ErrorListener
+ state int
+
+ RuleNames []string
+ LiteralNames []string
+ SymbolicNames []string
+ GrammarFileName string
+}
+
+func NewBaseRecognizer() *BaseRecognizer {
+ rec := new(BaseRecognizer)
+ rec.listeners = []ErrorListener{ConsoleErrorListenerINSTANCE}
+ rec.state = -1
+ return rec
+}
+
+var tokenTypeMapCache = make(map[string]int)
+var ruleIndexMapCache = make(map[string]int)
+
+func (b *BaseRecognizer) checkVersion(toolVersion string) {
+ runtimeVersion := "4.10.1"
+ if runtimeVersion != toolVersion {
+ fmt.Println("ANTLR runtime and generated code versions disagree: " + runtimeVersion + "!=" + toolVersion)
+ }
+}
+
+func (b *BaseRecognizer) Action(context RuleContext, ruleIndex, actionIndex int) {
+ panic("action not implemented on Recognizer!")
+}
+
+func (b *BaseRecognizer) AddErrorListener(listener ErrorListener) {
+ b.listeners = append(b.listeners, listener)
+}
+
+func (b *BaseRecognizer) RemoveErrorListeners() {
+ b.listeners = make([]ErrorListener, 0)
+}
+
+func (b *BaseRecognizer) GetRuleNames() []string {
+ return b.RuleNames
+}
+
+func (b *BaseRecognizer) GetTokenNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetSymbolicNames() []string {
+ return b.SymbolicNames
+}
+
+func (b *BaseRecognizer) GetLiteralNames() []string {
+ return b.LiteralNames
+}
+
+func (b *BaseRecognizer) GetState() int {
+ return b.state
+}
+
+func (b *BaseRecognizer) SetState(v int) {
+ b.state = v
+}
+
+//func (b *Recognizer) GetTokenTypeMap() {
+// var tokenNames = b.GetTokenNames()
+// if (tokenNames==nil) {
+// panic("The current recognizer does not provide a list of token names.")
+// }
+// var result = tokenTypeMapCache[tokenNames]
+// if(result==nil) {
+// result = tokenNames.reduce(function(o, k, i) { o[k] = i })
+// result.EOF = TokenEOF
+// tokenTypeMapCache[tokenNames] = result
+// }
+// return result
+//}
+
+// Get a map from rule names to rule indexes.
+//
+// Used for XPath and tree pattern compilation.
+//
+func (b *BaseRecognizer) GetRuleIndexMap() map[string]int {
+
+ panic("Method not defined!")
+ // var ruleNames = b.GetRuleNames()
+ // if (ruleNames==nil) {
+ // panic("The current recognizer does not provide a list of rule names.")
+ // }
+ //
+ // var result = ruleIndexMapCache[ruleNames]
+ // if(result==nil) {
+ // result = ruleNames.reduce(function(o, k, i) { o[k] = i })
+ // ruleIndexMapCache[ruleNames] = result
+ // }
+ // return result
+}
+
+func (b *BaseRecognizer) GetTokenType(tokenName string) int {
+ panic("Method not defined!")
+ // var ttype = b.GetTokenTypeMap()[tokenName]
+ // if (ttype !=nil) {
+ // return ttype
+ // } else {
+ // return TokenInvalidType
+ // }
+}
+
+//func (b *Recognizer) GetTokenTypeMap() map[string]int {
+// Vocabulary vocabulary = getVocabulary()
+//
+// Synchronized (tokenTypeMapCache) {
+// Map result = tokenTypeMapCache.Get(vocabulary)
+// if (result == null) {
+// result = new HashMap()
+// for (int i = 0; i < GetATN().maxTokenType; i++) {
+// String literalName = vocabulary.getLiteralName(i)
+// if (literalName != null) {
+// result.put(literalName, i)
+// }
+//
+// String symbolicName = vocabulary.GetSymbolicName(i)
+// if (symbolicName != null) {
+// result.put(symbolicName, i)
+// }
+// }
+//
+// result.put("EOF", Token.EOF)
+// result = Collections.unmodifiableMap(result)
+// tokenTypeMapCache.put(vocabulary, result)
+// }
+//
+// return result
+// }
+//}
+
+// What is the error header, normally line/character position information?//
+func (b *BaseRecognizer) GetErrorHeader(e RecognitionException) string {
+ line := e.GetOffendingToken().GetLine()
+ column := e.GetOffendingToken().GetColumn()
+ return "line " + strconv.Itoa(line) + ":" + strconv.Itoa(column)
+}
+
+// How should a token be displayed in an error message? The default
+// is to display just the text, but during development you might
+// want to have a lot of information spit out. Override in that case
+// to use t.String() (which, for CommonToken, dumps everything about
+// the token). This is better than forcing you to override a method in
+// your token objects because you don't have to go modify your lexer
+// so that it creates a NewJava type.
+//
+// @deprecated This method is not called by the ANTLR 4 Runtime. Specific
+// implementations of {@link ANTLRErrorStrategy} may provide a similar
+// feature when necessary. For example, see
+// {@link DefaultErrorStrategy//GetTokenErrorDisplay}.
+//
+func (b *BaseRecognizer) GetTokenErrorDisplay(t Token) string {
+ if t == nil {
+ return ""
+ }
+ s := t.GetText()
+ if s == "" {
+ if t.GetTokenType() == TokenEOF {
+ s = ""
+ } else {
+ s = "<" + strconv.Itoa(t.GetTokenType()) + ">"
+ }
+ }
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+
+ return "'" + s + "'"
+}
+
+func (b *BaseRecognizer) GetErrorListenerDispatch() ErrorListener {
+ return NewProxyErrorListener(b.listeners)
+}
+
+// subclass needs to override these if there are sempreds or actions
+// that the ATN interp needs to execute
+func (b *BaseRecognizer) Sempred(localctx RuleContext, ruleIndex int, actionIndex int) bool {
+ return true
+}
+
+func (b *BaseRecognizer) Precpred(localctx RuleContext, precedence int) bool {
+ return true
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
new file mode 100644
index 000000000..600cf8c06
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/rule_context.go
@@ -0,0 +1,114 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// A rule context is a record of a single rule invocation. It knows
+// which context invoked it, if any. If there is no parent context, then
+// naturally the invoking state is not valid. The parent link
+// provides a chain upwards from the current rule invocation to the root
+// of the invocation tree, forming a stack. We actually carry no
+// information about the rule associated with b context (except
+// when parsing). We keep only the state number of the invoking state from
+// the ATN submachine that invoked b. Contrast b with the s
+// pointer inside ParserRuleContext that tracks the current state
+// being "executed" for the current rule.
+//
+// The parent contexts are useful for computing lookahead sets and
+// getting error information.
+//
+// These objects are used during parsing and prediction.
+// For the special case of parsers, we use the subclass
+// ParserRuleContext.
+//
+// @see ParserRuleContext
+//
+
+type RuleContext interface {
+ RuleNode
+
+ GetInvokingState() int
+ SetInvokingState(int)
+
+ GetRuleIndex() int
+ IsEmpty() bool
+
+ GetAltNumber() int
+ SetAltNumber(altNumber int)
+
+ String([]string, RuleContext) string
+}
+
+type BaseRuleContext struct {
+ parentCtx RuleContext
+ invokingState int
+ RuleIndex int
+}
+
+func NewBaseRuleContext(parent RuleContext, invokingState int) *BaseRuleContext {
+
+ rn := new(BaseRuleContext)
+
+ // What context invoked b rule?
+ rn.parentCtx = parent
+
+ // What state invoked the rule associated with b context?
+ // The "return address" is the followState of invokingState
+ // If parent is nil, b should be -1.
+ if parent == nil {
+ rn.invokingState = -1
+ } else {
+ rn.invokingState = invokingState
+ }
+
+ return rn
+}
+
+func (b *BaseRuleContext) GetBaseRuleContext() *BaseRuleContext {
+ return b
+}
+
+func (b *BaseRuleContext) SetParent(v Tree) {
+ if v == nil {
+ b.parentCtx = nil
+ } else {
+ b.parentCtx = v.(RuleContext)
+ }
+}
+
+func (b *BaseRuleContext) GetInvokingState() int {
+ return b.invokingState
+}
+
+func (b *BaseRuleContext) SetInvokingState(t int) {
+ b.invokingState = t
+}
+
+func (b *BaseRuleContext) GetRuleIndex() int {
+ return b.RuleIndex
+}
+
+func (b *BaseRuleContext) GetAltNumber() int {
+ return ATNInvalidAltNumber
+}
+
+func (b *BaseRuleContext) SetAltNumber(altNumber int) {}
+
+// A context is empty if there is no invoking state meaning nobody call
+// current context.
+func (b *BaseRuleContext) IsEmpty() bool {
+ return b.invokingState == -1
+}
+
+// Return the combined text of all child nodes. This method only considers
+// tokens which have been added to the parse tree.
+//
+// Since tokens on hidden channels (e.g. whitespace or comments) are not
+// added to the parse trees, they will not appear in the output of b
+// method.
+//
+
+func (b *BaseRuleContext) GetParent() Tree {
+ return b.parentCtx
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
new file mode 100644
index 000000000..9ada43077
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/semantic_context.go
@@ -0,0 +1,466 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+)
+
+// A tree structure used to record the semantic context in which
+// an ATN configuration is valid. It's either a single predicate,
+// a conjunction {@code p1&&p2}, or a sum of products {@code p1||p2}.
+//
+//
+// I have scoped the {@link AND}, {@link OR}, and {@link Predicate} subclasses of
+// {@link SemanticContext} within the scope of this outer class.
+//
+
+type SemanticContext interface {
+ comparable
+
+ evaluate(parser Recognizer, outerContext RuleContext) bool
+ evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext
+
+ hash() int
+ String() string
+}
+
+func SemanticContextandContext(a, b SemanticContext) SemanticContext {
+ if a == nil || a == SemanticContextNone {
+ return b
+ }
+ if b == nil || b == SemanticContextNone {
+ return a
+ }
+ result := NewAND(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
+
+func SemanticContextorContext(a, b SemanticContext) SemanticContext {
+ if a == nil {
+ return b
+ }
+ if b == nil {
+ return a
+ }
+ if a == SemanticContextNone || b == SemanticContextNone {
+ return SemanticContextNone
+ }
+ result := NewOR(a, b)
+ if len(result.opnds) == 1 {
+ return result.opnds[0]
+ }
+
+ return result
+}
+
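
Editor's note: a minimal sketch (not part of the vendored file; helper name hypothetical, assumes package antlr since the combinators are unexported) of how these combinators behave:

// exampleCombine illustrates the combinators above.
func exampleCombine() SemanticContext {
    p1 := NewPredicate(0, 0, false)
    p2 := NewPredicate(1, 0, false)
    // AND-ing with SemanticContextNone ({true}?) just returns the other operand.
    _ = SemanticContextandContext(p1, SemanticContextNone) // == p1
    // OR-ing with SemanticContextNone short-circuits to SemanticContextNone.
    _ = SemanticContextorContext(p1, SemanticContextNone) // == SemanticContextNone
    // Two distinct predicates produce an *AND node holding both operands.
    return SemanticContextandContext(p1, p2)
}
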
+type Predicate struct {
+ ruleIndex int
+ predIndex int
+ isCtxDependent bool
+}
+
+func NewPredicate(ruleIndex, predIndex int, isCtxDependent bool) *Predicate {
+ p := new(Predicate)
+
+ p.ruleIndex = ruleIndex
+ p.predIndex = predIndex
+ p.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ return p
+}
+
+//The default {@link SemanticContext}, which is semantically equivalent to
+//a predicate of the form {@code {true}?}.
+
+var SemanticContextNone SemanticContext = NewPredicate(-1, -1, false)
+
+func (p *Predicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ return p
+}
+
+func (p *Predicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+
+ var localctx RuleContext
+
+ if p.isCtxDependent {
+ localctx = outerContext
+ }
+
+ return parser.Sempred(localctx, p.ruleIndex, p.predIndex)
+}
+
+func (p *Predicate) equals(other interface{}) bool {
+ if p == other {
+ return true
+ } else if _, ok := other.(*Predicate); !ok {
+ return false
+ } else {
+ return p.ruleIndex == other.(*Predicate).ruleIndex &&
+ p.predIndex == other.(*Predicate).predIndex &&
+ p.isCtxDependent == other.(*Predicate).isCtxDependent
+ }
+}
+
+func (p *Predicate) hash() int {
+ h := murmurInit(0)
+ h = murmurUpdate(h, p.ruleIndex)
+ h = murmurUpdate(h, p.predIndex)
+ if p.isCtxDependent {
+ h = murmurUpdate(h, 1)
+ } else {
+ h = murmurUpdate(h, 0)
+ }
+ return murmurFinish(h, 3)
+}
+
+func (p *Predicate) String() string {
+ return "{" + strconv.Itoa(p.ruleIndex) + ":" + strconv.Itoa(p.predIndex) + "}?"
+}
+
+type PrecedencePredicate struct {
+ precedence int
+}
+
+func NewPrecedencePredicate(precedence int) *PrecedencePredicate {
+
+ p := new(PrecedencePredicate)
+ p.precedence = precedence
+
+ return p
+}
+
+func (p *PrecedencePredicate) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ return parser.Precpred(outerContext, p.precedence)
+}
+
+func (p *PrecedencePredicate) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ if parser.Precpred(outerContext, p.precedence) {
+ return SemanticContextNone
+ }
+
+ return nil
+}
+
+func (p *PrecedencePredicate) compareTo(other *PrecedencePredicate) int {
+ return p.precedence - other.precedence
+}
+
+func (p *PrecedencePredicate) equals(other interface{}) bool {
+ if p == other {
+ return true
+ } else if _, ok := other.(*PrecedencePredicate); !ok {
+ return false
+ } else {
+ return p.precedence == other.(*PrecedencePredicate).precedence
+ }
+}
+
+func (p *PrecedencePredicate) hash() int {
+ h := uint32(1)
+ h = 31*h + uint32(p.precedence)
+ return int(h)
+}
+
+func (p *PrecedencePredicate) String() string {
+ return "{" + strconv.Itoa(p.precedence) + ">=prec}?"
+}
+
+func PrecedencePredicatefilterPrecedencePredicates(set Set) []*PrecedencePredicate {
+ result := make([]*PrecedencePredicate, 0)
+
+ set.Each(func(v interface{}) bool {
+ if c2, ok := v.(*PrecedencePredicate); ok {
+ result = append(result, c2)
+ }
+ return true
+ })
+
+ return result
+}
+
+// A semantic context which is true whenever none of the contained contexts
+// is false.
+
+type AND struct {
+ opnds []SemanticContext
+}
+
+func NewAND(a, b SemanticContext) *AND {
+
+ operands := newArray2DHashSet(nil, nil)
+ if aa, ok := a.(*AND); ok {
+ for _, o := range aa.opnds {
+ operands.Add(o)
+ }
+ } else {
+ operands.Add(a)
+ }
+
+ if ba, ok := b.(*AND); ok {
+ for _, o := range ba.opnds {
+ operands.Add(o)
+ }
+ } else {
+ operands.Add(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+ // interested in the transition with the lowest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence < reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.Add(reduced)
+ }
+
+ vs := operands.Values()
+ opnds := make([]SemanticContext, len(vs))
+ for i, v := range vs {
+ opnds[i] = v.(SemanticContext)
+ }
+
+ and := new(AND)
+ and.opnds = opnds
+
+ return and
+}
+
+func (a *AND) equals(other interface{}) bool {
+ if a == other {
+ return true
+ } else if _, ok := other.(*AND); !ok {
+ return false
+ } else {
+ for i, v := range other.(*AND).opnds {
+ if !a.opnds[i].equals(v) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+//
+// {@inheritDoc}
+//
+//
+// The evaluation of predicates by a context is short-circuiting, but
+// unordered.
+//
+func (a *AND) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(a.opnds); i++ {
+ if !a.opnds[i].evaluate(parser, outerContext) {
+ return false
+ }
+ }
+ return true
+}
+
+func (a *AND) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+
+ for i := 0; i < len(a.opnds); i++ {
+ context := a.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == nil {
+ // The AND context is false if any element is false
+ return nil
+ } else if evaluated != SemanticContextNone {
+ // Reduce the result by Skipping true elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return a
+ }
+
+ if len(operands) == 0 {
+ // all elements were true, so the AND context is true
+ return SemanticContextNone
+ }
+
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextandContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (a *AND) hash() int {
+ h := murmurInit(37) // Init with a value different from OR
+ for _, op := range a.opnds {
+ h = murmurUpdate(h, op.hash())
+ }
+ return murmurFinish(h, len(a.opnds))
+}
+
+func (a *OR) hash() int {
+ h := murmurInit(41) // Init with a value different from AND
+ for _, op := range a.opnds {
+ h = murmurUpdate(h, op.hash())
+ }
+ return murmurFinish(h, len(a.opnds))
+}
+
+func (a *AND) String() string {
+ s := ""
+
+ for _, o := range a.opnds {
+ s += "&& " + fmt.Sprint(o)
+ }
+
+ if len(s) > 3 {
+ return s[0:3]
+ }
+
+ return s
+}
+
+//
+// A semantic context which is true whenever at least one of the contained
+// contexts is true.
+//
+
+type OR struct {
+ opnds []SemanticContext
+}
+
+func NewOR(a, b SemanticContext) *OR {
+
+ operands := newArray2DHashSet(nil, nil)
+ if aa, ok := a.(*OR); ok {
+ for _, o := range aa.opnds {
+ operands.Add(o)
+ }
+ } else {
+ operands.Add(a)
+ }
+
+ if ba, ok := b.(*OR); ok {
+ for _, o := range ba.opnds {
+ operands.Add(o)
+ }
+ } else {
+ operands.Add(b)
+ }
+ precedencePredicates := PrecedencePredicatefilterPrecedencePredicates(operands)
+ if len(precedencePredicates) > 0 {
+ // interested in the transition with the lowest precedence
+ var reduced *PrecedencePredicate
+
+ for _, p := range precedencePredicates {
+ if reduced == nil || p.precedence > reduced.precedence {
+ reduced = p
+ }
+ }
+
+ operands.Add(reduced)
+ }
+
+ vs := operands.Values()
+
+ opnds := make([]SemanticContext, len(vs))
+ for i, v := range vs {
+ opnds[i] = v.(SemanticContext)
+ }
+
+ o := new(OR)
+ o.opnds = opnds
+
+ return o
+}
+
+func (o *OR) equals(other interface{}) bool {
+ if o == other {
+ return true
+ } else if _, ok := other.(*OR); !ok {
+ return false
+ } else {
+ for i, v := range other.(*OR).opnds {
+ if !o.opnds[i].equals(v) {
+ return false
+ }
+ }
+ return true
+ }
+}
+
+//
+// The evaluation of predicates by o context is short-circuiting, but
+// unordered.
+//
+func (o *OR) evaluate(parser Recognizer, outerContext RuleContext) bool {
+ for i := 0; i < len(o.opnds); i++ {
+ if o.opnds[i].evaluate(parser, outerContext) {
+ return true
+ }
+ }
+ return false
+}
+
+func (o *OR) evalPrecedence(parser Recognizer, outerContext RuleContext) SemanticContext {
+ differs := false
+ operands := make([]SemanticContext, 0)
+ for i := 0; i < len(o.opnds); i++ {
+ context := o.opnds[i]
+ evaluated := context.evalPrecedence(parser, outerContext)
+ differs = differs || (evaluated != context)
+ if evaluated == SemanticContextNone {
+ // The OR context is true if any element is true
+ return SemanticContextNone
+ } else if evaluated != nil {
+ // Reduce the result by Skipping false elements
+ operands = append(operands, evaluated)
+ }
+ }
+ if !differs {
+ return o
+ }
+ if len(operands) == 0 {
+ // all elements were false, so the OR context is false
+ return nil
+ }
+ var result SemanticContext
+
+ for _, o := range operands {
+ if result == nil {
+ result = o
+ } else {
+ result = SemanticContextorContext(result, o)
+ }
+ }
+
+ return result
+}
+
+func (o *OR) String() string {
+ s := ""
+
+ for _, o := range o.opnds {
+ s += "|| " + fmt.Sprint(o)
+ }
+
+ if len(s) > 3 {
+ return s[0:3]
+ }
+
+ return s
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
new file mode 100644
index 000000000..2d8e99095
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token.go
@@ -0,0 +1,210 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "strconv"
+ "strings"
+)
+
+type TokenSourceCharStreamPair struct {
+ tokenSource TokenSource
+ charStream CharStream
+}
+
+// A token has properties: text, type, line, character position in the line
+// (so we can ignore tabs), token channel, index, and source from which
+// we obtained this token.
+
+type Token interface {
+ GetSource() *TokenSourceCharStreamPair
+ GetTokenType() int
+ GetChannel() int
+ GetStart() int
+ GetStop() int
+ GetLine() int
+ GetColumn() int
+
+ GetText() string
+ SetText(s string)
+
+ GetTokenIndex() int
+ SetTokenIndex(v int)
+
+ GetTokenSource() TokenSource
+ GetInputStream() CharStream
+}
+
+type BaseToken struct {
+ source *TokenSourceCharStreamPair
+ tokenType int // token type of the token
+ channel int // The parser ignores everything not on DEFAULT_CHANNEL
+ start int // optional return -1 if not implemented.
+ stop int // optional return -1 if not implemented.
+ tokenIndex int // from 0..n-1 of the token object in the input stream
+ line int // line=1..n of the 1st character
+ column int // beginning of the line at which it occurs, 0..n-1
+ text string // text of the token.
+ readOnly bool
+}
+
+const (
+ TokenInvalidType = 0
+
+ // During lookahead operations, this "token" signifies we hit rule end ATN state
+ // and did not follow it despite needing to.
+ TokenEpsilon = -2
+
+ TokenMinUserTokenType = 1
+
+ TokenEOF = -1
+
+ // All tokens go to the parser (unless Skip() is called in that rule)
+ // on a particular "channel". The parser tunes to a particular channel
+ // so that whitespace etc... can go to the parser on a "hidden" channel.
+
+ TokenDefaultChannel = 0
+
+ // Anything on different channel than DEFAULT_CHANNEL is not parsed
+ // by parser.
+
+ TokenHiddenChannel = 1
+)
+
+func (b *BaseToken) GetChannel() int {
+ return b.channel
+}
+
+func (b *BaseToken) GetStart() int {
+ return b.start
+}
+
+func (b *BaseToken) GetStop() int {
+ return b.stop
+}
+
+func (b *BaseToken) GetLine() int {
+ return b.line
+}
+
+func (b *BaseToken) GetColumn() int {
+ return b.column
+}
+
+func (b *BaseToken) GetTokenType() int {
+ return b.tokenType
+}
+
+func (b *BaseToken) GetSource() *TokenSourceCharStreamPair {
+ return b.source
+}
+
+func (b *BaseToken) GetTokenIndex() int {
+ return b.tokenIndex
+}
+
+func (b *BaseToken) SetTokenIndex(v int) {
+ b.tokenIndex = v
+}
+
+func (b *BaseToken) GetTokenSource() TokenSource {
+ return b.source.tokenSource
+}
+
+func (b *BaseToken) GetInputStream() CharStream {
+ return b.source.charStream
+}
+
+type CommonToken struct {
+ *BaseToken
+}
+
+func NewCommonToken(source *TokenSourceCharStreamPair, tokenType, channel, start, stop int) *CommonToken {
+
+ t := new(CommonToken)
+
+ t.BaseToken = new(BaseToken)
+
+ t.source = source
+ t.tokenType = tokenType
+ t.channel = channel
+ t.start = start
+ t.stop = stop
+ t.tokenIndex = -1
+ if t.source.tokenSource != nil {
+ t.line = source.tokenSource.GetLine()
+ t.column = source.tokenSource.GetCharPositionInLine()
+ } else {
+ t.column = -1
+ }
+ return t
+}
+
+// An empty {@link Pair} which is used as the default value of
+// {@link //source} for tokens that do not have a source.
+
+//CommonToken.EMPTY_SOURCE = [ nil, nil ]
+
+// Constructs a New{@link CommonToken} as a copy of another {@link Token}.
+//
+//
+// If {@code oldToken} is also a {@link CommonToken} instance, the newly
+// constructed token will share a reference to the {@link //text} field and
+// the {@link Pair} stored in {@link //source}. Otherwise, {@link //text} will
+// be assigned the result of calling {@link //GetText}, and {@link //source}
+// will be constructed from the result of {@link Token//GetTokenSource} and
+// {@link Token//GetInputStream}.
+//
+// @param oldToken The token to copy.
+//
+func (c *CommonToken) clone() *CommonToken {
+ t := NewCommonToken(c.source, c.tokenType, c.channel, c.start, c.stop)
+ t.tokenIndex = c.GetTokenIndex()
+ t.line = c.GetLine()
+ t.column = c.GetColumn()
+ t.text = c.GetText()
+ return t
+}
+
+func (c *CommonToken) GetText() string {
+ if c.text != "" {
+ return c.text
+ }
+ input := c.GetInputStream()
+ if input == nil {
+ return ""
+ }
+ n := input.Size()
+ if c.start < n && c.stop < n {
+ return input.GetTextFromInterval(NewInterval(c.start, c.stop))
+ }
+ return ""
+}
+
+func (c *CommonToken) SetText(text string) {
+ c.text = text
+}
+
+func (c *CommonToken) String() string {
+ txt := c.GetText()
+ if txt != "" {
+ txt = strings.Replace(txt, "\n", "\\n", -1)
+ txt = strings.Replace(txt, "\r", "\\r", -1)
+ txt = strings.Replace(txt, "\t", "\\t", -1)
+ } else {
+ txt = ""
+ }
+
+ var ch string
+ if c.channel > 0 {
+ ch = ",channel=" + strconv.Itoa(c.channel)
+ } else {
+ ch = ""
+ }
+
+ return "[@" + strconv.Itoa(c.tokenIndex) + "," + strconv.Itoa(c.start) + ":" + strconv.Itoa(c.stop) + "='" +
+ txt + "',<" + strconv.Itoa(c.tokenType) + ">" +
+ ch + "," + strconv.Itoa(c.line) + ":" + strconv.Itoa(c.column) + "]"
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
new file mode 100644
index 000000000..e023978fe
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_source.go
@@ -0,0 +1,17 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenSource interface {
+ NextToken() Token
+ Skip()
+ More()
+ GetLine() int
+ GetCharPositionInLine() int
+ GetInputStream() CharStream
+ GetSourceName() string
+ setTokenFactory(factory TokenFactory)
+ GetTokenFactory() TokenFactory
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
new file mode 100644
index 000000000..df92c8147
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/token_stream.go
@@ -0,0 +1,20 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+type TokenStream interface {
+ IntStream
+
+ LT(k int) Token
+
+ Get(index int) Token
+ GetTokenSource() TokenSource
+ SetTokenSource(TokenSource)
+
+ GetAllText() string
+ GetTextFromInterval(*Interval) string
+ GetTextFromRuleContext(RuleContext) string
+ GetTextFromTokens(Token, Token) string
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
new file mode 100644
index 000000000..96a03f02a
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tokenstream_rewriter.go
@@ -0,0 +1,649 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+package antlr
+
+import (
+"bytes"
+"fmt"
+)
+
+
+//
+// Useful for rewriting out a buffered input token stream after doing some
+// augmentation or other manipulations on it.
+
+//
+// You can insert stuff, replace, and delete chunks. Note that the operations
+// are done lazily--only if you convert the buffer to a {@link String} with
+// {@link TokenStream#getText()}. This is very efficient because you are not
+// moving data around all the time. As the buffer of tokens is converted to
+// strings, the {@link #getText()} method(s) scan the input token stream and
+// check to see if there is an operation at the current index. If so, the
+// operation is done and then normal {@link String} rendering continues on the
+// buffer. This is like having multiple Turing machine instruction streams
+// (programs) operating on a single input tape. :)
+//
+
+// This rewriter makes no modifications to the token stream. It does not ask the
+// stream to fill itself up nor does it advance the input cursor. The token
+// stream {@link TokenStream#index()} will return the same value before and
+// after any {@link #getText()} call.
+
+//
+// The rewriter only works on tokens that you have in the buffer and ignores the
+// current input cursor. If you are buffering tokens on-demand, calling
+// {@link #getText()} halfway through the input will only do rewrites for those
+// tokens in the first half of the file.
+
+//
+// Since the operations are done lazily at {@link #getText}-time, operations do
+// not screw up the token index values. That is, an insert operation at token
+// index {@code i} does not change the index values for tokens
+// {@code i}+1..n-1.
+
+//
+// Because operations never actually alter the buffer, you may always get the
+// original token stream back without undoing anything. Since the instructions
+// are queued up, you can easily simulate transactions and roll back any changes
+// if there is an error just by removing instructions. For example,
+
+//
+// CharStream input = new ANTLRFileStream("input");
+// TLexer lex = new TLexer(input);
+// CommonTokenStream tokens = new CommonTokenStream(lex);
+// T parser = new T(tokens);
+// TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
+// parser.startRule();
+//
+
+//
+// Then in the rules, you can execute (assuming rewriter is visible):
+
+//
+// Token t,u;
+// ...
+// rewriter.insertAfter(t, "text to put after t");}
+// rewriter.insertAfter(u, "text after u");}
+// System.out.println(rewriter.getText());
+//
+
+//
+// You can also have multiple "instruction streams" and get multiple rewrites
+// from a single pass over the input. Just name the instruction streams and use
+// that name again when printing the buffer. This could be useful for generating
+// a C file and also its header file--all from the same buffer:
+
+//
+// rewriter.insertAfter("pass1", t, "text to put after t");}
+// rewriter.insertAfter("pass2", u, "text after u");}
+// System.out.println(rewriter.getText("pass1"));
+// System.out.println(rewriter.getText("pass2"));
+//
+
+//
+// If you don't use named rewrite streams, a "default" stream is used as the
+// first example shows.
+
+
+
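
Editor's note: for readers porting the Java snippets above, here is a minimal Go sketch of the same flow (not part of the vendored file; helper name hypothetical, assumes package antlr and a populated TokenStream):

// rewriteExample queues two lazy edits and renders the result; the underlying
// token stream is never modified.
func rewriteExample(tokens TokenStream) string {
    rewriter := NewTokenStreamRewriter(tokens)
    rewriter.InsertBeforeDefault(0, "// header\n") // queued, not applied yet
    rewriter.InsertAfterDefault(0, " /*patched*/") // "insert after" = insert before index+1
    return rewriter.GetTextDefault()               // edits are applied only here
}
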
+const(
+ Default_Program_Name = "default"
+ Program_Init_Size = 100
+ Min_Token_Index = 0
+)
+
+// Define the rewrite operation hierarchy
+
+type RewriteOperation interface {
+ // Execute the rewrite operation by possibly adding to the buffer.
+ // Return the index of the next token to operate on.
+ Execute(buffer *bytes.Buffer) int
+ String() string
+ GetInstructionIndex() int
+ GetIndex() int
+ GetText() string
+ GetOpName() string
+ GetTokens() TokenStream
+ SetInstructionIndex(val int)
+ SetIndex(int)
+ SetText(string)
+ SetOpName(string)
+ SetTokens(TokenStream)
+}
+
+type BaseRewriteOperation struct {
+ //Current index of rewrites list
+ instruction_index int
+ //Token buffer index
+ index int
+ //Substitution text
+ text string
+ //Actual operation name
+ op_name string
+ //Pointer to token steam
+ tokens TokenStream
+}
+
+func (op *BaseRewriteOperation)GetInstructionIndex() int{
+ return op.instruction_index
+}
+
+func (op *BaseRewriteOperation)GetIndex() int{
+ return op.index
+}
+
+func (op *BaseRewriteOperation)GetText() string{
+ return op.text
+}
+
+func (op *BaseRewriteOperation)GetOpName() string{
+ return op.op_name
+}
+
+func (op *BaseRewriteOperation)GetTokens() TokenStream{
+ return op.tokens
+}
+
+func (op *BaseRewriteOperation)SetInstructionIndex(val int){
+ op.instruction_index = val
+}
+
+func (op *BaseRewriteOperation)SetIndex(val int) {
+ op.index = val
+}
+
+func (op *BaseRewriteOperation)SetText(val string){
+ op.text = val
+}
+
+func (op *BaseRewriteOperation)SetOpName(val string){
+ op.op_name = val
+}
+
+func (op *BaseRewriteOperation)SetTokens(val TokenStream) {
+ op.tokens = val
+}
+
+
+func (op *BaseRewriteOperation) Execute(buffer *bytes.Buffer) int{
+ return op.index
+}
+
+func (op *BaseRewriteOperation) String() string {
+ return fmt.Sprintf("<%s@%d:\"%s\">",
+ op.op_name,
+ op.tokens.Get(op.GetIndex()),
+ op.text,
+ )
+
+}
+
+
+type InsertBeforeOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertBeforeOp(index int, text string, stream TokenStream) *InsertBeforeOp{
+ return &InsertBeforeOp{BaseRewriteOperation:BaseRewriteOperation{
+ index:index,
+ text:text,
+ op_name:"InsertBeforeOp",
+ tokens:stream,
+ }}
+}
+
+func (op *InsertBeforeOp) Execute(buffer *bytes.Buffer) int{
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index+1
+}
+
+func (op *InsertBeforeOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// Distinguish between insert after/before to do the "insert afters"
+// first and then the "insert befores" at same index. Implementation
+// of "insert after" is "insert before index+1".
+
+type InsertAfterOp struct {
+ BaseRewriteOperation
+}
+
+func NewInsertAfterOp(index int, text string, stream TokenStream) *InsertAfterOp{
+ return &InsertAfterOp{BaseRewriteOperation:BaseRewriteOperation{
+ index:index+1,
+ text:text,
+ tokens:stream,
+ }}
+}
+
+func (op *InsertAfterOp) Execute(buffer *bytes.Buffer) int {
+ buffer.WriteString(op.text)
+ if op.tokens.Get(op.index).GetTokenType() != TokenEOF{
+ buffer.WriteString(op.tokens.Get(op.index).GetText())
+ }
+ return op.index+1
+}
+
+func (op *InsertAfterOp) String() string {
+ return op.BaseRewriteOperation.String()
+}
+
+// I'm going to try replacing range from x..y with (y-x)+1 ReplaceOp
+// instructions.
+type ReplaceOp struct{
+ BaseRewriteOperation
+ LastIndex int
+}
+
+func NewReplaceOp(from, to int, text string, stream TokenStream)*ReplaceOp {
+ return &ReplaceOp{
+ BaseRewriteOperation:BaseRewriteOperation{
+ index:from,
+ text:text,
+ op_name:"ReplaceOp",
+ tokens:stream,
+ },
+ LastIndex:to,
+ }
+}
+
+func (op *ReplaceOp)Execute(buffer *bytes.Buffer) int{
+ if op.text != ""{
+ buffer.WriteString(op.text)
+ }
+ return op.LastIndex +1
+}
+
+func (op *ReplaceOp) String() string {
+ if op.text == "" {
+ return fmt.Sprintf("",
+ op.tokens.Get(op.index), op.tokens.Get(op.LastIndex))
+ }
+ return fmt.Sprintf("",
+ op.tokens.Get(op.index), op.tokens.Get(op.LastIndex), op.text)
+}
+
+
+type TokenStreamRewriter struct {
+ //Our source stream
+ tokens TokenStream
+ // You may have multiple, named streams of rewrite operations.
+ // I'm calling these things "programs."
+ // Maps String (name) → rewrite (List)
+ programs map[string][]RewriteOperation
+ last_rewrite_token_indexes map[string]int
+}
+
+func NewTokenStreamRewriter(tokens TokenStream) *TokenStreamRewriter{
+ return &TokenStreamRewriter{
+ tokens: tokens,
+ programs: map[string][]RewriteOperation{
+ Default_Program_Name:make([]RewriteOperation,0, Program_Init_Size),
+ },
+ last_rewrite_token_indexes: map[string]int{},
+ }
+}
+
+func (tsr *TokenStreamRewriter) GetTokenStream() TokenStream{
+ return tsr.tokens
+}
+
+// Rollback the instruction stream for a program so that
+// the indicated instruction (via instructionIndex) is no
+// longer in the stream. UNTESTED!
+func (tsr *TokenStreamRewriter) Rollback(program_name string, instruction_index int){
+ is, ok := tsr.programs[program_name]
+ if ok{
+ tsr.programs[program_name] = is[Min_Token_Index:instruction_index]
+ }
+}
+
+func (tsr *TokenStreamRewriter) RollbackDefault(instruction_index int){
+ tsr.Rollback(Default_Program_Name, instruction_index)
+}
+//Reset the program so that no instructions exist
+func (tsr *TokenStreamRewriter) DeleteProgram(program_name string){
+ tsr.Rollback(program_name, Min_Token_Index) //TODO: double test on that cause lower bound is not included
+}
+
+func (tsr *TokenStreamRewriter) DeleteProgramDefault(){
+ tsr.DeleteProgram(Default_Program_Name)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfter(program_name string, index int, text string){
+ // to insert after, just insert before next index (even if past end)
+ var op RewriteOperation = NewInsertAfterOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterDefault(index int, text string){
+ tsr.InsertAfter(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertAfterToken(program_name string, token Token, text string){
+ tsr.InsertAfter(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr* TokenStreamRewriter) InsertBefore(program_name string, index int, text string){
+ var op RewriteOperation = NewInsertBeforeOp(index, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeDefault(index int, text string){
+ tsr.InsertBefore(Default_Program_Name, index, text)
+}
+
+func (tsr *TokenStreamRewriter) InsertBeforeToken(program_name string,token Token, text string){
+ tsr.InsertBefore(program_name, token.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter) Replace(program_name string, from, to int, text string){
+ if from > to || from < 0 || to < 0 || to >= tsr.tokens.Size(){
+ panic(fmt.Sprintf("replace: range invalid: %d..%d(size=%d)",
+ from, to, tsr.tokens.Size()))
+ }
+ var op RewriteOperation = NewReplaceOp(from, to, text, tsr.tokens)
+ rewrites := tsr.GetProgram(program_name)
+ op.SetInstructionIndex(len(rewrites))
+ tsr.AddToProgram(program_name, op)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceDefault(from, to int, text string) {
+ tsr.Replace(Default_Program_Name, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceDefaultPos(index int, text string){
+ tsr.ReplaceDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceToken(program_name string, from, to Token, text string){
+ tsr.Replace(program_name, from.GetTokenIndex(), to.GetTokenIndex(), text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceTokenDefault(from, to Token, text string){
+ tsr.ReplaceToken(Default_Program_Name, from, to, text)
+}
+
+func (tsr *TokenStreamRewriter)ReplaceTokenDefaultPos(index Token, text string){
+ tsr.ReplaceTokenDefault(index, index, text)
+}
+
+func (tsr *TokenStreamRewriter)Delete(program_name string, from, to int){
+ tsr.Replace(program_name, from, to, "" )
+}
+
+func (tsr *TokenStreamRewriter)DeleteDefault(from, to int){
+ tsr.Delete(Default_Program_Name, from, to)
+}
+
+func (tsr *TokenStreamRewriter)DeleteDefaultPos(index int){
+ tsr.DeleteDefault(index,index)
+}
+
+func (tsr *TokenStreamRewriter)DeleteToken(program_name string, from, to Token) {
+ tsr.ReplaceToken(program_name, from, to, "")
+}
+
+func (tsr *TokenStreamRewriter)DeleteTokenDefault(from,to Token){
+ tsr.DeleteToken(Default_Program_Name, from, to)
+}
+
+func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndex(program_name string)int {
+ i, ok := tsr.last_rewrite_token_indexes[program_name]
+ if !ok{
+ return -1
+ }
+ return i
+}
+
+func (tsr *TokenStreamRewriter)GetLastRewriteTokenIndexDefault()int{
+ return tsr.GetLastRewriteTokenIndex(Default_Program_Name)
+}
+
+func (tsr *TokenStreamRewriter)SetLastRewriteTokenIndex(program_name string, i int){
+ tsr.last_rewrite_token_indexes[program_name] = i
+}
+
+func (tsr *TokenStreamRewriter)InitializeProgram(name string)[]RewriteOperation{
+ is := make([]RewriteOperation, 0, Program_Init_Size)
+ tsr.programs[name] = is
+ return is
+}
+
+func (tsr *TokenStreamRewriter)AddToProgram(name string, op RewriteOperation){
+ is := tsr.GetProgram(name)
+ is = append(is, op)
+ tsr.programs[name] = is
+}
+
+func (tsr *TokenStreamRewriter)GetProgram(name string) []RewriteOperation {
+ is, ok := tsr.programs[name]
+ if !ok{
+ is = tsr.InitializeProgram(name)
+ }
+ return is
+}
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter)GetTextDefault() string{
+ return tsr.GetText(
+ Default_Program_Name,
+ NewInterval(0, tsr.tokens.Size()-1))
+}
+// Return the text from the original tokens altered per the
+// instructions given to this rewriter.
+func (tsr *TokenStreamRewriter)GetText(program_name string, interval *Interval) string {
+ rewrites := tsr.programs[program_name]
+ start := interval.Start
+ stop := interval.Stop
+ // ensure start/end are in range
+ stop = min(stop, tsr.tokens.Size()-1)
+ start = max(start,0)
+ if rewrites == nil || len(rewrites) == 0{
+ return tsr.tokens.GetTextFromInterval(interval) // no instructions to execute
+ }
+ buf := bytes.Buffer{}
+ // First, optimize instruction stream
+ indexToOp := reduceToSingleOperationPerIndex(rewrites)
+ // Walk buffer, executing instructions and emitting tokens
+ for i:=start; i<=stop && i<tsr.tokens.Size();{
+ op := indexToOp[i]
+ delete(indexToOp, i) // remove so any left have index size-1
+ t := tsr.tokens.Get(i)
+ if op == nil{
+ // no operation at that index, just dump token
+ if t.GetTokenType() != TokenEOF {buf.WriteString(t.GetText())}
+ i++ // move to next token
+ }else {
+ i = op.Execute(&buf) // execute operation and skip
+ }
+ }
+ // include stuff after end if it's last index in buffer. So, if they did an
+ // insertAfter(lastValidIndex, "foo"), include foo if end==lastValidIndex.
+ if stop == tsr.tokens.Size()-1{
+ // Scan any remaining operations after last token; they will be inserts.
+ for _, op := range indexToOp{
+ if op.GetIndex() >= tsr.tokens.Size()-1 {buf.WriteString(op.GetText())}
+ }
+ }
+ return buf.String()
+}
+
+// We need to combine operations and report invalid operations (like
+// overlapping replaces that are not completed nested). Inserts to
+// same index need to be combined etc... Here are the cases:
+//
+// I.i.u I.j.v leave alone, nonoverlapping
+// I.i.u I.i.v combine: Iivu
+//
+// R.i-j.u R.x-y.v | i-j in x-y delete first R
+// R.i-j.u R.i-j.v delete first R
+// R.i-j.u R.x-y.v | x-y in i-j ERROR
+// R.i-j.u R.x-y.v | boundaries overlap ERROR
+//
+// Delete special case of replace (text==null):
+// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+//
+// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
+// we're not deleting i)
+// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
+// R.x-y.v I.i.u | i in x-y ERROR
+// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
+// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
+//
+// I.i.u = insert u before op @ index i
+// R.x-y.u = replace x-y indexed tokens with u
+//
+// First we need to examine replaces. For any replace op:
+//
+// 1. wipe out any insertions before op within that range.
+// 2. Drop any replace op before that is contained completely within
+// that range.
+// 3. Throw exception upon boundary overlap with any previous replace.
+//
+// Then we can deal with inserts:
+//
+// 1. for any inserts to same index, combine even if not adjacent.
+// 2. for any prior replace with same left boundary, combine this
+// insert with replace and delete this replace.
+// 3. throw exception if index in same range as previous replace
+//
+// Don't actually delete; make op null in list. Easier to walk list.
+// Later we can throw as we add to index → op map.
+//
+// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
+// inserted stuff would be before the replace range. But, if you
+// add tokens in front of a method body '{' and then delete the method
+// body, I think the stuff before the '{' you added should disappear too.
+//
+// Return a map from token index to operation.
+//
+func reduceToSingleOperationPerIndex(rewrites []RewriteOperation) map[int]RewriteOperation{
+ // WALK REPLACES
+ for i:=0; i < len(rewrites); i++{
+ op := rewrites[i]
+ if op == nil{continue}
+ rop, ok := op.(*ReplaceOp)
+ if !ok{continue}
+ // Wipe prior inserts within range
+ for j:=0; j<i && j<len(rewrites); j++{
+ if iop, ok := rewrites[j].(*InsertBeforeOp); ok{
+ if iop.index == rop.index{
+ // E.g., insert before 2, delete 2..2; update replace
+ // text to include insert before, kill insert
+ rewrites[iop.instruction_index] = nil
+ rop.text = iop.text + rop.text
+ }else if iop.index > rop.index && iop.index <=rop.LastIndex{
+ // delete insert as it's a no-op.
+ rewrites[iop.instruction_index] = nil
+ }
+ }
+ }
+ // Drop any prior replaces contained within
+ for j:=0; j<i && j<len(rewrites); j++{
+ if prevop, ok := rewrites[j].(*ReplaceOp); ok{
+ if prevop.index >=rop.index && prevop.LastIndex <= rop.LastIndex{
+ // delete replace as it's a no-op.
+ rewrites[prevop.instruction_index] = nil
+ continue
+ }
+ // throw exception unless disjoint or identical
+ disjoint := prevop.LastIndex < rop.index || prevop.index > rop.LastIndex
+ // Delete special case of replace (text==null):
+ // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
+ if prevop.text == "" && rop.text == "" && !disjoint{
+ rewrites[prevop.instruction_index] = nil
+ rop.index = min(prevop.index, rop.index)
+ rop.LastIndex = max(prevop.LastIndex, rop.LastIndex)
+ println("new rop" + rop.String()) //TODO: remove console write, taken from Java version
+ }else if !disjoint{
+ panic("replace op boundaries of " + rop.String() + " overlap with previous " + prevop.String())
+ }
+ }
+ }
+ }
+ // WALK INSERTS
+ for i:=0; i < len(rewrites); i++ {
+ op := rewrites[i]
+ if op == nil{continue}
+ //hack to replicate inheritance in composition
+ _, iok := rewrites[i].(*InsertBeforeOp)
+ _, aok := rewrites[i].(*InsertAfterOp)
+ if !iok && !aok{continue}
+ iop := rewrites[i]
+ // combine current insert with prior if any at same index
+ // deviating a bit from TokenStreamRewriter.java - hard to incorporate inheritance logic
+ for j:=0; j<i && j<len(rewrites); j++{
+ if nextIop, ok := rewrites[j].(*InsertAfterOp); ok{
+ if nextIop.index == iop.GetIndex(){
+ iop.SetText(nextIop.text + iop.GetText())
+ rewrites[j] = nil
+ }
+ }
+ if prevIop, ok := rewrites[j].(*InsertBeforeOp); ok{
+ if prevIop.index == iop.GetIndex(){
+ iop.SetText(iop.GetText() + prevIop.text)
+ rewrites[prevIop.instruction_index] = nil
+ }
+ }
+ }
+ // look for replaces where iop.index is in range; error
+ for j:=0; j<i && j<len(rewrites); j++{
+ if rop, ok := rewrites[j].(*ReplaceOp); ok{
+ if iop.GetIndex() == rop.index{
+ rop.SetText(rop.GetText() + iop.GetText())
+ rewrites[i] = nil
+ continue
+ }
+ if iop.GetIndex() >= rop.index && iop.GetIndex() <= rop.LastIndex{
+ panic("insert op "+iop.String()+" within boundaries of previous "+rop.String())
+ }
+ }
+ }
+ }
+ m := map[int]RewriteOperation{}
+ for i:=0; i < len(rewrites); i++{
+ op := rewrites[i]
+ if op == nil {continue}
+ if _, ok := m[op.GetIndex()]; ok{
+ panic("should only be one op per index")
+ }
+ m[op.GetIndex()] = op
+ }
+ return m
+}
+
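
Editor's note: to make the I.i / R.x-y rules above concrete, a small sketch (not part of the vendored file; helper name hypothetical, assumes the stream has a token at index 2) of the insert-before-plus-replace case the comment calls out:

// combineExample shows the "I.2 R.2-2" case: the queued insert at index 2 is
// folded into (or superseded by) the replace covering index 2, so only one
// operation per index survives reduceToSingleOperationPerIndex.
func combineExample(tokens TokenStream) string {
    r := NewTokenStreamRewriter(tokens)
    r.InsertBeforeDefault(2, "a") // I.2
    r.ReplaceDefault(2, 2, "x")   // R.2-2
    return r.GetTextDefault()     // token 2 renders from the merged replace
}
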
+
+/*
+ Quick fixing Go lack of overloads
+ */
+
+func max(a,b int)int{
+ if a>b{
+ return a
+ }else {
+ return b
+ }
+}
+func min(a,b int)int{
+ if a<b{
+ return a
+ }else {
+ return b
+ }
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
new file mode 100644
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/transition.go
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+// This is a one way link. It emanates from a state (usually via a list of
+// transitions) and has a target state.
+//
+// Since we never have to change the ATN transitions once we construct it,
+// the states. We'll use the term Edge for the DFA to distinguish them from
+// ATN transitions.
+
+type Transition interface {
+ getTarget() ATNState
+ setTarget(ATNState)
+ getIsEpsilon() bool
+ getLabel() *IntervalSet
+ getSerializationType() int
+ Matches(int, int, int) bool
+}
+
+type BaseTransition struct {
+ target ATNState
+ isEpsilon bool
+ label int
+ intervalSet *IntervalSet
+ serializationType int
+}
+
+func NewBaseTransition(target ATNState) *BaseTransition {
+
+ if target == nil {
+ panic("target cannot be nil.")
+ }
+
+ t := new(BaseTransition)
+
+ t.target = target
+ // Are we epsilon, action, sempred?
+ t.isEpsilon = false
+ t.intervalSet = nil
+
+ return t
+}
+
+func (t *BaseTransition) getTarget() ATNState {
+ return t.target
+}
+
+func (t *BaseTransition) setTarget(s ATNState) {
+ t.target = s
+}
+
+func (t *BaseTransition) getIsEpsilon() bool {
+ return t.isEpsilon
+}
+
+func (t *BaseTransition) getLabel() *IntervalSet {
+ return t.intervalSet
+}
+
+func (t *BaseTransition) getSerializationType() int {
+ return t.serializationType
+}
+
+func (t *BaseTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ panic("Not implemented")
+}
+
+const (
+ TransitionEPSILON = 1
+ TransitionRANGE = 2
+ TransitionRULE = 3
+ TransitionPREDICATE = 4 // e.g., {isType(input.LT(1))}?
+ TransitionATOM = 5
+ TransitionACTION = 6
+ TransitionSET = 7 // ~(A|B) or ~atom, wildcard, which convert to next 2
+ TransitionNOTSET = 8
+ TransitionWILDCARD = 9
+ TransitionPRECEDENCE = 10
+)
+
+var TransitionserializationNames = []string{
+ "INVALID",
+ "EPSILON",
+ "RANGE",
+ "RULE",
+ "PREDICATE",
+ "ATOM",
+ "ACTION",
+ "SET",
+ "NOT_SET",
+ "WILDCARD",
+ "PRECEDENCE",
+}
+
+//var TransitionserializationTypes struct {
+// EpsilonTransition int
+// RangeTransition int
+// RuleTransition int
+// PredicateTransition int
+// AtomTransition int
+// ActionTransition int
+// SetTransition int
+// NotSetTransition int
+// WildcardTransition int
+// PrecedencePredicateTransition int
+//}{
+// TransitionEPSILON,
+// TransitionRANGE,
+// TransitionRULE,
+// TransitionPREDICATE,
+// TransitionATOM,
+// TransitionACTION,
+// TransitionSET,
+// TransitionNOTSET,
+// TransitionWILDCARD,
+// TransitionPRECEDENCE
+//}
+
+// TODO: make all transitions sets? no, should remove set edges
+type AtomTransition struct {
+ *BaseTransition
+}
+
+func NewAtomTransition(target ATNState, intervalSet int) *AtomTransition {
+
+ t := new(AtomTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.label = intervalSet // The token type or character value or, signifies special intervalSet.
+ t.intervalSet = t.makeLabel()
+ t.serializationType = TransitionATOM
+
+ return t
+}
+
+func (t *AtomTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addOne(t.label)
+ return s
+}
+
+func (t *AtomTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return t.label == symbol
+}
+
+func (t *AtomTransition) String() string {
+ return strconv.Itoa(t.label)
+}
+
+type RuleTransition struct {
+ *BaseTransition
+
+ followState ATNState
+ ruleIndex, precedence int
+}
+
+func NewRuleTransition(ruleStart ATNState, ruleIndex, precedence int, followState ATNState) *RuleTransition {
+
+ t := new(RuleTransition)
+ t.BaseTransition = NewBaseTransition(ruleStart)
+
+ t.ruleIndex = ruleIndex
+ t.precedence = precedence
+ t.followState = followState
+ t.serializationType = TransitionRULE
+ t.isEpsilon = true
+
+ return t
+}
+
+func (t *RuleTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+type EpsilonTransition struct {
+ *BaseTransition
+
+ outermostPrecedenceReturn int
+}
+
+func NewEpsilonTransition(target ATNState, outermostPrecedenceReturn int) *EpsilonTransition {
+
+ t := new(EpsilonTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionEPSILON
+ t.isEpsilon = true
+ t.outermostPrecedenceReturn = outermostPrecedenceReturn
+ return t
+}
+
+func (t *EpsilonTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *EpsilonTransition) String() string {
+ return "epsilon"
+}
+
+type RangeTransition struct {
+ *BaseTransition
+
+ start, stop int
+}
+
+func NewRangeTransition(target ATNState, start, stop int) *RangeTransition {
+
+ t := new(RangeTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionRANGE
+ t.start = start
+ t.stop = stop
+ t.intervalSet = t.makeLabel()
+ return t
+}
+
+func (t *RangeTransition) makeLabel() *IntervalSet {
+ s := NewIntervalSet()
+ s.addRange(t.start, t.stop)
+ return s
+}
+
+func (t *RangeTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= t.start && symbol <= t.stop
+}
+
+func (t *RangeTransition) String() string {
+ var sb strings.Builder
+ sb.WriteByte('\'')
+ sb.WriteRune(rune(t.start))
+ sb.WriteString("'..'")
+ sb.WriteRune(rune(t.stop))
+ sb.WriteByte('\'')
+ return sb.String()
+}
+
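
Editor's note: a brief sketch (not part of the vendored file; helper name hypothetical, assumes a non-nil ATNState target) of how a transition's Matches is used against an input symbol:

// matchExample shows a RangeTransition labeled 'a'..'z' matching a symbol in
// that range; min/max vocab bounds are ignored by this transition type.
func matchExample(target ATNState) bool {
    t := NewRangeTransition(target, 'a', 'z')
    return t.Matches('q', 0, 0xFFFF) // true: 'q' falls inside 'a'..'z'
}
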
+type AbstractPredicateTransition interface {
+ Transition
+ IAbstractPredicateTransitionFoo()
+}
+
+type BaseAbstractPredicateTransition struct {
+ *BaseTransition
+}
+
+func NewBasePredicateTransition(target ATNState) *BaseAbstractPredicateTransition {
+
+ t := new(BaseAbstractPredicateTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ return t
+}
+
+func (a *BaseAbstractPredicateTransition) IAbstractPredicateTransitionFoo() {}
+
+type PredicateTransition struct {
+ *BaseAbstractPredicateTransition
+
+ isCtxDependent bool
+ ruleIndex, predIndex int
+}
+
+func NewPredicateTransition(target ATNState, ruleIndex, predIndex int, isCtxDependent bool) *PredicateTransition {
+
+ t := new(PredicateTransition)
+ t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+
+ t.serializationType = TransitionPREDICATE
+ t.ruleIndex = ruleIndex
+ t.predIndex = predIndex
+ t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ t.isEpsilon = true
+ return t
+}
+
+func (t *PredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *PredicateTransition) getPredicate() *Predicate {
+ return NewPredicate(t.ruleIndex, t.predIndex, t.isCtxDependent)
+}
+
+func (t *PredicateTransition) String() string {
+ return "pred_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.predIndex)
+}
+
+type ActionTransition struct {
+ *BaseTransition
+
+ isCtxDependent bool
+ ruleIndex, actionIndex, predIndex int
+}
+
+func NewActionTransition(target ATNState, ruleIndex, actionIndex int, isCtxDependent bool) *ActionTransition {
+
+ t := new(ActionTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionACTION
+ t.ruleIndex = ruleIndex
+ t.actionIndex = actionIndex
+ t.isCtxDependent = isCtxDependent // e.g., $i ref in pred
+ t.isEpsilon = true
+ return t
+}
+
+func (t *ActionTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *ActionTransition) String() string {
+ return "action_" + strconv.Itoa(t.ruleIndex) + ":" + strconv.Itoa(t.actionIndex)
+}
+
+type SetTransition struct {
+ *BaseTransition
+}
+
+func NewSetTransition(target ATNState, set *IntervalSet) *SetTransition {
+
+ t := new(SetTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionSET
+ if set != nil {
+ t.intervalSet = set
+ } else {
+ t.intervalSet = NewIntervalSet()
+ t.intervalSet.addOne(TokenInvalidType)
+ }
+
+ return t
+}
+
+func (t *SetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return t.intervalSet.contains(symbol)
+}
+
+func (t *SetTransition) String() string {
+ return t.intervalSet.String()
+}
+
+type NotSetTransition struct {
+ *SetTransition
+}
+
+func NewNotSetTransition(target ATNState, set *IntervalSet) *NotSetTransition {
+
+ t := new(NotSetTransition)
+
+ t.SetTransition = NewSetTransition(target, set)
+
+ t.serializationType = TransitionNOTSET
+
+ return t
+}
+
+func (t *NotSetTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol && !t.intervalSet.contains(symbol)
+}
+
+func (t *NotSetTransition) String() string {
+ return "~" + t.intervalSet.String()
+}
+
+type WildcardTransition struct {
+ *BaseTransition
+}
+
+func NewWildcardTransition(target ATNState) *WildcardTransition {
+
+ t := new(WildcardTransition)
+ t.BaseTransition = NewBaseTransition(target)
+
+ t.serializationType = TransitionWILDCARD
+ return t
+}
+
+func (t *WildcardTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
+}
+
+func (t *WildcardTransition) String() string {
+ return "."
+}
+
+type PrecedencePredicateTransition struct {
+ *BaseAbstractPredicateTransition
+
+ precedence int
+}
+
+func NewPrecedencePredicateTransition(target ATNState, precedence int) *PrecedencePredicateTransition {
+
+ t := new(PrecedencePredicateTransition)
+ t.BaseAbstractPredicateTransition = NewBasePredicateTransition(target)
+
+ t.serializationType = TransitionPRECEDENCE
+ t.precedence = precedence
+ t.isEpsilon = true
+
+ return t
+}
+
+func (t *PrecedencePredicateTransition) Matches(symbol, minVocabSymbol, maxVocabSymbol int) bool {
+ return false
+}
+
+func (t *PrecedencePredicateTransition) getPredicate() *PrecedencePredicate {
+ return NewPrecedencePredicate(t.precedence)
+}
+
+func (t *PrecedencePredicateTransition) String() string {
+ return fmt.Sprint(t.precedence) + " >= _p"
+}
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
new file mode 100644
index 000000000..08ce22bba
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/tree.go
@@ -0,0 +1,256 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+// The basic notion of a tree has a parent, a payload, and a list of children.
+// It is the most abstract interface for all the trees used by ANTLR.
+///
+
+var TreeInvalidInterval = NewInterval(-1, -2)
+
+type Tree interface {
+ GetParent() Tree
+ SetParent(Tree)
+ GetPayload() interface{}
+ GetChild(i int) Tree
+ GetChildCount() int
+ GetChildren() []Tree
+}
+
+type SyntaxTree interface {
+ Tree
+
+ GetSourceInterval() *Interval
+}
+
+type ParseTree interface {
+ SyntaxTree
+
+ Accept(Visitor ParseTreeVisitor) interface{}
+ GetText() string
+
+ ToStringTree([]string, Recognizer) string
+}
+
+type RuleNode interface {
+ ParseTree
+
+ GetRuleContext() RuleContext
+ GetBaseRuleContext() *BaseRuleContext
+}
+
+type TerminalNode interface {
+ ParseTree
+
+ GetSymbol() Token
+}
+
+type ErrorNode interface {
+ TerminalNode
+
+ errorNode()
+}
+
+type ParseTreeVisitor interface {
+ Visit(tree ParseTree) interface{}
+ VisitChildren(node RuleNode) interface{}
+ VisitTerminal(node TerminalNode) interface{}
+ VisitErrorNode(node ErrorNode) interface{}
+}
+
+type BaseParseTreeVisitor struct{}
+
+var _ ParseTreeVisitor = &BaseParseTreeVisitor{}
+
+func (v *BaseParseTreeVisitor) Visit(tree ParseTree) interface{} { return tree.Accept(v) }
+func (v *BaseParseTreeVisitor) VisitChildren(node RuleNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitTerminal(node TerminalNode) interface{} { return nil }
+func (v *BaseParseTreeVisitor) VisitErrorNode(node ErrorNode) interface{} { return nil }
+
+// TODO
+//func (this ParseTreeVisitor) Visit(ctx) {
+// if (Utils.isArray(ctx)) {
+// self := this
+// return ctx.map(function(child) { return VisitAtom(self, child)})
+// } else {
+// return VisitAtom(this, ctx)
+// }
+//}
+//
+//func VisitAtom(Visitor, ctx) {
+// if (ctx.parser == nil) { //is terminal
+// return
+// }
+//
+// name := ctx.parser.ruleNames[ctx.ruleIndex]
+// funcName := "Visit" + Utils.titleCase(name)
+//
+// return Visitor[funcName](ctx)
+//}
+
+type ParseTreeListener interface {
+ VisitTerminal(node TerminalNode)
+ VisitErrorNode(node ErrorNode)
+ EnterEveryRule(ctx ParserRuleContext)
+ ExitEveryRule(ctx ParserRuleContext)
+}
+
+type BaseParseTreeListener struct{}
+
+var _ ParseTreeListener = &BaseParseTreeListener{}
+
+func (l *BaseParseTreeListener) VisitTerminal(node TerminalNode) {}
+func (l *BaseParseTreeListener) VisitErrorNode(node ErrorNode) {}
+func (l *BaseParseTreeListener) EnterEveryRule(ctx ParserRuleContext) {}
+func (l *BaseParseTreeListener) ExitEveryRule(ctx ParserRuleContext) {}
+
+type TerminalNodeImpl struct {
+ parentCtx RuleContext
+
+ symbol Token
+}
+
+var _ TerminalNode = &TerminalNodeImpl{}
+
+func NewTerminalNodeImpl(symbol Token) *TerminalNodeImpl {
+ tn := new(TerminalNodeImpl)
+
+ tn.parentCtx = nil
+ tn.symbol = symbol
+
+ return tn
+}
+
+func (t *TerminalNodeImpl) GetChild(i int) Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) GetChildren() []Tree {
+ return nil
+}
+
+func (t *TerminalNodeImpl) SetChildren(tree []Tree) {
+ panic("Cannot set children on terminal node")
+}
+
+func (t *TerminalNodeImpl) GetSymbol() Token {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetParent() Tree {
+ return t.parentCtx
+}
+
+func (t *TerminalNodeImpl) SetParent(tree Tree) {
+ t.parentCtx = tree.(RuleContext)
+}
+
+func (t *TerminalNodeImpl) GetPayload() interface{} {
+ return t.symbol
+}
+
+func (t *TerminalNodeImpl) GetSourceInterval() *Interval {
+ if t.symbol == nil {
+ return TreeInvalidInterval
+ }
+ tokenIndex := t.symbol.GetTokenIndex()
+ return NewInterval(tokenIndex, tokenIndex)
+}
+
+func (t *TerminalNodeImpl) GetChildCount() int {
+ return 0
+}
+
+func (t *TerminalNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitTerminal(t)
+}
+
+func (t *TerminalNodeImpl) GetText() string {
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) String() string {
+ if t.symbol.GetTokenType() == TokenEOF {
+ return ""
+ }
+
+ return t.symbol.GetText()
+}
+
+func (t *TerminalNodeImpl) ToStringTree(s []string, r Recognizer) string {
+ return t.String()
+}
+
+// Represents a token that was consumed during reSynchronization
+// rather than during a valid Match operation. For example,
+// we will create this kind of a node during single token insertion
+// and deletion as well as during "consume until error recovery set"
+// upon no viable alternative exceptions.
+
+type ErrorNodeImpl struct {
+ *TerminalNodeImpl
+}
+
+var _ ErrorNode = &ErrorNodeImpl{}
+
+func NewErrorNodeImpl(token Token) *ErrorNodeImpl {
+ en := new(ErrorNodeImpl)
+ en.TerminalNodeImpl = NewTerminalNodeImpl(token)
+ return en
+}
+
+func (e *ErrorNodeImpl) errorNode() {}
+
+func (e *ErrorNodeImpl) Accept(v ParseTreeVisitor) interface{} {
+ return v.VisitErrorNode(e)
+}
+
+type ParseTreeWalker struct {
+}
+
+func NewParseTreeWalker() *ParseTreeWalker {
+ return new(ParseTreeWalker)
+}
+
+// Performs a walk on the given parse tree starting at the root and going down recursively
+// with depth-first search. On each node, EnterRule is called before
+// recursively walking down into child nodes, then
+// ExitRule is called after the recursive call to wind up.
+func (p *ParseTreeWalker) Walk(listener ParseTreeListener, t Tree) {
+ switch tt := t.(type) {
+ case ErrorNode:
+ listener.VisitErrorNode(tt)
+ case TerminalNode:
+ listener.VisitTerminal(tt)
+ default:
+ p.EnterRule(listener, t.(RuleNode))
+ for i := 0; i < t.GetChildCount(); i++ {
+ child := t.GetChild(i)
+ p.Walk(listener, child)
+ }
+ p.ExitRule(listener, t.(RuleNode))
+ }
+}
+
+//
+// Enters a grammar rule by first triggering the generic event {@link ParseTreeListener//EnterEveryRule}
+// then by triggering the event specific to the given parse tree node
+//
+func (p *ParseTreeWalker) EnterRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ listener.EnterEveryRule(ctx)
+ ctx.EnterRule(listener)
+}
+
+// Exits a grammar rule by first triggering the event specific to the given parse tree node
+// then by triggering the generic event {@link ParseTreeListener//ExitEveryRule}
+//
+func (p *ParseTreeWalker) ExitRule(listener ParseTreeListener, r RuleNode) {
+ ctx := r.GetRuleContext().(ParserRuleContext)
+ ctx.ExitRule(listener)
+ listener.ExitEveryRule(ctx)
+}
+
+var ParseTreeWalkerDefault = NewParseTreeWalker()
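Editor's note: the walker above drives listeners depth-first, bracketing every rule node with EnterEveryRule/ExitEveryRule. A minimal sketch of driving it from application code follows; the `ruleCounter` and `countRules` names are illustrative assumptions, and the tree is assumed to come from an ANTLR-generated parser.

```go
package example

import "github.com/antlr/antlr4/runtime/Go/antlr"

// ruleCounter is an illustrative listener that counts how many rule
// contexts the walker enters.
type ruleCounter struct {
	*antlr.BaseParseTreeListener
	rules int
}

func (c *ruleCounter) EnterEveryRule(ctx antlr.ParserRuleContext) {
	c.rules++
}

// countRules walks tree depth-first with the default walker defined above.
func countRules(tree antlr.ParseTree) int {
	c := &ruleCounter{BaseParseTreeListener: &antlr.BaseParseTreeListener{}}
	antlr.ParseTreeWalkerDefault.Walk(c, tree)
	return c.rules
}
```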
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
new file mode 100644
index 000000000..80144ecad
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/trees.go
@@ -0,0 +1,137 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import "fmt"
+
+/** A set of utility routines useful for all kinds of ANTLR trees. */
+
+// Print out a whole tree in LISP form. {@link //getNodeText} is used on the
+// node payloads to get the text for the nodes. Detect
+// parse trees and extract data appropriately.
+func TreesStringTree(tree Tree, ruleNames []string, recog Recognizer) string {
+
+ if recog != nil {
+ ruleNames = recog.GetRuleNames()
+ }
+
+ s := TreesGetNodeText(tree, ruleNames, nil)
+
+ s = EscapeWhitespace(s, false)
+ c := tree.GetChildCount()
+ if c == 0 {
+ return s
+ }
+ res := "(" + s + " "
+ if c > 0 {
+ s = TreesStringTree(tree.GetChild(0), ruleNames, nil)
+ res += s
+ }
+ for i := 1; i < c; i++ {
+ s = TreesStringTree(tree.GetChild(i), ruleNames, nil)
+ res += (" " + s)
+ }
+ res += ")"
+ return res
+}
+
+func TreesGetNodeText(t Tree, ruleNames []string, recog Parser) string {
+ if recog != nil {
+ ruleNames = recog.GetRuleNames()
+ }
+
+ if ruleNames != nil {
+ switch t2 := t.(type) {
+ case RuleNode:
+ t3 := t2.GetRuleContext()
+ altNumber := t3.GetAltNumber()
+
+ if altNumber != ATNInvalidAltNumber {
+ return fmt.Sprintf("%s:%d", ruleNames[t3.GetRuleIndex()], altNumber)
+ }
+ return ruleNames[t3.GetRuleIndex()]
+ case ErrorNode:
+ return fmt.Sprint(t2)
+ case TerminalNode:
+ if t2.GetSymbol() != nil {
+ return t2.GetSymbol().GetText()
+ }
+ }
+ }
+
+ // no recog for rule names
+ payload := t.GetPayload()
+ if p2, ok := payload.(Token); ok {
+ return p2.GetText()
+ }
+
+ return fmt.Sprint(t.GetPayload())
+}
+
+// Return ordered list of all children of this node
+func TreesGetChildren(t Tree) []Tree {
+ list := make([]Tree, 0)
+ for i := 0; i < t.GetChildCount(); i++ {
+ list = append(list, t.GetChild(i))
+ }
+ return list
+}
+
+// Return a list of all ancestors of this node. The first node of the
+// list is the root and the last is the parent of this node.
+//
+func TreesgetAncestors(t Tree) []Tree {
+ ancestors := make([]Tree, 0)
+ t = t.GetParent()
+ for t != nil {
+ f := []Tree{t}
+ ancestors = append(f, ancestors...)
+ t = t.GetParent()
+ }
+ return ancestors
+}
+
+func TreesFindAllTokenNodes(t ParseTree, ttype int) []ParseTree {
+ return TreesfindAllNodes(t, ttype, true)
+}
+
+func TreesfindAllRuleNodes(t ParseTree, ruleIndex int) []ParseTree {
+ return TreesfindAllNodes(t, ruleIndex, false)
+}
+
+func TreesfindAllNodes(t ParseTree, index int, findTokens bool) []ParseTree {
+ nodes := make([]ParseTree, 0)
+ treesFindAllNodes(t, index, findTokens, &nodes)
+ return nodes
+}
+
+func treesFindAllNodes(t ParseTree, index int, findTokens bool, nodes *[]ParseTree) {
+ // check this node (the root) first
+
+ t2, ok := t.(TerminalNode)
+ t3, ok2 := t.(ParserRuleContext)
+
+ if findTokens && ok {
+ if t2.GetSymbol().GetTokenType() == index {
+ *nodes = append(*nodes, t2)
+ }
+ } else if !findTokens && ok2 {
+ if t3.GetRuleIndex() == index {
+ *nodes = append(*nodes, t3)
+ }
+ }
+ // check children
+ for i := 0; i < t.GetChildCount(); i++ {
+ treesFindAllNodes(t.GetChild(i).(ParseTree), index, findTokens, nodes)
+ }
+}
+
+func TreesDescendants(t ParseTree) []ParseTree {
+ nodes := []ParseTree{t}
+ for i := 0; i < t.GetChildCount(); i++ {
+ nodes = append(nodes, TreesDescendants(t.GetChild(i).(ParseTree))...)
+ }
+ return nodes
+}
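Editor's note: a small hedged sketch of the LISP-form printer above; `dumpTree` is an illustrative name, and the tree and recognizer are assumed to come from ANTLR-generated code.

```go
package example

import (
	"fmt"

	"github.com/antlr/antlr4/runtime/Go/antlr"
)

// dumpTree prints a parse tree in LISP form. When a recognizer is passed,
// TreesStringTree pulls rule names from it and the ruleNames argument is ignored.
func dumpTree(tree antlr.Tree, recog antlr.Recognizer) {
	fmt.Println(antlr.TreesStringTree(tree, nil, recog))
}
```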
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
new file mode 100644
index 000000000..ec219df98
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils.go
@@ -0,0 +1,355 @@
+// Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
+// Use of this file is governed by the BSD 3-clause license that
+// can be found in the LICENSE.txt file in the project root.
+
+package antlr
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "math/bits"
+ "strconv"
+ "strings"
+)
+
+func intMin(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func intMax(a, b int) int {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// A simple integer stack
+
+type IntStack []int
+
+var ErrEmptyStack = errors.New("Stack is empty")
+
+func (s *IntStack) Pop() (int, error) {
+ l := len(*s) - 1
+ if l < 0 {
+ return 0, ErrEmptyStack
+ }
+ v := (*s)[l]
+ *s = (*s)[0:l]
+ return v, nil
+}
+
+func (s *IntStack) Push(e int) {
+ *s = append(*s, e)
+}
+
+func standardEqualsFunction(a interface{}, b interface{}) bool {
+
+ ac, oka := a.(comparable)
+ bc, okb := b.(comparable)
+
+ if !oka || !okb {
+ panic("Not Comparable")
+ }
+
+ return ac.equals(bc)
+}
+
+func standardHashFunction(a interface{}) int {
+ if h, ok := a.(hasher); ok {
+ return h.hash()
+ }
+
+ panic("Not Hasher")
+}
+
+type hasher interface {
+ hash() int
+}
+
+const bitsPerWord = 64
+
+func indexForBit(bit int) int {
+ return bit / bitsPerWord
+}
+
+func wordForBit(data []uint64, bit int) uint64 {
+ idx := indexForBit(bit)
+ if idx >= len(data) {
+ return 0
+ }
+ return data[idx]
+}
+
+func maskForBit(bit int) uint64 {
+ return uint64(1) << (bit % bitsPerWord)
+}
+
+func wordsNeeded(bit int) int {
+ return indexForBit(bit) + 1
+}
+
+type BitSet struct {
+ data []uint64
+}
+
+func NewBitSet() *BitSet {
+ return &BitSet{}
+}
+
+func (b *BitSet) add(value int) {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ size := wordsNeeded(value)
+ data := make([]uint64, size)
+ copy(data, b.data)
+ b.data = data
+ }
+ b.data[idx] |= maskForBit(value)
+}
+
+func (b *BitSet) clear(index int) {
+ idx := indexForBit(index)
+ if idx >= len(b.data) {
+ return
+ }
+ b.data[idx] &= ^maskForBit(index)
+}
+
+func (b *BitSet) or(set *BitSet) {
+ // Get min size necessary to represent the bits in both sets.
+ bLen := b.minLen()
+ setLen := set.minLen()
+ maxLen := intMax(bLen, setLen)
+ if maxLen > len(b.data) {
+ // Grow b.data so it can represent the bits in both sets.
+ data := make([]uint64, maxLen)
+ copy(data, b.data)
+ b.data = data
+ }
+ // len(b.data) is at least setLen.
+ for i := 0; i < setLen; i++ {
+ b.data[i] |= set.data[i]
+ }
+}
+
+func (b *BitSet) remove(value int) {
+ b.clear(value)
+}
+
+func (b *BitSet) contains(value int) bool {
+ idx := indexForBit(value)
+ if idx >= len(b.data) {
+ return false
+ }
+ return (b.data[idx] & maskForBit(value)) != 0
+}
+
+func (b *BitSet) minValue() int {
+ for i, v := range b.data {
+ if v == 0 {
+ continue
+ }
+ return i*bitsPerWord + bits.TrailingZeros64(v)
+ }
+ return 2147483647
+}
+
+func (b *BitSet) equals(other interface{}) bool {
+ otherBitSet, ok := other.(*BitSet)
+ if !ok {
+ return false
+ }
+
+ if b == otherBitSet {
+ return true
+ }
+
+ // We only compare set bits, so we cannot rely on the two slices having the same size. It's
+ // possible for two BitSets to have different slice lengths but the same set bits. So we only
+ // compare the relevant words and ignore the trailing zeros.
+ bLen := b.minLen()
+ otherLen := otherBitSet.minLen()
+
+ if bLen != otherLen {
+ return false
+ }
+
+ for i := 0; i < bLen; i++ {
+ if b.data[i] != otherBitSet.data[i] {
+ return false
+ }
+ }
+
+ return true
+}
+
+func (b *BitSet) minLen() int {
+ for i := len(b.data); i > 0; i-- {
+ if b.data[i-1] != 0 {
+ return i
+ }
+ }
+ return 0
+}
+
+func (b *BitSet) length() int {
+ cnt := 0
+ for _, val := range b.data {
+ cnt += bits.OnesCount64(val)
+ }
+ return cnt
+}
+
+func (b *BitSet) String() string {
+ vals := make([]string, 0, b.length())
+
+ for i, v := range b.data {
+ for v != 0 {
+ n := bits.TrailingZeros64(v)
+ vals = append(vals, strconv.Itoa(i*bitsPerWord+n))
+ v &= ^(uint64(1) << n)
+ }
+ }
+
+ return "{" + strings.Join(vals, ", ") + "}"
+}
+
+type AltDict struct {
+ data map[string]interface{}
+}
+
+func NewAltDict() *AltDict {
+ d := new(AltDict)
+ d.data = make(map[string]interface{})
+ return d
+}
+
+func (a *AltDict) Get(key string) interface{} {
+ key = "k-" + key
+ return a.data[key]
+}
+
+func (a *AltDict) put(key string, value interface{}) {
+ key = "k-" + key
+ a.data[key] = value
+}
+
+func (a *AltDict) values() []interface{} {
+ vs := make([]interface{}, len(a.data))
+ i := 0
+ for _, v := range a.data {
+ vs[i] = v
+ i++
+ }
+ return vs
+}
+
+type DoubleDict struct {
+ data map[int]map[int]interface{}
+}
+
+func NewDoubleDict() *DoubleDict {
+ dd := new(DoubleDict)
+ dd.data = make(map[int]map[int]interface{})
+ return dd
+}
+
+func (d *DoubleDict) Get(a, b int) interface{} {
+ data := d.data[a]
+
+ if data == nil {
+ return nil
+ }
+
+ return data[b]
+}
+
+func (d *DoubleDict) set(a, b int, o interface{}) {
+ data := d.data[a]
+
+ if data == nil {
+ data = make(map[int]interface{})
+ d.data[a] = data
+ }
+
+ data[b] = o
+}
+
+func EscapeWhitespace(s string, escapeSpaces bool) string {
+
+ s = strings.Replace(s, "\t", "\\t", -1)
+ s = strings.Replace(s, "\n", "\\n", -1)
+ s = strings.Replace(s, "\r", "\\r", -1)
+ if escapeSpaces {
+ s = strings.Replace(s, " ", "\u00B7", -1)
+ }
+ return s
+}
+
+func TerminalNodeToStringArray(sa []TerminalNode) []string {
+ st := make([]string, len(sa))
+
+ for i, s := range sa {
+ st[i] = fmt.Sprintf("%v", s)
+ }
+
+ return st
+}
+
+func PrintArrayJavaStyle(sa []string) string {
+ var buffer bytes.Buffer
+
+ buffer.WriteString("[")
+
+ for i, s := range sa {
+ buffer.WriteString(s)
+ if i != len(sa)-1 {
+ buffer.WriteString(", ")
+ }
+ }
+
+ buffer.WriteString("]")
+
+ return buffer.String()
+}
+
+// murmur hash
+func murmurInit(seed int) int {
+ return seed
+}
+
+func murmurUpdate(h int, value int) int {
+ const c1 uint32 = 0xCC9E2D51
+ const c2 uint32 = 0x1B873593
+ const r1 uint32 = 15
+ const r2 uint32 = 13
+ const m uint32 = 5
+ const n uint32 = 0xE6546B64
+
+ k := uint32(value)
+ k *= c1
+ k = (k << r1) | (k >> (32 - r1))
+ k *= c2
+
+ hash := uint32(h) ^ k
+ hash = (hash << r2) | (hash >> (32 - r2))
+ hash = hash*m + n
+ return int(hash)
+}
+
+func murmurFinish(h int, numberOfWords int) int {
+ var hash = uint32(h)
+ hash ^= uint32(numberOfWords) << 2
+ hash ^= hash >> 16
+ hash *= 0x85ebca6b
+ hash ^= hash >> 13
+ hash *= 0xc2b2ae35
+ hash ^= hash >> 16
+
+ return int(hash)
+}
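Editor's note: among the helpers above, IntStack is exported with Push/Pop; a trivial usage sketch under an illustrative `lastPushed` name.

```go
package example

import "github.com/antlr/antlr4/runtime/Go/antlr"

// lastPushed pushes the values in order and pops the most recent one,
// demonstrating the LIFO behaviour of IntStack defined above.
func lastPushed(values []int) (int, error) {
	var s antlr.IntStack
	for _, v := range values {
		s.Push(v)
	}
	return s.Pop() // returns ErrEmptyStack if values was empty
}
```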
diff --git a/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils_set.go b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils_set.go
new file mode 100644
index 000000000..0d4eac698
--- /dev/null
+++ b/vendor/github.com/antlr/antlr4/runtime/Go/antlr/utils_set.go
@@ -0,0 +1,237 @@
+package antlr
+
+import "math"
+
+const (
+ _initalCapacity = 16
+ _initalBucketCapacity = 8
+ _loadFactor = 0.75
+)
+
+var _ Set = (*array2DHashSet)(nil)
+
+type Set interface {
+ Add(value interface{}) (added interface{})
+ Len() int
+ Get(value interface{}) (found interface{})
+ Contains(value interface{}) bool
+ Values() []interface{}
+ Each(f func(interface{}) bool)
+}
+
+type array2DHashSet struct {
+ buckets [][]interface{}
+ hashcodeFunction func(interface{}) int
+ equalsFunction func(interface{}, interface{}) bool
+
+ n int // How many elements in set
+ threshold int // when to expand
+
+ currentPrime int // jump by 4 primes each expand or whatever
+ initialBucketCapacity int
+}
+
+func (as *array2DHashSet) Each(f func(interface{}) bool) {
+ if as.Len() < 1 {
+ return
+ }
+
+ for _, bucket := range as.buckets {
+ for _, o := range bucket {
+ if o == nil {
+ break
+ }
+ if !f(o) {
+ return
+ }
+ }
+ }
+}
+
+func (as *array2DHashSet) Values() []interface{} {
+ if as.Len() < 1 {
+ return nil
+ }
+
+ values := make([]interface{}, 0, as.Len())
+ as.Each(func(i interface{}) bool {
+ values = append(values, i)
+ return true
+ })
+ return values
+}
+
+func (as *array2DHashSet) Contains(value interface{}) bool {
+ return as.Get(value) != nil
+}
+
+func (as *array2DHashSet) Add(value interface{}) interface{} {
+ if as.n > as.threshold {
+ as.expand()
+ }
+ return as.innerAdd(value)
+}
+
+func (as *array2DHashSet) expand() {
+ old := as.buckets
+
+ as.currentPrime += 4
+
+ var (
+ newCapacity = len(as.buckets) << 1
+ newTable = as.createBuckets(newCapacity)
+ newBucketLengths = make([]int, len(newTable))
+ )
+
+ as.buckets = newTable
+ as.threshold = int(float64(newCapacity) * _loadFactor)
+
+ for _, bucket := range old {
+ if bucket == nil {
+ continue
+ }
+
+ for _, o := range bucket {
+ if o == nil {
+ break
+ }
+
+ b := as.getBuckets(o)
+ bucketLength := newBucketLengths[b]
+ var newBucket []interface{}
+ if bucketLength == 0 {
+ // new bucket
+ newBucket = as.createBucket(as.initialBucketCapacity)
+ newTable[b] = newBucket
+ } else {
+ newBucket = newTable[b]
+ if bucketLength == len(newBucket) {
+ // expand
+ newBucketCopy := make([]interface{}, len(newBucket)<<1)
+ copy(newBucketCopy[:bucketLength], newBucket)
+ newBucket = newBucketCopy
+ newTable[b] = newBucket
+ }
+ }
+
+ newBucket[bucketLength] = o
+ newBucketLengths[b]++
+ }
+ }
+}
+
+func (as *array2DHashSet) Len() int {
+ return as.n
+}
+
+func (as *array2DHashSet) Get(o interface{}) interface{} {
+ if o == nil {
+ return nil
+ }
+
+ b := as.getBuckets(o)
+ bucket := as.buckets[b]
+ if bucket == nil { // no bucket
+ return nil
+ }
+
+ for _, e := range bucket {
+ if e == nil {
+ return nil // empty slot; not there
+ }
+ if as.equalsFunction(e, o) {
+ return e
+ }
+ }
+
+ return nil
+}
+
+func (as *array2DHashSet) innerAdd(o interface{}) interface{} {
+ b := as.getBuckets(o)
+
+ bucket := as.buckets[b]
+
+ // new bucket
+ if bucket == nil {
+ bucket = as.createBucket(as.initialBucketCapacity)
+ bucket[0] = o
+
+ as.buckets[b] = bucket
+ as.n++
+ return o
+ }
+
+ // look for it in bucket
+ for i := 0; i < len(bucket); i++ {
+ existing := bucket[i]
+ if existing == nil { // empty slot; not there, add.
+ bucket[i] = o
+ as.n++
+ return o
+ }
+
+ if as.equalsFunction(existing, o) { // found existing, quit
+ return existing
+ }
+ }
+
+ // full bucket, expand and add to end
+ oldLength := len(bucket)
+ bucketCopy := make([]interface{}, oldLength<<1)
+ copy(bucketCopy[:oldLength], bucket)
+ bucket = bucketCopy
+ as.buckets[b] = bucket
+ bucket[oldLength] = o
+ as.n++
+ return o
+}
+
+func (as *array2DHashSet) getBuckets(value interface{}) int {
+ hash := as.hashcodeFunction(value)
+ return hash & (len(as.buckets) - 1)
+}
+
+func (as *array2DHashSet) createBuckets(cap int) [][]interface{} {
+ return make([][]interface{}, cap)
+}
+
+func (as *array2DHashSet) createBucket(cap int) []interface{} {
+ return make([]interface{}, cap)
+}
+
+func newArray2DHashSetWithCap(
+ hashcodeFunction func(interface{}) int,
+ equalsFunction func(interface{}, interface{}) bool,
+ initCap int,
+ initBucketCap int,
+) *array2DHashSet {
+ if hashcodeFunction == nil {
+ hashcodeFunction = standardHashFunction
+ }
+
+ if equalsFunction == nil {
+ equalsFunction = standardEqualsFunction
+ }
+
+ ret := &array2DHashSet{
+ hashcodeFunction: hashcodeFunction,
+ equalsFunction: equalsFunction,
+
+ n: 0,
+ threshold: int(math.Floor(_initalCapacity * _loadFactor)),
+
+ currentPrime: 1,
+ initialBucketCapacity: initBucketCap,
+ }
+
+ ret.buckets = ret.createBuckets(initCap)
+ return ret
+}
+
+func newArray2DHashSet(
+ hashcodeFunction func(interface{}) int,
+ equalsFunction func(interface{}, interface{}) bool,
+) *array2DHashSet {
+ return newArray2DHashSetWithCap(hashcodeFunction, equalsFunction, _initalCapacity, _initalBucketCapacity)
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
index 8bf0e5b78..33c88305c 100644
--- a/vendor/github.com/cespare/xxhash/v2/README.md
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -70,3 +70,5 @@ benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
+- [Ristretto](https://github.com/dgraph-io/ristretto)
+- [Badger](https://github.com/dgraph-io/badger)
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
index a9e0d45c9..78bddf1ce 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -19,10 +19,13 @@ const (
// Store the primes in an array as well.
//
// The consts are used when possible in Go code to avoid MOVs but we need a
-// contiguous array of the assembly code.
+// contiguous array for the assembly code.
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
// Digest implements hash.Hash64.
+//
+// Note that a zero-valued Digest is not ready to receive writes.
+// Call Reset or create a Digest using New before calling other methods.
type Digest struct {
v1 uint64
v2 uint64
@@ -33,19 +36,31 @@ type Digest struct {
n int // how much of mem is used
}
-// New creates a new Digest that computes the 64-bit xxHash algorithm.
+// New creates a new Digest with a zero seed.
func New() *Digest {
+ return NewWithSeed(0)
+}
+
+// NewWithSeed creates a new Digest with the given seed.
+func NewWithSeed(seed uint64) *Digest {
var d Digest
- d.Reset()
+ d.ResetWithSeed(seed)
return &d
}
// Reset clears the Digest's state so that it can be reused.
+// It uses a seed value of zero.
func (d *Digest) Reset() {
- d.v1 = primes[0] + prime2
- d.v2 = prime2
- d.v3 = 0
- d.v4 = -primes[0]
+ d.ResetWithSeed(0)
+}
+
+// ResetWithSeed clears the Digest's state so that it can be reused.
+// It uses the given seed to initialize the state.
+func (d *Digest) ResetWithSeed(seed uint64) {
+ d.v1 = seed + prime1 + prime2
+ d.v2 = seed + prime2
+ d.v3 = seed
+ d.v4 = seed - prime1
d.total = 0
d.n = 0
}
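Editor's note: the hunk above introduces seeded construction (NewWithSeed/ResetWithSeed) next to the existing zero-seed API. A short sketch of both paths, assuming only the functions shown in this diff plus the package's existing Write/WriteString/Sum64 methods.

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing with a zero seed.
	fmt.Println(xxhash.Sum64String("hello world"))

	// Streaming digest with an explicit seed (new in this version bump).
	d := xxhash.NewWithSeed(42)
	d.WriteString("hello ")
	d.WriteString("world")
	fmt.Println(d.Sum64())
}
```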
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
index 9216e0a40..78f95f256 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_asm.go
@@ -6,7 +6,7 @@
package xxhash
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
//
//go:noescape
func Sum64(b []byte) uint64
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
index 26df13bba..118e49e81 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -3,7 +3,7 @@
package xxhash
-// Sum64 computes the 64-bit xxHash digest of b.
+// Sum64 computes the 64-bit xxHash digest of b with a zero seed.
func Sum64(b []byte) uint64 {
// A simpler version would be
// d := New()
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
index e86f1b5fd..05f5e7dfe 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -5,7 +5,7 @@
package xxhash
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
func Sum64String(s string) uint64 {
return Sum64([]byte(s))
}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
index 1c1638fd8..cf9d42aed 100644
--- a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -33,7 +33,7 @@ import (
//
// See https://github.com/golang/go/issues/42739 for discussion.
-// Sum64String computes the 64-bit xxHash digest of s.
+// Sum64String computes the 64-bit xxHash digest of s with a zero seed.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/CESQLLexer.g4 b/vendor/github.com/cloudevents/sdk-go/sql/v2/CESQLLexer.g4
new file mode 100644
index 000000000..d83124abd
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/CESQLLexer.g4
@@ -0,0 +1,79 @@
+lexer grammar CESQLLexer;
+
+// NOTE:
+// This grammar is case-sensitive, although CESQL keywords are case-insensitive.
+// In order to implement case-insensitivity, check out
+// https://github.com/antlr/antlr4/blob/master/doc/case-insensitive-lexing.md#custom-character-streams-approach
+
+// Skip spaces, tabs, carriage returns and newlines
+
+SPACE: [ \t\r\n]+ -> skip;
+
+// Fragments for Literal primitives
+
+fragment ID_LITERAL: [a-zA-Z0-9]+;
+fragment DQUOTA_STRING: '"' ( '\\'. | '""' | ~('"'| '\\') )* '"';
+fragment SQUOTA_STRING: '\'' ('\\'. | '\'\'' | ~('\'' | '\\'))* '\'';
+fragment INT_DIGIT: [0-9];
+fragment FN_LITERAL: [A-Z] [A-Z_]*;
+
+// Constructors symbols
+
+LR_BRACKET: '(';
+RR_BRACKET: ')';
+COMMA: ',';
+SINGLE_QUOTE_SYMB: '\'';
+DOUBLE_QUOTE_SYMB: '"';
+
+fragment QUOTE_SYMB
+ : SINGLE_QUOTE_SYMB | DOUBLE_QUOTE_SYMB
+ ;
+
+// Operators
+// - Logic
+
+AND: 'AND';
+OR: 'OR';
+XOR: 'XOR';
+NOT: 'NOT';
+
+// - Arithmetics
+
+STAR: '*';
+DIVIDE: '/';
+MODULE: '%';
+PLUS: '+';
+MINUS: '-';
+
+// - Comparison
+
+EQUAL: '=';
+NOT_EQUAL: '!=';
+GREATER: '>';
+GREATER_OR_EQUAL: '>=';
+LESS: '<';
+LESS_GREATER: '<>';
+LESS_OR_EQUAL: '<=';
+
+// Like, exists, in
+
+LIKE: 'LIKE';
+EXISTS: 'EXISTS';
+IN: 'IN';
+
+// Booleans
+
+TRUE: 'TRUE';
+FALSE: 'FALSE';
+
+// Literals
+
+DQUOTED_STRING_LITERAL: DQUOTA_STRING;
+SQUOTED_STRING_LITERAL: SQUOTA_STRING;
+INTEGER_LITERAL: ('+' | '-')? INT_DIGIT+;
+
+// Identifiers
+
+IDENTIFIER: [a-zA-Z]+;
+IDENTIFIER_WITH_NUMBER: [a-zA-Z0-9]+;
+FUNCTION_IDENTIFIER_WITH_UNDERSCORE: [A-Z] [A-Z_]*;
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/CESQLParser.g4 b/vendor/github.com/cloudevents/sdk-go/sql/v2/CESQLParser.g4
new file mode 100644
index 000000000..abab0bac8
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/CESQLParser.g4
@@ -0,0 +1,62 @@
+grammar CESQLParser;
+
+import CESQLLexer;
+
+// Entrypoint
+cesql: expression EOF;
+
+// Structure of operations, function invocations and expression
+expression
+ : functionIdentifier functionParameterList #functionInvocationExpression
+ // unary operators are the highest priority
+ | NOT expression #unaryLogicExpression
+ | MINUS expression # unaryNumericExpression
+ // LIKE, EXISTS and IN takes precedence over all the other binary operators
+ | expression NOT? LIKE stringLiteral #likeExpression
+ | EXISTS identifier #existsExpression
+ | expression NOT? IN setExpression #inExpression
+ // Numeric operations
+ | expression (STAR | DIVIDE | MODULE) expression #binaryMultiplicativeExpression
+ | expression (PLUS | MINUS) expression #binaryAdditiveExpression
+ // Comparison operations
+ | expression (EQUAL | NOT_EQUAL | LESS_GREATER | GREATER_OR_EQUAL | LESS_OR_EQUAL | LESS | GREATER) expression #binaryComparisonExpression
+ // Logic operations
+ | expression (AND | OR | XOR) expression #binaryLogicExpression
+ // Subexpressions and atoms
+ | LR_BRACKET expression RR_BRACKET #subExpression
+ | atom #atomExpression
+ ;
+
+atom
+ : booleanLiteral #booleanAtom
+ | integerLiteral #integerAtom
+ | stringLiteral #stringAtom
+ | identifier #identifierAtom
+ ;
+
+// Identifiers
+
+identifier
+ : (IDENTIFIER | IDENTIFIER_WITH_NUMBER)
+ ;
+functionIdentifier
+ : (IDENTIFIER | FUNCTION_IDENTIFIER_WITH_UNDERSCORE)
+ ;
+
+// Literals
+
+booleanLiteral: (TRUE | FALSE);
+stringLiteral: (DQUOTED_STRING_LITERAL | SQUOTED_STRING_LITERAL);
+integerLiteral: INTEGER_LITERAL;
+
+// Functions
+
+functionParameterList
+ : LR_BRACKET ( expression ( COMMA expression )* )? RR_BRACKET
+ ;
+
+// Sets
+
+setExpression
+ : LR_BRACKET expression ( COMMA expression )* RR_BRACKET // Empty sets are not allowed
+ ;
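Editor's note: the expression rule above fixes operator precedence — unary operators bind tightest, then LIKE/EXISTS/IN, then multiplicative, additive, comparison, and finally the logic operators. A hedged sketch of how that plays out through the package's public parser API (documented in the README later in this diff); the event attribute values are illustrative.

```go
package main

import (
	"fmt"

	cesqlparser "github.com/cloudevents/sdk-go/sql/v2/parser"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	// Per the grammar, LIKE and = bind tighter than AND, so this groups as
	// (type LIKE 'dev.%') AND (source = 'https://example.com').
	expr, err := cesqlparser.Parse("type LIKE 'dev.%' AND source = 'https://example.com'")
	if err != nil {
		panic(err)
	}

	event := cloudevents.NewEvent()
	event.SetID("aaaa-bbbb-dddd")
	event.SetSource("https://example.com")
	event.SetType("dev.example.published")

	res, err := expr.Evaluate(event)
	fmt.Println(res, err) // expected: true <nil>
}
```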
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/LICENSE b/vendor/github.com/cloudevents/sdk-go/sql/v2/LICENSE
new file mode 100644
index 000000000..261eeb9e9
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/Makefile b/vendor/github.com/cloudevents/sdk-go/sql/v2/Makefile
new file mode 100644
index 000000000..e69de29bb
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/README.md b/vendor/github.com/cloudevents/sdk-go/sql/v2/README.md
new file mode 100644
index 000000000..948f48f41
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/README.md
@@ -0,0 +1,80 @@
+# CloudEvents Expression Language Go implementation
+
+CloudEvents Expression Language implementation.
+
+Note: this package is a work in progress, APIs might break in future releases.
+
+## User guide
+
+To start using it:
+
+```go
+import cesqlparser "github.com/cloudevents/sdk-go/sql/v2/parser"
+
+// Parse the expression
+expression, err := cesqlparser.Parse("subject = 'Hello world'")
+
+// Res can be either int32, bool or string
+res, err := expression.Evaluate(event)
+```
+
+Add a user defined function
+```go
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ cefn "github.com/cloudevents/sdk-go/sql/v2/function"
+ cesqlparser "github.com/cloudevents/sdk-go/sql/v2/parser"
+ ceruntime "github.com/cloudevents/sdk-go/sql/v2/runtime"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+// Create a test event
+event := cloudevents.NewEvent()
+event.SetID("aaaa-bbbb-dddd")
+event.SetSource("https://my-source")
+event.SetType("dev.tekton.event")
+
+// Create and add a new user defined function
+var HasPrefixFunction cesql.Function = cefn.NewFunction(
+ "HASPREFIX",
+ []cesql.Type{cesql.StringType, cesql.StringType},
+ nil,
+ func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ str := i[0].(string)
+ prefix := i[1].(string)
+
+ return strings.HasPrefix(str, prefix), nil
+ },
+)
+
+err := ceruntime.AddFunction(HasPrefixFunction)
+
+// Parse the expression
+expression, err := cesqlparser.Parse("HASPREFIX(type, 'dev.tekton.event')")
+if err != nil {
+ fmt.Println("parser err: ", err)
+ os.Exit(1)
+}
+
+// Evaluate the expression with the test event
+res, err := expression.Evaluate(event)
+
+if res.(bool) {
+ fmt.Println("Event type has the prefix")
+} else {
+ fmt.Println("Event type doesn't have the prefix")
+}
+```
+
+## Development guide
+
+To regenerate the parser, make sure you have [ANTLR4 installed](https://github.com/antlr/antlr4/blob/master/doc/getting-started.md) and then run:
+
+```shell
+antlr4 -v 4.10.1 -Dlanguage=Go -package gen -o gen -visitor -no-listener CESQLParser.g4
+```
+
+Then you need to run this sed command as a workaround until this ANTLR [issue](https://github.com/antlr/antlr4/issues/2433) is resolved. Without this, building for 32bit platforms will throw an int overflow error:
+```shell
+sed -i 's/(1<</(int64(1)<</g' gen/cesql_parser.go
+```
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression.go
new file mode 100644
index 000000000..e68e43d42
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression.go
@@ -0,0 +1,17 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package v2
+
+import cloudevents "github.com/cloudevents/sdk-go/v2"
+
+// Expression represents a parsed CloudEvents SQL Expression.
+type Expression interface {
+
+ // Evaluate the expression using the provided input type.
+ // The return value can be either int32, bool or string.
+ // The evaluation fails as soon as an error arises.
+ Evaluate(event cloudevents.Event) (interface{}, error)
+}
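Editor's note: since Evaluate is documented above to yield an int32, bool, or string, callers typically type-switch on the result. A minimal hedged sketch; `describeResult` is an illustrative name.

```go
package example

import (
	"fmt"

	cesql "github.com/cloudevents/sdk-go/sql/v2"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

// describeResult evaluates expr against event and reports the dynamic type
// of the result, covering the three documented return types.
func describeResult(expr cesql.Expression, event cloudevents.Event) (string, error) {
	res, err := expr.Evaluate(event)
	if err != nil {
		return "", err
	}
	switch v := res.(type) {
	case bool:
		return fmt.Sprintf("boolean: %t", v), nil
	case int32:
		return fmt.Sprintf("integer: %d", v), nil
	case string:
		return "string: " + v, nil
	default:
		return "", fmt.Errorf("unexpected result type %T", v)
	}
}
```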
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/base_expressions.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/base_expressions.go
new file mode 100644
index 000000000..82f9c953b
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/base_expressions.go
@@ -0,0 +1,17 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import cesql "github.com/cloudevents/sdk-go/sql/v2"
+
+type baseUnaryExpression struct {
+ child cesql.Expression
+}
+
+type baseBinaryExpression struct {
+ left cesql.Expression
+ right cesql.Expression
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/comparison_expressions.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/comparison_expressions.go
new file mode 100644
index 000000000..a39ad3ce7
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/comparison_expressions.go
@@ -0,0 +1,56 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type equalExpression struct {
+ baseBinaryExpression
+ equal bool
+}
+
+func (s equalExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ leftVal, err := s.left.Evaluate(event)
+ if err != nil {
+ return false, err
+ }
+
+ rightVal, err := s.right.Evaluate(event)
+ if err != nil {
+ return false, err
+ }
+
+ leftVal, err = utils.Cast(leftVal, cesql.TypeFromVal(rightVal))
+ if err != nil {
+ return false, err
+ }
+
+ return (leftVal == rightVal) == s.equal, nil
+}
+
+func NewEqualExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return equalExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ equal: true,
+ }
+}
+
+func NewNotEqualExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return equalExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ equal: false,
+ }
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/exists_expression.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/exists_expression.go
new file mode 100644
index 000000000..82586e091
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/exists_expression.go
@@ -0,0 +1,24 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type existsExpression struct {
+ identifier string
+}
+
+func (l existsExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ return utils.ContainsAttribute(event, l.identifier), nil
+}
+
+func NewExistsExpression(identifier string) cesql.Expression {
+ return existsExpression{identifier: identifier}
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/function_invocation_expression.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/function_invocation_expression.go
new file mode 100644
index 000000000..577272f26
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/function_invocation_expression.go
@@ -0,0 +1,71 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ "fmt"
+
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ sqlerrors "github.com/cloudevents/sdk-go/sql/v2/errors"
+ "github.com/cloudevents/sdk-go/sql/v2/runtime"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type functionInvocationExpression struct {
+ name string
+ argumentsExpression []cesql.Expression
+}
+
+func (expr functionInvocationExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ fn := runtime.ResolveFunction(expr.name, len(expr.argumentsExpression))
+ if fn == nil {
+ return false, sqlerrors.NewMissingFunctionError(expr.name)
+ }
+
+ args := make([]interface{}, len(expr.argumentsExpression))
+
+ defaultVal := fn.ReturnType().ZeroValue()
+
+ for i, expr := range expr.argumentsExpression {
+ arg, err := expr.Evaluate(event)
+ if err != nil {
+ return defaultVal, err
+ }
+
+ argType := fn.ArgType(i)
+ if argType == nil {
+ return defaultVal, sqlerrors.NewFunctionEvaluationError(fmt.Errorf("cannot resolve arg type at index %d for function %s", i, fn.Name()))
+ }
+
+ arg, err = utils.Cast(arg, *argType)
+ if err != nil {
+ return defaultVal, err
+ }
+
+ args[i] = arg
+ }
+
+ result, err := fn.Run(event, args)
+ if result == nil {
+ if err != nil {
+ err = sqlerrors.NewFunctionEvaluationError(fmt.Errorf("function %s encountered error %w and did not return any value, defaulting to the default value for the function", fn.Name(), err))
+ } else {
+ err = sqlerrors.NewFunctionEvaluationError(fmt.Errorf("function %s did not return any value, defaulting to the default value for the function", fn.Name()))
+ }
+
+ return defaultVal, err
+ }
+
+ return result, err
+}
+
+func NewFunctionInvocationExpression(name string, argumentsExpression []cesql.Expression) cesql.Expression {
+ return functionInvocationExpression{
+ name: name,
+ argumentsExpression: argumentsExpression,
+ }
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/identifier_expression.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/identifier_expression.go
new file mode 100644
index 000000000..fe85052f9
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/identifier_expression.go
@@ -0,0 +1,30 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ sqlerrors "github.com/cloudevents/sdk-go/sql/v2/errors"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type identifierExpression struct {
+ identifier string
+}
+
+func (l identifierExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ value := utils.GetAttribute(event, l.identifier)
+ if value == nil {
+ return false, sqlerrors.NewMissingAttributeError(l.identifier)
+ }
+
+ return value, nil
+}
+
+func NewIdentifierExpression(identifier string) cesql.Expression {
+ return identifierExpression{identifier: identifier}
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/in_expression.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/in_expression.go
new file mode 100644
index 000000000..ef1c587c0
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/in_expression.go
@@ -0,0 +1,46 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type inExpression struct {
+ leftExpression cesql.Expression
+ setExpression []cesql.Expression
+}
+
+func (l inExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ leftValue, err := l.leftExpression.Evaluate(event)
+ if err != nil {
+ return false, err
+ }
+
+ for _, rightExpression := range l.setExpression {
+ rightValue, err := rightExpression.Evaluate(event)
+ if err != nil {
+ return false, err
+ }
+
+ rightValue, err = utils.Cast(rightValue, cesql.TypeFromVal(leftValue))
+ if err != nil {
+ return false, err
+ }
+
+ if leftValue == rightValue {
+ return true, nil
+ }
+ }
+
+ return false, nil
+}
+
+func NewInExpression(leftExpression cesql.Expression, setExpression []cesql.Expression) cesql.Expression {
+ return inExpression{leftExpression, setExpression}
+}
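The IN expression evaluates its left operand once, then casts each member of the set to the left value's type before comparing, so mixed literal types still match when they cast cleanly. A minimal sketch using only the constructors exported above (the event is unused by literals but required by the interface):

```go
package main

import (
	"fmt"

	cesql "github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/expression"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	e := cloudevents.NewEvent()

	// The string "2" in the set is cast to the integer type of the
	// left value before the equality check, so the match succeeds.
	in := expression.NewInExpression(
		expression.NewLiteralExpression(int32(2)),
		[]cesql.Expression{
			expression.NewLiteralExpression("1"),
			expression.NewLiteralExpression("2"),
		},
	)

	result, err := in.Evaluate(e)
	fmt.Println(result, err) // true <nil>
}
```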
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/integer_comparison_expressions.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/integer_comparison_expressions.go
new file mode 100644
index 000000000..12e529b79
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/integer_comparison_expressions.go
@@ -0,0 +1,89 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type integerComparisonExpression struct {
+ baseBinaryExpression
+ fn func(x, y int32) bool
+}
+
+func (s integerComparisonExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ leftVal, err := s.left.Evaluate(event)
+ if err != nil {
+ return false, err
+ }
+
+ rightVal, err := s.right.Evaluate(event)
+ if err != nil {
+ return false, err
+ }
+
+ leftVal, err = utils.Cast(leftVal, cesql.IntegerType)
+ if err != nil {
+ return false, err
+ }
+
+ rightVal, err = utils.Cast(rightVal, cesql.IntegerType)
+ if err != nil {
+ return false, err
+ }
+
+ return s.fn(leftVal.(int32), rightVal.(int32)), nil
+}
+
+func NewLessExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return integerComparisonExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) bool {
+ return x < y
+ },
+ }
+}
+
+func NewLessOrEqualExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return integerComparisonExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) bool {
+ return x <= y
+ },
+ }
+}
+
+func NewGreaterExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return integerComparisonExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) bool {
+ return x > y
+ },
+ }
+}
+
+func NewGreaterOrEqualExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return integerComparisonExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) bool {
+ return x >= y
+ },
+ }
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/like_expression.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/like_expression.go
new file mode 100644
index 000000000..ed43db494
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/like_expression.go
@@ -0,0 +1,91 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type likeExpression struct {
+ baseUnaryExpression
+ pattern string
+}
+
+func (l likeExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ val, err := l.child.Evaluate(event)
+ if err != nil {
+ return false, err
+ }
+
+ val, err = utils.Cast(val, cesql.StringType)
+ if err != nil {
+ return false, err
+ }
+
+ return matchString(val.(string), l.pattern), nil
+
+}
+
+func NewLikeExpression(child cesql.Expression, pattern string) (cesql.Expression, error) {
+ return likeExpression{
+ baseUnaryExpression: baseUnaryExpression{
+ child: child,
+ },
+ pattern: pattern,
+ }, nil
+}
+
+func matchString(text, pattern string) bool {
+ textLen := len(text)
+ patternLen := len(pattern)
+ textIdx := 0
+ patternIdx := 0
+ lastWildcardIdx := -1
+ lastMatchIdx := 0
+
+ if patternLen == 0 {
+ return patternLen == textLen
+ }
+
+ for textIdx < textLen {
+ if patternIdx < patternLen-1 && pattern[patternIdx] == '\\' &&
+ ((pattern[patternIdx+1] == '_' || pattern[patternIdx+1] == '%') &&
+ pattern[patternIdx+1] == text[textIdx]) {
+ // handle escaped characters -> pattern needs to increment two places here
+ patternIdx += 2
+ textIdx += 1
+ } else if patternIdx < patternLen && (pattern[patternIdx] == '_' || pattern[patternIdx] == text[textIdx]) {
+ // handle non escaped characters
+ textIdx += 1
+ patternIdx += 1
+ } else if patternIdx < patternLen && pattern[patternIdx] == '%' {
+ // handle wildcard characters
+ lastWildcardIdx = patternIdx
+ lastMatchIdx = textIdx
+ patternIdx += 1
+ } else if lastWildcardIdx != -1 {
+ // greedy match didn't work, try again from the last known match
+ patternIdx = lastWildcardIdx + 1
+ lastMatchIdx += 1
+ textIdx = lastMatchIdx
+ } else {
+ return false
+ }
+ }
+
+ // consume remaining pattern characters as long as they are wildcards
+ for patternIdx < patternLen {
+ if pattern[patternIdx] != '%' {
+ return false
+ }
+
+ patternIdx += 1
+ }
+
+ return true
+}
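`matchString` implements SQL LIKE semantics without regular expressions: `%` matches any run of characters (including none), `_` matches exactly one character, `\%` and `\_` match the literal characters, and when a greedy `%` match fails the loop backtracks from the last wildcard position. A small sketch, assuming the standard sdk-go event constructor:

```go
package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/sql/v2/expression"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	e := cloudevents.NewEvent()

	// '%' absorbs "order" here; the trailing ".created" is then matched
	// literally after the backtracking step in matchString.
	like, err := expression.NewLikeExpression(
		expression.NewLiteralExpression("com.example.order.created"),
		"com.example.%.created",
	)
	if err != nil {
		panic(err)
	}

	result, _ := like.Evaluate(e)
	fmt.Println(result) // true
}
```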
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/literal_expression.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/literal_expression.go
new file mode 100644
index 000000000..5c139fc8c
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/literal_expression.go
@@ -0,0 +1,23 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type literalExpression struct {
+ value interface{}
+}
+
+func (l literalExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ return l.value, nil
+}
+
+func NewLiteralExpression(value interface{}) cesql.Expression {
+ return literalExpression{value: value}
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/logic_expressions.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/logic_expressions.go
new file mode 100644
index 000000000..4812debf2
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/logic_expressions.go
@@ -0,0 +1,89 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type logicExpression struct {
+ baseBinaryExpression
+ fn func(x, y bool) bool
+ verb string
+}
+
+func (s logicExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ leftVal, err := s.left.Evaluate(event)
+ if err != nil {
+ return false, err
+ }
+
+ leftVal, err = utils.Cast(leftVal, cesql.BooleanType)
+ if err != nil {
+ return false, err
+ }
+
+ // Don't bother to check the other expression unless we need to
+ if s.verb == "AND" && leftVal.(bool) == false {
+ return false, nil
+ }
+ if s.verb == "OR" && leftVal.(bool) == true {
+ return true, nil
+ }
+
+ rightVal, err := s.right.Evaluate(event)
+ if err != nil {
+ return false, err
+ }
+
+ rightVal, err = utils.Cast(rightVal, cesql.BooleanType)
+ if err != nil {
+ return false, err
+ }
+
+ return s.fn(leftVal.(bool), rightVal.(bool)), nil
+}
+
+func NewAndExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return logicExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y bool) bool {
+ return x && y
+ },
+ verb: "AND",
+ }
+}
+
+func NewOrExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return logicExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y bool) bool {
+ return x || y
+ },
+ verb: "OR",
+ }
+}
+
+func NewXorExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return logicExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y bool) bool {
+ return x != y
+ },
+ verb: "XOR",
+ }
+}
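AND and OR short-circuit on the left operand: when the left side already decides the result, the right expression is never evaluated and any error it would raise never surfaces; XOR always evaluates both sides. A minimal sketch of the short-circuit behaviour:

```go
package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/sql/v2/expression"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	e := cloudevents.NewEvent()

	// The right operand references a missing attribute and would fail on
	// its own, but AND returns false as soon as the left operand is false.
	and := expression.NewAndExpression(
		expression.NewLiteralExpression(false),
		expression.NewIdentifierExpression("someext"),
	)

	result, err := and.Evaluate(e)
	fmt.Println(result, err) // false <nil>
}
```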
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/math_expressions.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/math_expressions.go
new file mode 100644
index 000000000..50b45d70b
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/math_expressions.go
@@ -0,0 +1,108 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ sqlerrors "github.com/cloudevents/sdk-go/sql/v2/errors"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type mathExpression struct {
+ baseBinaryExpression
+ fn func(x, y int32) (int32, error)
+}
+
+func (s mathExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ leftVal, err := s.left.Evaluate(event)
+ if err != nil {
+ return int32(0), err
+ }
+
+ rightVal, err := s.right.Evaluate(event)
+ if err != nil {
+ return int32(0), err
+ }
+
+ leftVal, err = utils.Cast(leftVal, cesql.IntegerType)
+ if err != nil {
+ return int32(0), err
+ }
+
+ rightVal, err = utils.Cast(rightVal, cesql.IntegerType)
+ if err != nil {
+ return int32(0), err
+ }
+
+ return s.fn(leftVal.(int32), rightVal.(int32))
+}
+
+func NewSumExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return mathExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) (int32, error) {
+ return x + y, nil
+ },
+ }
+}
+
+func NewDifferenceExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return mathExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) (int32, error) {
+ return x - y, nil
+ },
+ }
+}
+
+func NewMultiplicationExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return mathExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) (int32, error) {
+ return x * y, nil
+ },
+ }
+}
+
+func NewModuleExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return mathExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) (int32, error) {
+ if y == 0 {
+ return 0, sqlerrors.NewMathError("division by zero")
+ }
+ return x % y, nil
+ },
+ }
+}
+
+func NewDivisionExpression(left cesql.Expression, right cesql.Expression) cesql.Expression {
+ return mathExpression{
+ baseBinaryExpression: baseBinaryExpression{
+ left: left,
+ right: right,
+ },
+ fn: func(x, y int32) (int32, error) {
+ if y == 0 {
+ return 0, sqlerrors.NewMathError("division by zero")
+ }
+ return x / y, nil
+ },
+ }
+}
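All arithmetic is performed on int32 after both operands are cast to the integer type, and division and modulo guard against a zero divisor by returning zero plus a math error rather than panicking. A short sketch:

```go
package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/sql/v2/expression"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	e := cloudevents.NewEvent()

	sum := expression.NewSumExpression(
		expression.NewLiteralExpression(int32(2)),
		expression.NewLiteralExpression(int32(3)),
	)
	fmt.Println(sum.Evaluate(e)) // 5 <nil>

	// Divisor of zero: the expression returns 0 together with a math error
	// instead of triggering a runtime panic.
	div := expression.NewDivisionExpression(
		expression.NewLiteralExpression(int32(1)),
		expression.NewLiteralExpression(int32(0)),
	)
	fmt.Println(div.Evaluate(e))
}
```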
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/negate_expression.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/negate_expression.go
new file mode 100644
index 000000000..d271f3243
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/negate_expression.go
@@ -0,0 +1,32 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type negateExpression baseUnaryExpression
+
+func (l negateExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ val, err := l.child.Evaluate(event)
+ if err != nil {
+ return int32(0), err
+ }
+
+ val, err = utils.Cast(val, cesql.IntegerType)
+ if err != nil {
+ return int32(0), err
+ }
+
+ return -(val.(int32)), nil
+}
+
+func NewNegateExpression(child cesql.Expression) cesql.Expression {
+ return negateExpression{child: child}
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/not_expression.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/not_expression.go
new file mode 100644
index 000000000..a1bedac10
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/expression/not_expression.go
@@ -0,0 +1,32 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package expression
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type notExpression baseUnaryExpression
+
+func (l notExpression) Evaluate(event cloudevents.Event) (interface{}, error) {
+ val, err := l.child.Evaluate(event)
+ if err != nil {
+ return false, err
+ }
+
+ val, err = utils.Cast(val, cesql.BooleanType)
+ if err != nil {
+ return false, err
+ }
+
+ return !(val.(bool)), nil
+}
+
+func NewNotExpression(child cesql.Expression) cesql.Expression {
+ return notExpression{child: child}
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/function.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/function.go
new file mode 100644
index 000000000..fb7efb655
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/function.go
@@ -0,0 +1,18 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package v2
+
+import cloudevents "github.com/cloudevents/sdk-go/v2"
+
+type Function interface {
+ Name() string
+ Arity() int
+ IsVariadic() bool
+ ArgType(index int) *Type
+ ReturnType() Type
+
+ Run(event cloudevents.Event, arguments []interface{}) (interface{}, error)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/function/casting_functions.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/casting_functions.go
new file mode 100644
index 000000000..35c1d562e
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/casting_functions.go
@@ -0,0 +1,62 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package function
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ "github.com/cloudevents/sdk-go/sql/v2/utils"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+var IntFunction function = function{
+ name: "INT",
+ fixedArgs: []cesql.Type{cesql.AnyType},
+ variadicArgs: nil,
+ returnType: cesql.IntegerType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return utils.Cast(i[0], cesql.IntegerType)
+ },
+}
+
+var BoolFunction function = function{
+ name: "BOOL",
+ fixedArgs: []cesql.Type{cesql.AnyType},
+ variadicArgs: nil,
+ returnType: cesql.BooleanType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return utils.Cast(i[0], cesql.BooleanType)
+ },
+}
+
+var StringFunction function = function{
+ name: "STRING",
+ fixedArgs: []cesql.Type{cesql.AnyType},
+ variadicArgs: nil,
+ returnType: cesql.StringType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return utils.Cast(i[0], cesql.StringType)
+ },
+}
+
+var IsIntFunction function = function{
+ name: "IS_INT",
+ fixedArgs: []cesql.Type{cesql.AnyType},
+ variadicArgs: nil,
+ returnType: cesql.BooleanType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return utils.CanCast(i[0], cesql.IntegerType), nil
+ },
+}
+
+var IsBoolFunction function = function{
+ name: "IS_BOOL",
+ fixedArgs: []cesql.Type{cesql.AnyType},
+ variadicArgs: nil,
+ returnType: cesql.BooleanType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return utils.CanCast(i[0], cesql.BooleanType), nil
+ },
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/function/function.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/function.go
new file mode 100644
index 000000000..ddc3d0489
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/function.go
@@ -0,0 +1,62 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package function
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+type FuncType func(cloudevents.Event, []interface{}) (interface{}, error)
+
+type function struct {
+ name string
+ fixedArgs []cesql.Type
+ variadicArgs *cesql.Type
+ returnType cesql.Type
+ fn FuncType
+}
+
+func (f function) Name() string {
+ return f.name
+}
+
+func (f function) Arity() int {
+ return len(f.fixedArgs)
+}
+
+func (f function) IsVariadic() bool {
+ return f.variadicArgs != nil
+}
+
+func (f function) ArgType(index int) *cesql.Type {
+ if index < len(f.fixedArgs) {
+ return &f.fixedArgs[index]
+ }
+ return f.variadicArgs
+}
+
+func (f function) ReturnType() cesql.Type {
+ return f.returnType
+}
+
+func (f function) Run(event cloudevents.Event, arguments []interface{}) (interface{}, error) {
+ return f.fn(event, arguments)
+}
+
+func NewFunction(name string,
+ fixedArgs []cesql.Type,
+ variadicArgs *cesql.Type,
+ returnType cesql.Type,
+ fn FuncType) cesql.Function {
+ return function{
+ name: name,
+ fixedArgs: fixedArgs,
+ variadicArgs: variadicArgs,
+ returnType: returnType,
+ fn: fn,
+ }
+}
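`NewFunction` assembles a `cesql.Function` from a name, the fixed argument types, an optional variadic argument type, the return type, and a `FuncType` body; `ArgType` falls back to the variadic type once the index runs past the fixed arguments. A sketch of building a hypothetical REVERSE function this way (registering it with the runtime is outside this hunk, so only construction and a direct `Run` call are shown):

```go
package main

import (
	"fmt"

	cesql "github.com/cloudevents/sdk-go/sql/v2"
	"github.com/cloudevents/sdk-go/sql/v2/function"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	// REVERSE is a hypothetical user-defined function, used here only to
	// illustrate the constructor parameters.
	reverse := function.NewFunction(
		"REVERSE",
		[]cesql.Type{cesql.StringType}, // one fixed string argument
		nil,                            // not variadic
		cesql.StringType,
		func(event cloudevents.Event, args []interface{}) (interface{}, error) {
			in := args[0].(string)
			out := make([]byte, len(in))
			for i := 0; i < len(in); i++ {
				out[len(in)-1-i] = in[i]
			}
			return string(out), nil
		},
	)

	fmt.Println(reverse.Run(cloudevents.NewEvent(), []interface{}{"abc"})) // cba <nil>
}
```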
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/function/integer_functions.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/integer_functions.go
new file mode 100644
index 000000000..f7f3f7225
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/integer_functions.go
@@ -0,0 +1,30 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package function
+
+import (
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ sqlerrors "github.com/cloudevents/sdk-go/sql/v2/errors"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+ "math"
+)
+
+var AbsFunction function = function{
+ name: "ABS",
+ fixedArgs: []cesql.Type{cesql.IntegerType},
+ variadicArgs: nil,
+ returnType: cesql.IntegerType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ x := i[0].(int32)
+ if x == math.MinInt32 {
+ return int32(math.MaxInt32), sqlerrors.NewMathError("integer overflow while computing ABS")
+ }
+ if x < 0 {
+ return -x, nil
+ }
+ return x, nil
+ },
+}
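ABS has to special-case `math.MinInt32`: in two's complement, -(-2147483648) does not fit in an int32 (the maximum is 2147483647), so the function returns MaxInt32 along with a math error instead of silently overflowing. A short sketch calling the exported function value directly:

```go
package main

import (
	"fmt"
	"math"

	"github.com/cloudevents/sdk-go/sql/v2/function"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	e := cloudevents.NewEvent()

	// Overflow case: the result is clamped to MaxInt32 and a math error is returned.
	fmt.Println(function.AbsFunction.Run(e, []interface{}{int32(math.MinInt32)}))

	// Normal case.
	fmt.Println(function.AbsFunction.Run(e, []interface{}{int32(-7)})) // 7 <nil>
}
```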
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/function/string_functions.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/string_functions.go
new file mode 100644
index 000000000..66eeb2c16
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/function/string_functions.go
@@ -0,0 +1,188 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package function
+
+import (
+ "fmt"
+ "strings"
+
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ sqlerrors "github.com/cloudevents/sdk-go/sql/v2/errors"
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+)
+
+var LengthFunction function = function{
+ name: "LENGTH",
+ fixedArgs: []cesql.Type{cesql.StringType},
+ variadicArgs: nil,
+ returnType: cesql.IntegerType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return int32(len(i[0].(string))), nil
+ },
+}
+
+var ConcatFunction function = function{
+ name: "CONCAT",
+ variadicArgs: cesql.TypePtr(cesql.StringType),
+ returnType: cesql.StringType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ var sb strings.Builder
+ for _, v := range i {
+ sb.WriteString(v.(string))
+ }
+ return sb.String(), nil
+ },
+}
+
+var ConcatWSFunction function = function{
+ name: "CONCAT_WS",
+ fixedArgs: []cesql.Type{cesql.StringType},
+ variadicArgs: cesql.TypePtr(cesql.StringType),
+ returnType: cesql.StringType,
+ fn: func(event cloudevents.Event, args []interface{}) (interface{}, error) {
+ if len(args) == 1 {
+ return "", nil
+ }
+ separator := args[0].(string)
+
+ var sb strings.Builder
+ for i := 1; i < len(args)-1; i++ {
+ sb.WriteString(args[i].(string))
+ sb.WriteString(separator)
+ }
+ sb.WriteString(args[len(args)-1].(string))
+ return sb.String(), nil
+ },
+}
+
+var LowerFunction function = function{
+ name: "LOWER",
+ fixedArgs: []cesql.Type{cesql.StringType},
+ returnType: cesql.StringType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return strings.ToLower(i[0].(string)), nil
+ },
+}
+
+var UpperFunction function = function{
+ name: "UPPER",
+ fixedArgs: []cesql.Type{cesql.StringType},
+ returnType: cesql.StringType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return strings.ToUpper(i[0].(string)), nil
+ },
+}
+
+var TrimFunction function = function{
+ name: "TRIM",
+ fixedArgs: []cesql.Type{cesql.StringType},
+ returnType: cesql.StringType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ return strings.TrimSpace(i[0].(string)), nil
+ },
+}
+
+var LeftFunction function = function{
+ name: "LEFT",
+ fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType},
+ returnType: cesql.StringType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ str := i[0].(string)
+ y := int(i[1].(int32))
+
+ if y > len(str) {
+ return str, nil
+ }
+
+ if y < 0 {
+ return str, sqlerrors.NewFunctionEvaluationError(fmt.Errorf("LEFT y argument is < 0: %d", y))
+ }
+
+ return str[0:y], nil
+ },
+}
+
+var RightFunction function = function{
+ name: "RIGHT",
+ fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType},
+ returnType: cesql.StringType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ str := i[0].(string)
+ y := int(i[1].(int32))
+
+ if y > len(str) {
+ return str, nil
+ }
+
+ if y < 0 {
+ return str, sqlerrors.NewFunctionEvaluationError(fmt.Errorf("RIGHT y argument is < 0: %d", y))
+ }
+
+ return str[len(str)-y:], nil
+ },
+}
+
+var SubstringFunction function = function{
+ name: "SUBSTRING",
+ fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType},
+ returnType: cesql.StringType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ str := i[0].(string)
+ pos := int(i[1].(int32))
+
+ if pos == 0 {
+ return "", nil
+ }
+
+ if pos < -len(str) || pos > len(str) {
+ return "", sqlerrors.NewFunctionEvaluationError(fmt.Errorf("SUBSTRING invalid pos argument: %d", pos))
+ }
+
+ var beginning int
+ if pos < 0 {
+ beginning = len(str) + pos
+ } else {
+ beginning = pos - 1
+ }
+
+ return str[beginning:], nil
+ },
+}
+
+var SubstringWithLengthFunction function = function{
+ name: "SUBSTRING",
+ fixedArgs: []cesql.Type{cesql.StringType, cesql.IntegerType, cesql.IntegerType},
+ returnType: cesql.StringType,
+ fn: func(event cloudevents.Event, i []interface{}) (interface{}, error) {
+ str := i[0].(string)
+ pos := int(i[1].(int32))
+ length := int(i[2].(int32))
+
+ if pos == 0 {
+ return "", nil
+ }
+
+ if pos < -len(str) || pos > len(str) {
+ return "", sqlerrors.NewFunctionEvaluationError(fmt.Errorf("SUBSTRING invalid pos argument: %d", pos))
+ }
+
+ var beginning int
+ if pos < 0 {
+ beginning = len(str) + pos
+ } else {
+ beginning = pos - 1
+ }
+
+ var end int
+ if beginning+length > len(str) {
+ end = len(str)
+ } else {
+ end = beginning + length
+ }
+
+ return str[beginning:end], nil
+ },
+}
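SUBSTRING positions are 1-based: a `pos` of 0 yields the empty string, a negative `pos` counts back from the end of the string, and the three-argument form clamps `length` so the slice never runs past the end. A short sketch exercising both exported variants directly:

```go
package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/sql/v2/function"
	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	e := cloudevents.NewEvent()

	// 1-based pos 5 with length 6 over "com.example" -> "exampl".
	fmt.Println(function.SubstringWithLengthFunction.Run(
		e, []interface{}{"com.example", int32(5), int32(6)}))

	// Negative pos counts from the end: -7 -> "example".
	fmt.Println(function.SubstringFunction.Run(
		e, []interface{}{"com.example", int32(-7)}))
}
```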
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParser.interp b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParser.interp
new file mode 100644
index 000000000..51dce9280
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParser.interp
@@ -0,0 +1,87 @@
+token literal names:
+null
+null
+'('
+')'
+','
+'\''
+'"'
+'AND'
+'OR'
+'XOR'
+'NOT'
+'*'
+'/'
+'%'
+'+'
+'-'
+'='
+'!='
+'>'
+'>='
+'<'
+'<>'
+'<='
+'LIKE'
+'EXISTS'
+'IN'
+'TRUE'
+'FALSE'
+null
+null
+null
+null
+null
+null
+
+token symbolic names:
+null
+SPACE
+LR_BRACKET
+RR_BRACKET
+COMMA
+SINGLE_QUOTE_SYMB
+DOUBLE_QUOTE_SYMB
+AND
+OR
+XOR
+NOT
+STAR
+DIVIDE
+MODULE
+PLUS
+MINUS
+EQUAL
+NOT_EQUAL
+GREATER
+GREATER_OR_EQUAL
+LESS
+LESS_GREATER
+LESS_OR_EQUAL
+LIKE
+EXISTS
+IN
+TRUE
+FALSE
+DQUOTED_STRING_LITERAL
+SQUOTED_STRING_LITERAL
+INTEGER_LITERAL
+IDENTIFIER
+IDENTIFIER_WITH_NUMBER
+FUNCTION_IDENTIFIER_WITH_UNDERSCORE
+
+rule names:
+cesql
+expression
+atom
+identifier
+functionIdentifier
+booleanLiteral
+stringLiteral
+integerLiteral
+functionParameterList
+setExpression
+
+
+atn:
+[4, 1, 33, 110, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 39, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 55, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 61, 8, 1, 1, 1, 1, 1, 5, 1, 65, 8, 1, 10, 1, 12, 1, 68, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 74, 8, 2, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 5, 8, 90, 8, 8, 10, 8, 12, 8, 93, 9, 8, 3, 8, 95, 8, 8, 1, 8, 1, 8, 1, 9, 1, 9, 1, 9, 1, 9, 5, 9, 103, 8, 9, 10, 9, 12, 9, 106, 9, 9, 1, 9, 1, 9, 1, 9, 0, 1, 2, 10, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 0, 8, 1, 0, 11, 13, 1, 0, 14, 15, 1, 0, 16, 22, 1, 0, 7, 9, 1, 0, 31, 32, 2, 0, 31, 31, 33, 33, 1, 0, 26, 27, 1, 0, 28, 29, 118, 0, 20, 1, 0, 0, 0, 2, 38, 1, 0, 0, 0, 4, 73, 1, 0, 0, 0, 6, 75, 1, 0, 0, 0, 8, 77, 1, 0, 0, 0, 10, 79, 1, 0, 0, 0, 12, 81, 1, 0, 0, 0, 14, 83, 1, 0, 0, 0, 16, 85, 1, 0, 0, 0, 18, 98, 1, 0, 0, 0, 20, 21, 3, 2, 1, 0, 21, 22, 5, 0, 0, 1, 22, 1, 1, 0, 0, 0, 23, 24, 6, 1, -1, 0, 24, 25, 3, 8, 4, 0, 25, 26, 3, 16, 8, 0, 26, 39, 1, 0, 0, 0, 27, 28, 5, 10, 0, 0, 28, 39, 3, 2, 1, 11, 29, 30, 5, 15, 0, 0, 30, 39, 3, 2, 1, 10, 31, 32, 5, 24, 0, 0, 32, 39, 3, 6, 3, 0, 33, 34, 5, 2, 0, 0, 34, 35, 3, 2, 1, 0, 35, 36, 5, 3, 0, 0, 36, 39, 1, 0, 0, 0, 37, 39, 3, 4, 2, 0, 38, 23, 1, 0, 0, 0, 38, 27, 1, 0, 0, 0, 38, 29, 1, 0, 0, 0, 38, 31, 1, 0, 0, 0, 38, 33, 1, 0, 0, 0, 38, 37, 1, 0, 0, 0, 39, 66, 1, 0, 0, 0, 40, 41, 10, 6, 0, 0, 41, 42, 7, 0, 0, 0, 42, 65, 3, 2, 1, 7, 43, 44, 10, 5, 0, 0, 44, 45, 7, 1, 0, 0, 45, 65, 3, 2, 1, 6, 46, 47, 10, 4, 0, 0, 47, 48, 7, 2, 0, 0, 48, 65, 3, 2, 1, 5, 49, 50, 10, 3, 0, 0, 50, 51, 7, 3, 0, 0, 51, 65, 3, 2, 1, 3, 52, 54, 10, 9, 0, 0, 53, 55, 5, 10, 0, 0, 54, 53, 1, 0, 0, 0, 54, 55, 1, 0, 0, 0, 55, 56, 1, 0, 0, 0, 56, 57, 5, 23, 0, 0, 57, 65, 3, 12, 6, 0, 58, 60, 10, 7, 0, 0, 59, 61, 5, 10, 0, 0, 60, 59, 1, 0, 0, 0, 60, 61, 1, 0, 0, 0, 61, 62, 1, 0, 0, 0, 62, 63, 5, 25, 0, 0, 63, 65, 3, 18, 9, 0, 64, 40, 1, 0, 0, 0, 64, 43, 1, 0, 0, 0, 64, 46, 1, 0, 0, 0, 64, 49, 1, 0, 0, 0, 64, 52, 1, 0, 0, 0, 64, 58, 1, 0, 0, 0, 65, 68, 1, 0, 0, 0, 66, 64, 1, 0, 0, 0, 66, 67, 1, 0, 0, 0, 67, 3, 1, 0, 0, 0, 68, 66, 1, 0, 0, 0, 69, 74, 3, 10, 5, 0, 70, 74, 3, 14, 7, 0, 71, 74, 3, 12, 6, 0, 72, 74, 3, 6, 3, 0, 73, 69, 1, 0, 0, 0, 73, 70, 1, 0, 0, 0, 73, 71, 1, 0, 0, 0, 73, 72, 1, 0, 0, 0, 74, 5, 1, 0, 0, 0, 75, 76, 7, 4, 0, 0, 76, 7, 1, 0, 0, 0, 77, 78, 7, 5, 0, 0, 78, 9, 1, 0, 0, 0, 79, 80, 7, 6, 0, 0, 80, 11, 1, 0, 0, 0, 81, 82, 7, 7, 0, 0, 82, 13, 1, 0, 0, 0, 83, 84, 5, 30, 0, 0, 84, 15, 1, 0, 0, 0, 85, 94, 5, 2, 0, 0, 86, 91, 3, 2, 1, 0, 87, 88, 5, 4, 0, 0, 88, 90, 3, 2, 1, 0, 89, 87, 1, 0, 0, 0, 90, 93, 1, 0, 0, 0, 91, 89, 1, 0, 0, 0, 91, 92, 1, 0, 0, 0, 92, 95, 1, 0, 0, 0, 93, 91, 1, 0, 0, 0, 94, 86, 1, 0, 0, 0, 94, 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 97, 5, 3, 0, 0, 97, 17, 1, 0, 0, 0, 98, 99, 5, 2, 0, 0, 99, 104, 3, 2, 1, 0, 100, 101, 5, 4, 0, 0, 101, 103, 3, 2, 1, 0, 102, 100, 1, 0, 0, 0, 103, 106, 1, 0, 0, 0, 104, 102, 1, 0, 0, 0, 104, 105, 1, 0, 0, 0, 105, 107, 1, 0, 0, 0, 106, 104, 1, 0, 0, 0, 107, 108, 5, 3, 0, 0, 108, 19, 1, 0, 0, 0, 9, 38, 54, 60, 64, 66, 73, 91, 94, 104]
\ No newline at end of file
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParser.tokens b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParser.tokens
new file mode 100644
index 000000000..913f0bc71
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParser.tokens
@@ -0,0 +1,59 @@
+SPACE=1
+LR_BRACKET=2
+RR_BRACKET=3
+COMMA=4
+SINGLE_QUOTE_SYMB=5
+DOUBLE_QUOTE_SYMB=6
+AND=7
+OR=8
+XOR=9
+NOT=10
+STAR=11
+DIVIDE=12
+MODULE=13
+PLUS=14
+MINUS=15
+EQUAL=16
+NOT_EQUAL=17
+GREATER=18
+GREATER_OR_EQUAL=19
+LESS=20
+LESS_GREATER=21
+LESS_OR_EQUAL=22
+LIKE=23
+EXISTS=24
+IN=25
+TRUE=26
+FALSE=27
+DQUOTED_STRING_LITERAL=28
+SQUOTED_STRING_LITERAL=29
+INTEGER_LITERAL=30
+IDENTIFIER=31
+IDENTIFIER_WITH_NUMBER=32
+FUNCTION_IDENTIFIER_WITH_UNDERSCORE=33
+'('=2
+')'=3
+','=4
+'\''=5
+'"'=6
+'AND'=7
+'OR'=8
+'XOR'=9
+'NOT'=10
+'*'=11
+'/'=12
+'%'=13
+'+'=14
+'-'=15
+'='=16
+'!='=17
+'>'=18
+'>='=19
+'<'=20
+'<>'=21
+'<='=22
+'LIKE'=23
+'EXISTS'=24
+'IN'=25
+'TRUE'=26
+'FALSE'=27
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParserLexer.interp b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParserLexer.interp
new file mode 100644
index 000000000..befa543b4
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParserLexer.interp
@@ -0,0 +1,122 @@
+token literal names:
+null
+null
+'('
+')'
+','
+'\''
+'"'
+'AND'
+'OR'
+'XOR'
+'NOT'
+'*'
+'/'
+'%'
+'+'
+'-'
+'='
+'!='
+'>'
+'>='
+'<'
+'<>'
+'<='
+'LIKE'
+'EXISTS'
+'IN'
+'TRUE'
+'FALSE'
+null
+null
+null
+null
+null
+null
+
+token symbolic names:
+null
+SPACE
+LR_BRACKET
+RR_BRACKET
+COMMA
+SINGLE_QUOTE_SYMB
+DOUBLE_QUOTE_SYMB
+AND
+OR
+XOR
+NOT
+STAR
+DIVIDE
+MODULE
+PLUS
+MINUS
+EQUAL
+NOT_EQUAL
+GREATER
+GREATER_OR_EQUAL
+LESS
+LESS_GREATER
+LESS_OR_EQUAL
+LIKE
+EXISTS
+IN
+TRUE
+FALSE
+DQUOTED_STRING_LITERAL
+SQUOTED_STRING_LITERAL
+INTEGER_LITERAL
+IDENTIFIER
+IDENTIFIER_WITH_NUMBER
+FUNCTION_IDENTIFIER_WITH_UNDERSCORE
+
+rule names:
+SPACE
+ID_LITERAL
+DQUOTA_STRING
+SQUOTA_STRING
+INT_DIGIT
+FN_LITERAL
+LR_BRACKET
+RR_BRACKET
+COMMA
+SINGLE_QUOTE_SYMB
+DOUBLE_QUOTE_SYMB
+QUOTE_SYMB
+AND
+OR
+XOR
+NOT
+STAR
+DIVIDE
+MODULE
+PLUS
+MINUS
+EQUAL
+NOT_EQUAL
+GREATER
+GREATER_OR_EQUAL
+LESS
+LESS_GREATER
+LESS_OR_EQUAL
+LIKE
+EXISTS
+IN
+TRUE
+FALSE
+DQUOTED_STRING_LITERAL
+SQUOTED_STRING_LITERAL
+INTEGER_LITERAL
+IDENTIFIER
+IDENTIFIER_WITH_NUMBER
+FUNCTION_IDENTIFIER_WITH_UNDERSCORE
+
+channel names:
+DEFAULT_TOKEN_CHANNEL
+HIDDEN
+
+mode names:
+DEFAULT_MODE
+
+atn:
+[4, 0, 33, 238, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2, 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15, 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7, 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25, 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2, 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36, 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 1, 0, 4, 0, 81, 8, 0, 11, 0, 12, 0, 82, 1, 0, 1, 0, 1, 1, 4, 1, 88, 8, 1, 11, 1, 12, 1, 89, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 5, 2, 98, 8, 2, 10, 2, 12, 2, 101, 9, 2, 1, 2, 1, 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 5, 3, 111, 8, 3, 10, 3, 12, 3, 114, 9, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 5, 5, 122, 8, 5, 10, 5, 12, 5, 125, 9, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10, 1, 11, 1, 11, 3, 11, 139, 8, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1, 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16, 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 21, 1, 21, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25, 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1, 28, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 35, 3, 35, 215, 8, 35, 1, 35, 4, 35, 218, 8, 35, 11, 35, 12, 35, 219, 1, 36, 4, 36, 223, 8, 36, 11, 36, 12, 36, 224, 1, 37, 4, 37, 228, 8, 37, 11, 37, 12, 37, 229, 1, 38, 1, 38, 5, 38, 234, 8, 38, 10, 38, 12, 38, 237, 9, 38, 0, 0, 39, 1, 1, 3, 0, 5, 0, 7, 0, 9, 0, 11, 0, 13, 2, 15, 3, 17, 4, 19, 5, 21, 6, 23, 0, 25, 7, 27, 8, 29, 9, 31, 10, 33, 11, 35, 12, 37, 13, 39, 14, 41, 15, 43, 16, 45, 17, 47, 18, 49, 19, 51, 20, 53, 21, 55, 22, 57, 23, 59, 24, 61, 25, 63, 26, 65, 27, 67, 28, 69, 29, 71, 30, 73, 31, 75, 32, 77, 33, 1, 0, 9, 3, 0, 9, 10, 13, 13, 32, 32, 3, 0, 48, 57, 65, 90, 97, 122, 2, 0, 34, 34, 92, 92, 2, 0, 39, 39, 92, 92, 1, 0, 48, 57, 1, 0, 65, 90, 2, 0, 65, 90, 95, 95, 2, 0, 43, 43, 45, 45, 2, 0, 65, 90, 97, 122, 246, 0, 1, 1, 0, 0, 0, 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0, 0, 0, 21, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0, 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1, 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45, 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0, 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0, 0, 61, 1, 0, 0, 0, 0, 63, 1, 0, 0, 0, 0, 65, 1, 0, 0, 0, 0, 67, 1, 0, 0, 0, 0, 69, 1, 0, 0, 0, 0, 71, 1, 0, 0, 0, 0, 73, 1, 0, 0, 0, 0, 75, 1, 0, 0, 0, 0, 77, 1, 0, 0, 0, 1, 80, 1, 0, 0, 0, 3, 87, 1, 0, 0, 0, 5, 91, 1, 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 117, 1, 0, 0, 0, 11, 119, 1, 0, 0, 0, 13, 126, 1, 0, 0, 0, 15, 128, 1, 0, 0, 0, 17, 130, 1, 0, 0, 0, 19, 132, 1, 0, 0, 0, 21, 134, 1, 0, 0, 0, 23, 138, 1, 0, 0, 0, 25, 140, 1, 0, 0, 0, 27, 144, 1, 0, 0, 0, 29, 147, 1, 0, 0, 0, 31, 151, 1, 0, 0, 0, 33, 155, 1, 0, 0, 0, 35, 157, 1, 0, 0, 0, 37, 159, 1, 0, 0, 0, 39, 161, 1, 0, 0, 0, 41, 163, 1, 0, 0, 0, 43, 165, 1, 0, 0, 0, 45, 167, 1, 0, 0, 0, 47, 170, 1, 0, 0, 0, 49, 172, 1, 0, 0, 0, 51, 175, 1, 0, 0, 0, 53, 177, 1, 0, 0, 0, 55, 180, 1, 0, 0, 0, 57, 183, 1, 0, 0, 0, 59, 188, 1, 0, 0, 0, 61, 195, 1, 0, 0, 0, 63, 198, 1, 0, 0, 0, 65, 203, 1, 0, 0, 0, 67, 
209, 1, 0, 0, 0, 69, 211, 1, 0, 0, 0, 71, 214, 1, 0, 0, 0, 73, 222, 1, 0, 0, 0, 75, 227, 1, 0, 0, 0, 77, 231, 1, 0, 0, 0, 79, 81, 7, 0, 0, 0, 80, 79, 1, 0, 0, 0, 81, 82, 1, 0, 0, 0, 82, 80, 1, 0, 0, 0, 82, 83, 1, 0, 0, 0, 83, 84, 1, 0, 0, 0, 84, 85, 6, 0, 0, 0, 85, 2, 1, 0, 0, 0, 86, 88, 7, 1, 0, 0, 87, 86, 1, 0, 0, 0, 88, 89, 1, 0, 0, 0, 89, 87, 1, 0, 0, 0, 89, 90, 1, 0, 0, 0, 90, 4, 1, 0, 0, 0, 91, 99, 5, 34, 0, 0, 92, 93, 5, 92, 0, 0, 93, 98, 9, 0, 0, 0, 94, 95, 5, 34, 0, 0, 95, 98, 5, 34, 0, 0, 96, 98, 8, 2, 0, 0, 97, 92, 1, 0, 0, 0, 97, 94, 1, 0, 0, 0, 97, 96, 1, 0, 0, 0, 98, 101, 1, 0, 0, 0, 99, 97, 1, 0, 0, 0, 99, 100, 1, 0, 0, 0, 100, 102, 1, 0, 0, 0, 101, 99, 1, 0, 0, 0, 102, 103, 5, 34, 0, 0, 103, 6, 1, 0, 0, 0, 104, 112, 5, 39, 0, 0, 105, 106, 5, 92, 0, 0, 106, 111, 9, 0, 0, 0, 107, 108, 5, 39, 0, 0, 108, 111, 5, 39, 0, 0, 109, 111, 8, 3, 0, 0, 110, 105, 1, 0, 0, 0, 110, 107, 1, 0, 0, 0, 110, 109, 1, 0, 0, 0, 111, 114, 1, 0, 0, 0, 112, 110, 1, 0, 0, 0, 112, 113, 1, 0, 0, 0, 113, 115, 1, 0, 0, 0, 114, 112, 1, 0, 0, 0, 115, 116, 5, 39, 0, 0, 116, 8, 1, 0, 0, 0, 117, 118, 7, 4, 0, 0, 118, 10, 1, 0, 0, 0, 119, 123, 7, 5, 0, 0, 120, 122, 7, 6, 0, 0, 121, 120, 1, 0, 0, 0, 122, 125, 1, 0, 0, 0, 123, 121, 1, 0, 0, 0, 123, 124, 1, 0, 0, 0, 124, 12, 1, 0, 0, 0, 125, 123, 1, 0, 0, 0, 126, 127, 5, 40, 0, 0, 127, 14, 1, 0, 0, 0, 128, 129, 5, 41, 0, 0, 129, 16, 1, 0, 0, 0, 130, 131, 5, 44, 0, 0, 131, 18, 1, 0, 0, 0, 132, 133, 5, 39, 0, 0, 133, 20, 1, 0, 0, 0, 134, 135, 5, 34, 0, 0, 135, 22, 1, 0, 0, 0, 136, 139, 3, 19, 9, 0, 137, 139, 3, 21, 10, 0, 138, 136, 1, 0, 0, 0, 138, 137, 1, 0, 0, 0, 139, 24, 1, 0, 0, 0, 140, 141, 5, 65, 0, 0, 141, 142, 5, 78, 0, 0, 142, 143, 5, 68, 0, 0, 143, 26, 1, 0, 0, 0, 144, 145, 5, 79, 0, 0, 145, 146, 5, 82, 0, 0, 146, 28, 1, 0, 0, 0, 147, 148, 5, 88, 0, 0, 148, 149, 5, 79, 0, 0, 149, 150, 5, 82, 0, 0, 150, 30, 1, 0, 0, 0, 151, 152, 5, 78, 0, 0, 152, 153, 5, 79, 0, 0, 153, 154, 5, 84, 0, 0, 154, 32, 1, 0, 0, 0, 155, 156, 5, 42, 0, 0, 156, 34, 1, 0, 0, 0, 157, 158, 5, 47, 0, 0, 158, 36, 1, 0, 0, 0, 159, 160, 5, 37, 0, 0, 160, 38, 1, 0, 0, 0, 161, 162, 5, 43, 0, 0, 162, 40, 1, 0, 0, 0, 163, 164, 5, 45, 0, 0, 164, 42, 1, 0, 0, 0, 165, 166, 5, 61, 0, 0, 166, 44, 1, 0, 0, 0, 167, 168, 5, 33, 0, 0, 168, 169, 5, 61, 0, 0, 169, 46, 1, 0, 0, 0, 170, 171, 5, 62, 0, 0, 171, 48, 1, 0, 0, 0, 172, 173, 5, 62, 0, 0, 173, 174, 5, 61, 0, 0, 174, 50, 1, 0, 0, 0, 175, 176, 5, 60, 0, 0, 176, 52, 1, 0, 0, 0, 177, 178, 5, 60, 0, 0, 178, 179, 5, 62, 0, 0, 179, 54, 1, 0, 0, 0, 180, 181, 5, 60, 0, 0, 181, 182, 5, 61, 0, 0, 182, 56, 1, 0, 0, 0, 183, 184, 5, 76, 0, 0, 184, 185, 5, 73, 0, 0, 185, 186, 5, 75, 0, 0, 186, 187, 5, 69, 0, 0, 187, 58, 1, 0, 0, 0, 188, 189, 5, 69, 0, 0, 189, 190, 5, 88, 0, 0, 190, 191, 5, 73, 0, 0, 191, 192, 5, 83, 0, 0, 192, 193, 5, 84, 0, 0, 193, 194, 5, 83, 0, 0, 194, 60, 1, 0, 0, 0, 195, 196, 5, 73, 0, 0, 196, 197, 5, 78, 0, 0, 197, 62, 1, 0, 0, 0, 198, 199, 5, 84, 0, 0, 199, 200, 5, 82, 0, 0, 200, 201, 5, 85, 0, 0, 201, 202, 5, 69, 0, 0, 202, 64, 1, 0, 0, 0, 203, 204, 5, 70, 0, 0, 204, 205, 5, 65, 0, 0, 205, 206, 5, 76, 0, 0, 206, 207, 5, 83, 0, 0, 207, 208, 5, 69, 0, 0, 208, 66, 1, 0, 0, 0, 209, 210, 3, 5, 2, 0, 210, 68, 1, 0, 0, 0, 211, 212, 3, 7, 3, 0, 212, 70, 1, 0, 0, 0, 213, 215, 7, 7, 0, 0, 214, 213, 1, 0, 0, 0, 214, 215, 1, 0, 0, 0, 215, 217, 1, 0, 0, 0, 216, 218, 3, 9, 4, 0, 217, 216, 1, 0, 0, 0, 218, 219, 1, 0, 0, 0, 219, 217, 1, 0, 0, 0, 219, 220, 1, 0, 0, 0, 220, 72, 1, 0, 0, 0, 221, 223, 7, 8, 0, 0, 222, 221, 1, 0, 0, 0, 223, 224, 1, 0, 0, 0, 
224, 222, 1, 0, 0, 0, 224, 225, 1, 0, 0, 0, 225, 74, 1, 0, 0, 0, 226, 228, 7, 1, 0, 0, 227, 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, 230, 1, 0, 0, 0, 230, 76, 1, 0, 0, 0, 231, 235, 7, 5, 0, 0, 232, 234, 7, 6, 0, 0, 233, 232, 1, 0, 0, 0, 234, 237, 1, 0, 0, 0, 235, 233, 1, 0, 0, 0, 235, 236, 1, 0, 0, 0, 236, 78, 1, 0, 0, 0, 237, 235, 1, 0, 0, 0, 14, 0, 82, 89, 97, 99, 110, 112, 123, 138, 214, 219, 224, 229, 235, 1, 6, 0, 0]
\ No newline at end of file
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParserLexer.tokens b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParserLexer.tokens
new file mode 100644
index 000000000..913f0bc71
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/CESQLParserLexer.tokens
@@ -0,0 +1,59 @@
+SPACE=1
+LR_BRACKET=2
+RR_BRACKET=3
+COMMA=4
+SINGLE_QUOTE_SYMB=5
+DOUBLE_QUOTE_SYMB=6
+AND=7
+OR=8
+XOR=9
+NOT=10
+STAR=11
+DIVIDE=12
+MODULE=13
+PLUS=14
+MINUS=15
+EQUAL=16
+NOT_EQUAL=17
+GREATER=18
+GREATER_OR_EQUAL=19
+LESS=20
+LESS_GREATER=21
+LESS_OR_EQUAL=22
+LIKE=23
+EXISTS=24
+IN=25
+TRUE=26
+FALSE=27
+DQUOTED_STRING_LITERAL=28
+SQUOTED_STRING_LITERAL=29
+INTEGER_LITERAL=30
+IDENTIFIER=31
+IDENTIFIER_WITH_NUMBER=32
+FUNCTION_IDENTIFIER_WITH_UNDERSCORE=33
+'('=2
+')'=3
+','=4
+'\''=5
+'"'=6
+'AND'=7
+'OR'=8
+'XOR'=9
+'NOT'=10
+'*'=11
+'/'=12
+'%'=13
+'+'=14
+'-'=15
+'='=16
+'!='=17
+'>'=18
+'>='=19
+'<'=20
+'<>'=21
+'<='=22
+'LIKE'=23
+'EXISTS'=24
+'IN'=25
+'TRUE'=26
+'FALSE'=27
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_base_visitor.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_base_visitor.go
new file mode 100644
index 000000000..5fab13e63
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_base_visitor.go
@@ -0,0 +1,104 @@
+// Code generated from CESQLParser.g4 by ANTLR 4.10.1. DO NOT EDIT.
+
+package gen // CESQLParser
+import "github.com/antlr/antlr4/runtime/Go/antlr"
+
+type BaseCESQLParserVisitor struct {
+ *antlr.BaseParseTreeVisitor
+}
+
+func (v *BaseCESQLParserVisitor) VisitCesql(ctx *CesqlContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitInExpression(ctx *InExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitBinaryComparisonExpression(ctx *BinaryComparisonExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitAtomExpression(ctx *AtomExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitExistsExpression(ctx *ExistsExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitBinaryLogicExpression(ctx *BinaryLogicExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitLikeExpression(ctx *LikeExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitFunctionInvocationExpression(ctx *FunctionInvocationExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitBinaryMultiplicativeExpression(ctx *BinaryMultiplicativeExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitUnaryLogicExpression(ctx *UnaryLogicExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitUnaryNumericExpression(ctx *UnaryNumericExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitSubExpression(ctx *SubExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitBinaryAdditiveExpression(ctx *BinaryAdditiveExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitBooleanAtom(ctx *BooleanAtomContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitIntegerAtom(ctx *IntegerAtomContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitStringAtom(ctx *StringAtomContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitIdentifierAtom(ctx *IdentifierAtomContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitIdentifier(ctx *IdentifierContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitFunctionIdentifier(ctx *FunctionIdentifierContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitBooleanLiteral(ctx *BooleanLiteralContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitStringLiteral(ctx *StringLiteralContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitIntegerLiteral(ctx *IntegerLiteralContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitFunctionParameterList(ctx *FunctionParameterListContext) interface{} {
+ return v.VisitChildren(ctx)
+}
+
+func (v *BaseCESQLParserVisitor) VisitSetExpression(ctx *SetExpressionContext) interface{} {
+ return v.VisitChildren(ctx)
+}
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_lexer.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_lexer.go
new file mode 100644
index 000000000..22dc8dacc
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_lexer.go
@@ -0,0 +1,247 @@
+// Code generated from CESQLParser.g4 by ANTLR 4.10.1. DO NOT EDIT.
+
+package gen
+
+import (
+ "fmt"
+ "sync"
+ "unicode"
+
+ "github.com/antlr/antlr4/runtime/Go/antlr"
+)
+
+// Suppress unused import error
+var _ = fmt.Printf
+var _ = sync.Once{}
+var _ = unicode.IsLetter
+
+type CESQLParserLexer struct {
+ *antlr.BaseLexer
+ channelNames []string
+ modeNames []string
+ // TODO: EOF string
+}
+
+var cesqlparserlexerLexerStaticData struct {
+ once sync.Once
+ serializedATN []int32
+ channelNames []string
+ modeNames []string
+ literalNames []string
+ symbolicNames []string
+ ruleNames []string
+ predictionContextCache *antlr.PredictionContextCache
+ atn *antlr.ATN
+ decisionToDFA []*antlr.DFA
+}
+
+func cesqlparserlexerLexerInit() {
+ staticData := &cesqlparserlexerLexerStaticData
+ staticData.channelNames = []string{
+ "DEFAULT_TOKEN_CHANNEL", "HIDDEN",
+ }
+ staticData.modeNames = []string{
+ "DEFAULT_MODE",
+ }
+ staticData.literalNames = []string{
+ "", "", "'('", "')'", "','", "'''", "'\"'", "'AND'", "'OR'", "'XOR'",
+ "'NOT'", "'*'", "'/'", "'%'", "'+'", "'-'", "'='", "'!='", "'>'", "'>='",
+ "'<'", "'<>'", "'<='", "'LIKE'", "'EXISTS'", "'IN'", "'TRUE'", "'FALSE'",
+ }
+ staticData.symbolicNames = []string{
+ "", "SPACE", "LR_BRACKET", "RR_BRACKET", "COMMA", "SINGLE_QUOTE_SYMB",
+ "DOUBLE_QUOTE_SYMB", "AND", "OR", "XOR", "NOT", "STAR", "DIVIDE", "MODULE",
+ "PLUS", "MINUS", "EQUAL", "NOT_EQUAL", "GREATER", "GREATER_OR_EQUAL",
+ "LESS", "LESS_GREATER", "LESS_OR_EQUAL", "LIKE", "EXISTS", "IN", "TRUE",
+ "FALSE", "DQUOTED_STRING_LITERAL", "SQUOTED_STRING_LITERAL", "INTEGER_LITERAL",
+ "IDENTIFIER", "IDENTIFIER_WITH_NUMBER", "FUNCTION_IDENTIFIER_WITH_UNDERSCORE",
+ }
+ staticData.ruleNames = []string{
+ "SPACE", "ID_LITERAL", "DQUOTA_STRING", "SQUOTA_STRING", "INT_DIGIT",
+ "FN_LITERAL", "LR_BRACKET", "RR_BRACKET", "COMMA", "SINGLE_QUOTE_SYMB",
+ "DOUBLE_QUOTE_SYMB", "QUOTE_SYMB", "AND", "OR", "XOR", "NOT", "STAR",
+ "DIVIDE", "MODULE", "PLUS", "MINUS", "EQUAL", "NOT_EQUAL", "GREATER",
+ "GREATER_OR_EQUAL", "LESS", "LESS_GREATER", "LESS_OR_EQUAL", "LIKE",
+ "EXISTS", "IN", "TRUE", "FALSE", "DQUOTED_STRING_LITERAL", "SQUOTED_STRING_LITERAL",
+ "INTEGER_LITERAL", "IDENTIFIER", "IDENTIFIER_WITH_NUMBER", "FUNCTION_IDENTIFIER_WITH_UNDERSCORE",
+ }
+ staticData.predictionContextCache = antlr.NewPredictionContextCache()
+ staticData.serializedATN = []int32{
+ 4, 0, 33, 238, 6, -1, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2,
+ 4, 7, 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 2,
+ 10, 7, 10, 2, 11, 7, 11, 2, 12, 7, 12, 2, 13, 7, 13, 2, 14, 7, 14, 2, 15,
+ 7, 15, 2, 16, 7, 16, 2, 17, 7, 17, 2, 18, 7, 18, 2, 19, 7, 19, 2, 20, 7,
+ 20, 2, 21, 7, 21, 2, 22, 7, 22, 2, 23, 7, 23, 2, 24, 7, 24, 2, 25, 7, 25,
+ 2, 26, 7, 26, 2, 27, 7, 27, 2, 28, 7, 28, 2, 29, 7, 29, 2, 30, 7, 30, 2,
+ 31, 7, 31, 2, 32, 7, 32, 2, 33, 7, 33, 2, 34, 7, 34, 2, 35, 7, 35, 2, 36,
+ 7, 36, 2, 37, 7, 37, 2, 38, 7, 38, 1, 0, 4, 0, 81, 8, 0, 11, 0, 12, 0,
+ 82, 1, 0, 1, 0, 1, 1, 4, 1, 88, 8, 1, 11, 1, 12, 1, 89, 1, 2, 1, 2, 1,
+ 2, 1, 2, 1, 2, 1, 2, 5, 2, 98, 8, 2, 10, 2, 12, 2, 101, 9, 2, 1, 2, 1,
+ 2, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 1, 3, 5, 3, 111, 8, 3, 10, 3, 12, 3, 114,
+ 9, 3, 1, 3, 1, 3, 1, 4, 1, 4, 1, 5, 1, 5, 5, 5, 122, 8, 5, 10, 5, 12, 5,
+ 125, 9, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 9, 1, 9, 1, 10, 1, 10,
+ 1, 11, 1, 11, 3, 11, 139, 8, 11, 1, 12, 1, 12, 1, 12, 1, 12, 1, 13, 1,
+ 13, 1, 13, 1, 14, 1, 14, 1, 14, 1, 14, 1, 15, 1, 15, 1, 15, 1, 15, 1, 16,
+ 1, 16, 1, 17, 1, 17, 1, 18, 1, 18, 1, 19, 1, 19, 1, 20, 1, 20, 1, 21, 1,
+ 21, 1, 22, 1, 22, 1, 22, 1, 23, 1, 23, 1, 24, 1, 24, 1, 24, 1, 25, 1, 25,
+ 1, 26, 1, 26, 1, 26, 1, 27, 1, 27, 1, 27, 1, 28, 1, 28, 1, 28, 1, 28, 1,
+ 28, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 29, 1, 30, 1, 30, 1, 30,
+ 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 32, 1, 32, 1, 32, 1, 32, 1, 32, 1,
+ 32, 1, 33, 1, 33, 1, 34, 1, 34, 1, 35, 3, 35, 215, 8, 35, 1, 35, 4, 35,
+ 218, 8, 35, 11, 35, 12, 35, 219, 1, 36, 4, 36, 223, 8, 36, 11, 36, 12,
+ 36, 224, 1, 37, 4, 37, 228, 8, 37, 11, 37, 12, 37, 229, 1, 38, 1, 38, 5,
+ 38, 234, 8, 38, 10, 38, 12, 38, 237, 9, 38, 0, 0, 39, 1, 1, 3, 0, 5, 0,
+ 7, 0, 9, 0, 11, 0, 13, 2, 15, 3, 17, 4, 19, 5, 21, 6, 23, 0, 25, 7, 27,
+ 8, 29, 9, 31, 10, 33, 11, 35, 12, 37, 13, 39, 14, 41, 15, 43, 16, 45, 17,
+ 47, 18, 49, 19, 51, 20, 53, 21, 55, 22, 57, 23, 59, 24, 61, 25, 63, 26,
+ 65, 27, 67, 28, 69, 29, 71, 30, 73, 31, 75, 32, 77, 33, 1, 0, 9, 3, 0,
+ 9, 10, 13, 13, 32, 32, 3, 0, 48, 57, 65, 90, 97, 122, 2, 0, 34, 34, 92,
+ 92, 2, 0, 39, 39, 92, 92, 1, 0, 48, 57, 1, 0, 65, 90, 2, 0, 65, 90, 95,
+ 95, 2, 0, 43, 43, 45, 45, 2, 0, 65, 90, 97, 122, 246, 0, 1, 1, 0, 0, 0,
+ 0, 13, 1, 0, 0, 0, 0, 15, 1, 0, 0, 0, 0, 17, 1, 0, 0, 0, 0, 19, 1, 0, 0,
+ 0, 0, 21, 1, 0, 0, 0, 0, 25, 1, 0, 0, 0, 0, 27, 1, 0, 0, 0, 0, 29, 1, 0,
+ 0, 0, 0, 31, 1, 0, 0, 0, 0, 33, 1, 0, 0, 0, 0, 35, 1, 0, 0, 0, 0, 37, 1,
+ 0, 0, 0, 0, 39, 1, 0, 0, 0, 0, 41, 1, 0, 0, 0, 0, 43, 1, 0, 0, 0, 0, 45,
+ 1, 0, 0, 0, 0, 47, 1, 0, 0, 0, 0, 49, 1, 0, 0, 0, 0, 51, 1, 0, 0, 0, 0,
+ 53, 1, 0, 0, 0, 0, 55, 1, 0, 0, 0, 0, 57, 1, 0, 0, 0, 0, 59, 1, 0, 0, 0,
+ 0, 61, 1, 0, 0, 0, 0, 63, 1, 0, 0, 0, 0, 65, 1, 0, 0, 0, 0, 67, 1, 0, 0,
+ 0, 0, 69, 1, 0, 0, 0, 0, 71, 1, 0, 0, 0, 0, 73, 1, 0, 0, 0, 0, 75, 1, 0,
+ 0, 0, 0, 77, 1, 0, 0, 0, 1, 80, 1, 0, 0, 0, 3, 87, 1, 0, 0, 0, 5, 91, 1,
+ 0, 0, 0, 7, 104, 1, 0, 0, 0, 9, 117, 1, 0, 0, 0, 11, 119, 1, 0, 0, 0, 13,
+ 126, 1, 0, 0, 0, 15, 128, 1, 0, 0, 0, 17, 130, 1, 0, 0, 0, 19, 132, 1,
+ 0, 0, 0, 21, 134, 1, 0, 0, 0, 23, 138, 1, 0, 0, 0, 25, 140, 1, 0, 0, 0,
+ 27, 144, 1, 0, 0, 0, 29, 147, 1, 0, 0, 0, 31, 151, 1, 0, 0, 0, 33, 155,
+ 1, 0, 0, 0, 35, 157, 1, 0, 0, 0, 37, 159, 1, 0, 0, 0, 39, 161, 1, 0, 0,
+ 0, 41, 163, 1, 0, 0, 0, 43, 165, 1, 0, 0, 0, 45, 167, 1, 0, 0, 0, 47, 170,
+ 1, 0, 0, 0, 49, 172, 1, 0, 0, 0, 51, 175, 1, 0, 0, 0, 53, 177, 1, 0, 0,
+ 0, 55, 180, 1, 0, 0, 0, 57, 183, 1, 0, 0, 0, 59, 188, 1, 0, 0, 0, 61, 195,
+ 1, 0, 0, 0, 63, 198, 1, 0, 0, 0, 65, 203, 1, 0, 0, 0, 67, 209, 1, 0, 0,
+ 0, 69, 211, 1, 0, 0, 0, 71, 214, 1, 0, 0, 0, 73, 222, 1, 0, 0, 0, 75, 227,
+ 1, 0, 0, 0, 77, 231, 1, 0, 0, 0, 79, 81, 7, 0, 0, 0, 80, 79, 1, 0, 0, 0,
+ 81, 82, 1, 0, 0, 0, 82, 80, 1, 0, 0, 0, 82, 83, 1, 0, 0, 0, 83, 84, 1,
+ 0, 0, 0, 84, 85, 6, 0, 0, 0, 85, 2, 1, 0, 0, 0, 86, 88, 7, 1, 0, 0, 87,
+ 86, 1, 0, 0, 0, 88, 89, 1, 0, 0, 0, 89, 87, 1, 0, 0, 0, 89, 90, 1, 0, 0,
+ 0, 90, 4, 1, 0, 0, 0, 91, 99, 5, 34, 0, 0, 92, 93, 5, 92, 0, 0, 93, 98,
+ 9, 0, 0, 0, 94, 95, 5, 34, 0, 0, 95, 98, 5, 34, 0, 0, 96, 98, 8, 2, 0,
+ 0, 97, 92, 1, 0, 0, 0, 97, 94, 1, 0, 0, 0, 97, 96, 1, 0, 0, 0, 98, 101,
+ 1, 0, 0, 0, 99, 97, 1, 0, 0, 0, 99, 100, 1, 0, 0, 0, 100, 102, 1, 0, 0,
+ 0, 101, 99, 1, 0, 0, 0, 102, 103, 5, 34, 0, 0, 103, 6, 1, 0, 0, 0, 104,
+ 112, 5, 39, 0, 0, 105, 106, 5, 92, 0, 0, 106, 111, 9, 0, 0, 0, 107, 108,
+ 5, 39, 0, 0, 108, 111, 5, 39, 0, 0, 109, 111, 8, 3, 0, 0, 110, 105, 1,
+ 0, 0, 0, 110, 107, 1, 0, 0, 0, 110, 109, 1, 0, 0, 0, 111, 114, 1, 0, 0,
+ 0, 112, 110, 1, 0, 0, 0, 112, 113, 1, 0, 0, 0, 113, 115, 1, 0, 0, 0, 114,
+ 112, 1, 0, 0, 0, 115, 116, 5, 39, 0, 0, 116, 8, 1, 0, 0, 0, 117, 118, 7,
+ 4, 0, 0, 118, 10, 1, 0, 0, 0, 119, 123, 7, 5, 0, 0, 120, 122, 7, 6, 0,
+ 0, 121, 120, 1, 0, 0, 0, 122, 125, 1, 0, 0, 0, 123, 121, 1, 0, 0, 0, 123,
+ 124, 1, 0, 0, 0, 124, 12, 1, 0, 0, 0, 125, 123, 1, 0, 0, 0, 126, 127, 5,
+ 40, 0, 0, 127, 14, 1, 0, 0, 0, 128, 129, 5, 41, 0, 0, 129, 16, 1, 0, 0,
+ 0, 130, 131, 5, 44, 0, 0, 131, 18, 1, 0, 0, 0, 132, 133, 5, 39, 0, 0, 133,
+ 20, 1, 0, 0, 0, 134, 135, 5, 34, 0, 0, 135, 22, 1, 0, 0, 0, 136, 139, 3,
+ 19, 9, 0, 137, 139, 3, 21, 10, 0, 138, 136, 1, 0, 0, 0, 138, 137, 1, 0,
+ 0, 0, 139, 24, 1, 0, 0, 0, 140, 141, 5, 65, 0, 0, 141, 142, 5, 78, 0, 0,
+ 142, 143, 5, 68, 0, 0, 143, 26, 1, 0, 0, 0, 144, 145, 5, 79, 0, 0, 145,
+ 146, 5, 82, 0, 0, 146, 28, 1, 0, 0, 0, 147, 148, 5, 88, 0, 0, 148, 149,
+ 5, 79, 0, 0, 149, 150, 5, 82, 0, 0, 150, 30, 1, 0, 0, 0, 151, 152, 5, 78,
+ 0, 0, 152, 153, 5, 79, 0, 0, 153, 154, 5, 84, 0, 0, 154, 32, 1, 0, 0, 0,
+ 155, 156, 5, 42, 0, 0, 156, 34, 1, 0, 0, 0, 157, 158, 5, 47, 0, 0, 158,
+ 36, 1, 0, 0, 0, 159, 160, 5, 37, 0, 0, 160, 38, 1, 0, 0, 0, 161, 162, 5,
+ 43, 0, 0, 162, 40, 1, 0, 0, 0, 163, 164, 5, 45, 0, 0, 164, 42, 1, 0, 0,
+ 0, 165, 166, 5, 61, 0, 0, 166, 44, 1, 0, 0, 0, 167, 168, 5, 33, 0, 0, 168,
+ 169, 5, 61, 0, 0, 169, 46, 1, 0, 0, 0, 170, 171, 5, 62, 0, 0, 171, 48,
+ 1, 0, 0, 0, 172, 173, 5, 62, 0, 0, 173, 174, 5, 61, 0, 0, 174, 50, 1, 0,
+ 0, 0, 175, 176, 5, 60, 0, 0, 176, 52, 1, 0, 0, 0, 177, 178, 5, 60, 0, 0,
+ 178, 179, 5, 62, 0, 0, 179, 54, 1, 0, 0, 0, 180, 181, 5, 60, 0, 0, 181,
+ 182, 5, 61, 0, 0, 182, 56, 1, 0, 0, 0, 183, 184, 5, 76, 0, 0, 184, 185,
+ 5, 73, 0, 0, 185, 186, 5, 75, 0, 0, 186, 187, 5, 69, 0, 0, 187, 58, 1,
+ 0, 0, 0, 188, 189, 5, 69, 0, 0, 189, 190, 5, 88, 0, 0, 190, 191, 5, 73,
+ 0, 0, 191, 192, 5, 83, 0, 0, 192, 193, 5, 84, 0, 0, 193, 194, 5, 83, 0,
+ 0, 194, 60, 1, 0, 0, 0, 195, 196, 5, 73, 0, 0, 196, 197, 5, 78, 0, 0, 197,
+ 62, 1, 0, 0, 0, 198, 199, 5, 84, 0, 0, 199, 200, 5, 82, 0, 0, 200, 201,
+ 5, 85, 0, 0, 201, 202, 5, 69, 0, 0, 202, 64, 1, 0, 0, 0, 203, 204, 5, 70,
+ 0, 0, 204, 205, 5, 65, 0, 0, 205, 206, 5, 76, 0, 0, 206, 207, 5, 83, 0,
+ 0, 207, 208, 5, 69, 0, 0, 208, 66, 1, 0, 0, 0, 209, 210, 3, 5, 2, 0, 210,
+ 68, 1, 0, 0, 0, 211, 212, 3, 7, 3, 0, 212, 70, 1, 0, 0, 0, 213, 215, 7,
+ 7, 0, 0, 214, 213, 1, 0, 0, 0, 214, 215, 1, 0, 0, 0, 215, 217, 1, 0, 0,
+ 0, 216, 218, 3, 9, 4, 0, 217, 216, 1, 0, 0, 0, 218, 219, 1, 0, 0, 0, 219,
+ 217, 1, 0, 0, 0, 219, 220, 1, 0, 0, 0, 220, 72, 1, 0, 0, 0, 221, 223, 7,
+ 8, 0, 0, 222, 221, 1, 0, 0, 0, 223, 224, 1, 0, 0, 0, 224, 222, 1, 0, 0,
+ 0, 224, 225, 1, 0, 0, 0, 225, 74, 1, 0, 0, 0, 226, 228, 7, 1, 0, 0, 227,
+ 226, 1, 0, 0, 0, 228, 229, 1, 0, 0, 0, 229, 227, 1, 0, 0, 0, 229, 230,
+ 1, 0, 0, 0, 230, 76, 1, 0, 0, 0, 231, 235, 7, 5, 0, 0, 232, 234, 7, 6,
+ 0, 0, 233, 232, 1, 0, 0, 0, 234, 237, 1, 0, 0, 0, 235, 233, 1, 0, 0, 0,
+ 235, 236, 1, 0, 0, 0, 236, 78, 1, 0, 0, 0, 237, 235, 1, 0, 0, 0, 14, 0,
+ 82, 89, 97, 99, 110, 112, 123, 138, 214, 219, 224, 229, 235, 1, 6, 0, 0,
+ }
+ deserializer := antlr.NewATNDeserializer(nil)
+ staticData.atn = deserializer.Deserialize(staticData.serializedATN)
+ atn := staticData.atn
+ staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
+ decisionToDFA := staticData.decisionToDFA
+ for index, state := range atn.DecisionToState {
+ decisionToDFA[index] = antlr.NewDFA(state, index)
+ }
+}
+
+// CESQLParserLexerInit initializes any static state used to implement CESQLParserLexer. By default the
+// static state used to implement the lexer is lazily initialized during the first call to
+// NewCESQLParserLexer(). You can call this function if you wish to initialize the static state ahead
+// of time.
+func CESQLParserLexerInit() {
+ staticData := &cesqlparserlexerLexerStaticData
+ staticData.once.Do(cesqlparserlexerLexerInit)
+}
+
+// NewCESQLParserLexer produces a new lexer instance for the optional input antlr.CharStream.
+func NewCESQLParserLexer(input antlr.CharStream) *CESQLParserLexer {
+ CESQLParserLexerInit()
+ l := new(CESQLParserLexer)
+ l.BaseLexer = antlr.NewBaseLexer(input)
+ staticData := &cesqlparserlexerLexerStaticData
+ l.Interpreter = antlr.NewLexerATNSimulator(l, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache)
+ l.channelNames = staticData.channelNames
+ l.modeNames = staticData.modeNames
+ l.RuleNames = staticData.ruleNames
+ l.LiteralNames = staticData.literalNames
+ l.SymbolicNames = staticData.symbolicNames
+ l.GrammarFileName = "CESQLParser.g4"
+ // TODO: l.EOF = antlr.TokenEOF
+
+ return l
+}
+
+// CESQLParserLexer tokens.
+const (
+ CESQLParserLexerSPACE = 1
+ CESQLParserLexerLR_BRACKET = 2
+ CESQLParserLexerRR_BRACKET = 3
+ CESQLParserLexerCOMMA = 4
+ CESQLParserLexerSINGLE_QUOTE_SYMB = 5
+ CESQLParserLexerDOUBLE_QUOTE_SYMB = 6
+ CESQLParserLexerAND = 7
+ CESQLParserLexerOR = 8
+ CESQLParserLexerXOR = 9
+ CESQLParserLexerNOT = 10
+ CESQLParserLexerSTAR = 11
+ CESQLParserLexerDIVIDE = 12
+ CESQLParserLexerMODULE = 13
+ CESQLParserLexerPLUS = 14
+ CESQLParserLexerMINUS = 15
+ CESQLParserLexerEQUAL = 16
+ CESQLParserLexerNOT_EQUAL = 17
+ CESQLParserLexerGREATER = 18
+ CESQLParserLexerGREATER_OR_EQUAL = 19
+ CESQLParserLexerLESS = 20
+ CESQLParserLexerLESS_GREATER = 21
+ CESQLParserLexerLESS_OR_EQUAL = 22
+ CESQLParserLexerLIKE = 23
+ CESQLParserLexerEXISTS = 24
+ CESQLParserLexerIN = 25
+ CESQLParserLexerTRUE = 26
+ CESQLParserLexerFALSE = 27
+ CESQLParserLexerDQUOTED_STRING_LITERAL = 28
+ CESQLParserLexerSQUOTED_STRING_LITERAL = 29
+ CESQLParserLexerINTEGER_LITERAL = 30
+ CESQLParserLexerIDENTIFIER = 31
+ CESQLParserLexerIDENTIFIER_WITH_NUMBER = 32
+ CESQLParserLexerFUNCTION_IDENTIFIER_WITH_UNDERSCORE = 33
+)
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_parser.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_parser.go
new file mode 100644
index 000000000..12842bfbd
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/gen/cesqlparser_parser.go
@@ -0,0 +1,2608 @@
+// Code generated from CESQLParser.g4 by ANTLR 4.10.1. DO NOT EDIT.
+
+package gen // CESQLParser
+import (
+ "fmt"
+ "strconv"
+ "sync"
+
+ "github.com/antlr/antlr4/runtime/Go/antlr"
+)
+
+// Suppress unused import errors
+var _ = fmt.Printf
+var _ = strconv.Itoa
+var _ = sync.Once{}
+
+type CESQLParserParser struct {
+ *antlr.BaseParser
+}
+
+var cesqlparserParserStaticData struct {
+ once sync.Once
+ serializedATN []int32
+ literalNames []string
+ symbolicNames []string
+ ruleNames []string
+ predictionContextCache *antlr.PredictionContextCache
+ atn *antlr.ATN
+ decisionToDFA []*antlr.DFA
+}
+
+func cesqlparserParserInit() {
+ staticData := &cesqlparserParserStaticData
+ staticData.literalNames = []string{
+ "", "", "'('", "')'", "','", "'''", "'\"'", "'AND'", "'OR'", "'XOR'",
+ "'NOT'", "'*'", "'/'", "'%'", "'+'", "'-'", "'='", "'!='", "'>'", "'>='",
+ "'<'", "'<>'", "'<='", "'LIKE'", "'EXISTS'", "'IN'", "'TRUE'", "'FALSE'",
+ }
+ staticData.symbolicNames = []string{
+ "", "SPACE", "LR_BRACKET", "RR_BRACKET", "COMMA", "SINGLE_QUOTE_SYMB",
+ "DOUBLE_QUOTE_SYMB", "AND", "OR", "XOR", "NOT", "STAR", "DIVIDE", "MODULE",
+ "PLUS", "MINUS", "EQUAL", "NOT_EQUAL", "GREATER", "GREATER_OR_EQUAL",
+ "LESS", "LESS_GREATER", "LESS_OR_EQUAL", "LIKE", "EXISTS", "IN", "TRUE",
+ "FALSE", "DQUOTED_STRING_LITERAL", "SQUOTED_STRING_LITERAL", "INTEGER_LITERAL",
+ "IDENTIFIER", "IDENTIFIER_WITH_NUMBER", "FUNCTION_IDENTIFIER_WITH_UNDERSCORE",
+ }
+ staticData.ruleNames = []string{
+ "cesql", "expression", "atom", "identifier", "functionIdentifier", "booleanLiteral",
+ "stringLiteral", "integerLiteral", "functionParameterList", "setExpression",
+ }
+ staticData.predictionContextCache = antlr.NewPredictionContextCache()
+ staticData.serializedATN = []int32{
+ 4, 1, 33, 110, 2, 0, 7, 0, 2, 1, 7, 1, 2, 2, 7, 2, 2, 3, 7, 3, 2, 4, 7,
+ 4, 2, 5, 7, 5, 2, 6, 7, 6, 2, 7, 7, 7, 2, 8, 7, 8, 2, 9, 7, 9, 1, 0, 1,
+ 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 39, 8, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 3, 1, 55, 8, 1, 1,
+ 1, 1, 1, 1, 1, 1, 1, 3, 1, 61, 8, 1, 1, 1, 1, 1, 5, 1, 65, 8, 1, 10, 1,
+ 12, 1, 68, 9, 1, 1, 2, 1, 2, 1, 2, 1, 2, 3, 2, 74, 8, 2, 1, 3, 1, 3, 1,
+ 4, 1, 4, 1, 5, 1, 5, 1, 6, 1, 6, 1, 7, 1, 7, 1, 8, 1, 8, 1, 8, 1, 8, 5,
+ 8, 90, 8, 8, 10, 8, 12, 8, 93, 9, 8, 3, 8, 95, 8, 8, 1, 8, 1, 8, 1, 9,
+ 1, 9, 1, 9, 1, 9, 5, 9, 103, 8, 9, 10, 9, 12, 9, 106, 9, 9, 1, 9, 1, 9,
+ 1, 9, 0, 1, 2, 10, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 0, 8, 1, 0, 11, 13,
+ 1, 0, 14, 15, 1, 0, 16, 22, 1, 0, 7, 9, 1, 0, 31, 32, 2, 0, 31, 31, 33,
+ 33, 1, 0, 26, 27, 1, 0, 28, 29, 118, 0, 20, 1, 0, 0, 0, 2, 38, 1, 0, 0,
+ 0, 4, 73, 1, 0, 0, 0, 6, 75, 1, 0, 0, 0, 8, 77, 1, 0, 0, 0, 10, 79, 1,
+ 0, 0, 0, 12, 81, 1, 0, 0, 0, 14, 83, 1, 0, 0, 0, 16, 85, 1, 0, 0, 0, 18,
+ 98, 1, 0, 0, 0, 20, 21, 3, 2, 1, 0, 21, 22, 5, 0, 0, 1, 22, 1, 1, 0, 0,
+ 0, 23, 24, 6, 1, -1, 0, 24, 25, 3, 8, 4, 0, 25, 26, 3, 16, 8, 0, 26, 39,
+ 1, 0, 0, 0, 27, 28, 5, 10, 0, 0, 28, 39, 3, 2, 1, 11, 29, 30, 5, 15, 0,
+ 0, 30, 39, 3, 2, 1, 10, 31, 32, 5, 24, 0, 0, 32, 39, 3, 6, 3, 0, 33, 34,
+ 5, 2, 0, 0, 34, 35, 3, 2, 1, 0, 35, 36, 5, 3, 0, 0, 36, 39, 1, 0, 0, 0,
+ 37, 39, 3, 4, 2, 0, 38, 23, 1, 0, 0, 0, 38, 27, 1, 0, 0, 0, 38, 29, 1,
+ 0, 0, 0, 38, 31, 1, 0, 0, 0, 38, 33, 1, 0, 0, 0, 38, 37, 1, 0, 0, 0, 39,
+ 66, 1, 0, 0, 0, 40, 41, 10, 6, 0, 0, 41, 42, 7, 0, 0, 0, 42, 65, 3, 2,
+ 1, 7, 43, 44, 10, 5, 0, 0, 44, 45, 7, 1, 0, 0, 45, 65, 3, 2, 1, 6, 46,
+ 47, 10, 4, 0, 0, 47, 48, 7, 2, 0, 0, 48, 65, 3, 2, 1, 5, 49, 50, 10, 3,
+ 0, 0, 50, 51, 7, 3, 0, 0, 51, 65, 3, 2, 1, 3, 52, 54, 10, 9, 0, 0, 53,
+ 55, 5, 10, 0, 0, 54, 53, 1, 0, 0, 0, 54, 55, 1, 0, 0, 0, 55, 56, 1, 0,
+ 0, 0, 56, 57, 5, 23, 0, 0, 57, 65, 3, 12, 6, 0, 58, 60, 10, 7, 0, 0, 59,
+ 61, 5, 10, 0, 0, 60, 59, 1, 0, 0, 0, 60, 61, 1, 0, 0, 0, 61, 62, 1, 0,
+ 0, 0, 62, 63, 5, 25, 0, 0, 63, 65, 3, 18, 9, 0, 64, 40, 1, 0, 0, 0, 64,
+ 43, 1, 0, 0, 0, 64, 46, 1, 0, 0, 0, 64, 49, 1, 0, 0, 0, 64, 52, 1, 0, 0,
+ 0, 64, 58, 1, 0, 0, 0, 65, 68, 1, 0, 0, 0, 66, 64, 1, 0, 0, 0, 66, 67,
+ 1, 0, 0, 0, 67, 3, 1, 0, 0, 0, 68, 66, 1, 0, 0, 0, 69, 74, 3, 10, 5, 0,
+ 70, 74, 3, 14, 7, 0, 71, 74, 3, 12, 6, 0, 72, 74, 3, 6, 3, 0, 73, 69, 1,
+ 0, 0, 0, 73, 70, 1, 0, 0, 0, 73, 71, 1, 0, 0, 0, 73, 72, 1, 0, 0, 0, 74,
+ 5, 1, 0, 0, 0, 75, 76, 7, 4, 0, 0, 76, 7, 1, 0, 0, 0, 77, 78, 7, 5, 0,
+ 0, 78, 9, 1, 0, 0, 0, 79, 80, 7, 6, 0, 0, 80, 11, 1, 0, 0, 0, 81, 82, 7,
+ 7, 0, 0, 82, 13, 1, 0, 0, 0, 83, 84, 5, 30, 0, 0, 84, 15, 1, 0, 0, 0, 85,
+ 94, 5, 2, 0, 0, 86, 91, 3, 2, 1, 0, 87, 88, 5, 4, 0, 0, 88, 90, 3, 2, 1,
+ 0, 89, 87, 1, 0, 0, 0, 90, 93, 1, 0, 0, 0, 91, 89, 1, 0, 0, 0, 91, 92,
+ 1, 0, 0, 0, 92, 95, 1, 0, 0, 0, 93, 91, 1, 0, 0, 0, 94, 86, 1, 0, 0, 0,
+ 94, 95, 1, 0, 0, 0, 95, 96, 1, 0, 0, 0, 96, 97, 5, 3, 0, 0, 97, 17, 1,
+ 0, 0, 0, 98, 99, 5, 2, 0, 0, 99, 104, 3, 2, 1, 0, 100, 101, 5, 4, 0, 0,
+ 101, 103, 3, 2, 1, 0, 102, 100, 1, 0, 0, 0, 103, 106, 1, 0, 0, 0, 104,
+ 102, 1, 0, 0, 0, 104, 105, 1, 0, 0, 0, 105, 107, 1, 0, 0, 0, 106, 104,
+ 1, 0, 0, 0, 107, 108, 5, 3, 0, 0, 108, 19, 1, 0, 0, 0, 9, 38, 54, 60, 64,
+ 66, 73, 91, 94, 104,
+ }
+ deserializer := antlr.NewATNDeserializer(nil)
+ staticData.atn = deserializer.Deserialize(staticData.serializedATN)
+ atn := staticData.atn
+ staticData.decisionToDFA = make([]*antlr.DFA, len(atn.DecisionToState))
+ decisionToDFA := staticData.decisionToDFA
+ for index, state := range atn.DecisionToState {
+ decisionToDFA[index] = antlr.NewDFA(state, index)
+ }
+}
+
+// CESQLParserParserInit initializes any static state used to implement CESQLParserParser. By default the
+// static state used to implement the parser is lazily initialized during the first call to
+// NewCESQLParserParser(). You can call this function if you wish to initialize the static state ahead
+// of time.
+func CESQLParserParserInit() {
+ staticData := &cesqlparserParserStaticData
+ staticData.once.Do(cesqlparserParserInit)
+}
+
+// NewCESQLParserParser produces a new parser instance for the optional input antlr.TokenStream.
+func NewCESQLParserParser(input antlr.TokenStream) *CESQLParserParser {
+ CESQLParserParserInit()
+ this := new(CESQLParserParser)
+ this.BaseParser = antlr.NewBaseParser(input)
+ staticData := &cesqlparserParserStaticData
+ this.Interpreter = antlr.NewParserATNSimulator(this, staticData.atn, staticData.decisionToDFA, staticData.predictionContextCache)
+ this.RuleNames = staticData.ruleNames
+ this.LiteralNames = staticData.literalNames
+ this.SymbolicNames = staticData.symbolicNames
+ this.GrammarFileName = "CESQLParser.g4"
+
+ return this
+}
+
+// CESQLParserParser tokens.
+const (
+ CESQLParserParserEOF = antlr.TokenEOF
+ CESQLParserParserSPACE = 1
+ CESQLParserParserLR_BRACKET = 2
+ CESQLParserParserRR_BRACKET = 3
+ CESQLParserParserCOMMA = 4
+ CESQLParserParserSINGLE_QUOTE_SYMB = 5
+ CESQLParserParserDOUBLE_QUOTE_SYMB = 6
+ CESQLParserParserAND = 7
+ CESQLParserParserOR = 8
+ CESQLParserParserXOR = 9
+ CESQLParserParserNOT = 10
+ CESQLParserParserSTAR = 11
+ CESQLParserParserDIVIDE = 12
+ CESQLParserParserMODULE = 13
+ CESQLParserParserPLUS = 14
+ CESQLParserParserMINUS = 15
+ CESQLParserParserEQUAL = 16
+ CESQLParserParserNOT_EQUAL = 17
+ CESQLParserParserGREATER = 18
+ CESQLParserParserGREATER_OR_EQUAL = 19
+ CESQLParserParserLESS = 20
+ CESQLParserParserLESS_GREATER = 21
+ CESQLParserParserLESS_OR_EQUAL = 22
+ CESQLParserParserLIKE = 23
+ CESQLParserParserEXISTS = 24
+ CESQLParserParserIN = 25
+ CESQLParserParserTRUE = 26
+ CESQLParserParserFALSE = 27
+ CESQLParserParserDQUOTED_STRING_LITERAL = 28
+ CESQLParserParserSQUOTED_STRING_LITERAL = 29
+ CESQLParserParserINTEGER_LITERAL = 30
+ CESQLParserParserIDENTIFIER = 31
+ CESQLParserParserIDENTIFIER_WITH_NUMBER = 32
+ CESQLParserParserFUNCTION_IDENTIFIER_WITH_UNDERSCORE = 33
+)
+
+// CESQLParserParser rules.
+const (
+ CESQLParserParserRULE_cesql = 0
+ CESQLParserParserRULE_expression = 1
+ CESQLParserParserRULE_atom = 2
+ CESQLParserParserRULE_identifier = 3
+ CESQLParserParserRULE_functionIdentifier = 4
+ CESQLParserParserRULE_booleanLiteral = 5
+ CESQLParserParserRULE_stringLiteral = 6
+ CESQLParserParserRULE_integerLiteral = 7
+ CESQLParserParserRULE_functionParameterList = 8
+ CESQLParserParserRULE_setExpression = 9
+)
+
+// ICesqlContext is an interface to support dynamic dispatch.
+type ICesqlContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // IsCesqlContext differentiates from other interfaces.
+ IsCesqlContext()
+}
+
+type CesqlContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+}
+
+func NewEmptyCesqlContext() *CesqlContext {
+ var p = new(CesqlContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CESQLParserParserRULE_cesql
+ return p
+}
+
+func (*CesqlContext) IsCesqlContext() {}
+
+func NewCesqlContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *CesqlContext {
+ var p = new(CesqlContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CESQLParserParserRULE_cesql
+
+ return p
+}
+
+func (s *CesqlContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *CesqlContext) Expression() IExpressionContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExpressionContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *CesqlContext) EOF() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserEOF, 0)
+}
+
+func (s *CesqlContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *CesqlContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+func (s *CesqlContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitCesql(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+func (p *CESQLParserParser) Cesql() (localctx ICesqlContext) {
+ this := p
+ _ = this
+
+ localctx = NewCesqlContext(p, p.GetParserRuleContext(), p.GetState())
+ p.EnterRule(localctx, 0, CESQLParserParserRULE_cesql)
+
+ defer func() {
+ p.ExitRule()
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ p.EnterOuterAlt(localctx, 1)
+ {
+ p.SetState(20)
+ p.expression(0)
+ }
+ {
+ p.SetState(21)
+ p.Match(CESQLParserParserEOF)
+ }
+
+ return localctx
+}
+
+// IExpressionContext is an interface to support dynamic dispatch.
+type IExpressionContext interface {
+ antlr.ParserRuleContext
+
+ // GetParser returns the parser.
+ GetParser() antlr.Parser
+
+ // IsExpressionContext differentiates from other interfaces.
+ IsExpressionContext()
+}
+
+type ExpressionContext struct {
+ *antlr.BaseParserRuleContext
+ parser antlr.Parser
+}
+
+func NewEmptyExpressionContext() *ExpressionContext {
+ var p = new(ExpressionContext)
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(nil, -1)
+ p.RuleIndex = CESQLParserParserRULE_expression
+ return p
+}
+
+func (*ExpressionContext) IsExpressionContext() {}
+
+func NewExpressionContext(parser antlr.Parser, parent antlr.ParserRuleContext, invokingState int) *ExpressionContext {
+ var p = new(ExpressionContext)
+
+ p.BaseParserRuleContext = antlr.NewBaseParserRuleContext(parent, invokingState)
+
+ p.parser = parser
+ p.RuleIndex = CESQLParserParserRULE_expression
+
+ return p
+}
+
+func (s *ExpressionContext) GetParser() antlr.Parser { return s.parser }
+
+func (s *ExpressionContext) CopyFrom(ctx *ExpressionContext) {
+ s.BaseParserRuleContext.CopyFrom(ctx.BaseParserRuleContext)
+}
+
+func (s *ExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ExpressionContext) ToStringTree(ruleNames []string, recog antlr.Recognizer) string {
+ return antlr.TreesStringTree(s, ruleNames, recog)
+}
+
+type InExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewInExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *InExpressionContext {
+ var p = new(InExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *InExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *InExpressionContext) Expression() IExpressionContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExpressionContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *InExpressionContext) IN() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserIN, 0)
+}
+
+func (s *InExpressionContext) SetExpression() ISetExpressionContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(ISetExpressionContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(ISetExpressionContext)
+}
+
+func (s *InExpressionContext) NOT() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserNOT, 0)
+}
+
+func (s *InExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitInExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type BinaryComparisonExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewBinaryComparisonExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BinaryComparisonExpressionContext {
+ var p = new(BinaryComparisonExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *BinaryComparisonExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BinaryComparisonExpressionContext) AllExpression() []IExpressionContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IExpressionContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IExpressionContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IExpressionContext); ok {
+ tst[i] = t.(IExpressionContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *BinaryComparisonExpressionContext) Expression(i int) IExpressionContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExpressionContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *BinaryComparisonExpressionContext) EQUAL() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserEQUAL, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) NOT_EQUAL() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserNOT_EQUAL, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) LESS_GREATER() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserLESS_GREATER, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) GREATER_OR_EQUAL() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserGREATER_OR_EQUAL, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) LESS_OR_EQUAL() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserLESS_OR_EQUAL, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) LESS() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserLESS, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) GREATER() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserGREATER, 0)
+}
+
+func (s *BinaryComparisonExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitBinaryComparisonExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type AtomExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewAtomExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *AtomExpressionContext {
+ var p = new(AtomExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *AtomExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *AtomExpressionContext) Atom() IAtomContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IAtomContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IAtomContext)
+}
+
+func (s *AtomExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitAtomExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type ExistsExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewExistsExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *ExistsExpressionContext {
+ var p = new(ExistsExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *ExistsExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *ExistsExpressionContext) EXISTS() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserEXISTS, 0)
+}
+
+func (s *ExistsExpressionContext) Identifier() IIdentifierContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IIdentifierContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IIdentifierContext)
+}
+
+func (s *ExistsExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitExistsExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type BinaryLogicExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewBinaryLogicExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BinaryLogicExpressionContext {
+ var p = new(BinaryLogicExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *BinaryLogicExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BinaryLogicExpressionContext) AllExpression() []IExpressionContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IExpressionContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IExpressionContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IExpressionContext); ok {
+ tst[i] = t.(IExpressionContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *BinaryLogicExpressionContext) Expression(i int) IExpressionContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExpressionContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *BinaryLogicExpressionContext) AND() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserAND, 0)
+}
+
+func (s *BinaryLogicExpressionContext) OR() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserOR, 0)
+}
+
+func (s *BinaryLogicExpressionContext) XOR() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserXOR, 0)
+}
+
+func (s *BinaryLogicExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitBinaryLogicExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type LikeExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewLikeExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *LikeExpressionContext {
+ var p = new(LikeExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *LikeExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *LikeExpressionContext) Expression() IExpressionContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExpressionContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *LikeExpressionContext) LIKE() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserLIKE, 0)
+}
+
+func (s *LikeExpressionContext) StringLiteral() IStringLiteralContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IStringLiteralContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IStringLiteralContext)
+}
+
+func (s *LikeExpressionContext) NOT() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserNOT, 0)
+}
+
+func (s *LikeExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitLikeExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type FunctionInvocationExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewFunctionInvocationExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *FunctionInvocationExpressionContext {
+ var p = new(FunctionInvocationExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *FunctionInvocationExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *FunctionInvocationExpressionContext) FunctionIdentifier() IFunctionIdentifierContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IFunctionIdentifierContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IFunctionIdentifierContext)
+}
+
+func (s *FunctionInvocationExpressionContext) FunctionParameterList() IFunctionParameterListContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IFunctionParameterListContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IFunctionParameterListContext)
+}
+
+func (s *FunctionInvocationExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitFunctionInvocationExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type BinaryMultiplicativeExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewBinaryMultiplicativeExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BinaryMultiplicativeExpressionContext {
+ var p = new(BinaryMultiplicativeExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *BinaryMultiplicativeExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BinaryMultiplicativeExpressionContext) AllExpression() []IExpressionContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IExpressionContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IExpressionContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IExpressionContext); ok {
+ tst[i] = t.(IExpressionContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *BinaryMultiplicativeExpressionContext) Expression(i int) IExpressionContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExpressionContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *BinaryMultiplicativeExpressionContext) STAR() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserSTAR, 0)
+}
+
+func (s *BinaryMultiplicativeExpressionContext) DIVIDE() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserDIVIDE, 0)
+}
+
+func (s *BinaryMultiplicativeExpressionContext) MODULE() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserMODULE, 0)
+}
+
+func (s *BinaryMultiplicativeExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitBinaryMultiplicativeExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type UnaryLogicExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewUnaryLogicExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *UnaryLogicExpressionContext {
+ var p = new(UnaryLogicExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *UnaryLogicExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *UnaryLogicExpressionContext) NOT() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserNOT, 0)
+}
+
+func (s *UnaryLogicExpressionContext) Expression() IExpressionContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExpressionContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *UnaryLogicExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitUnaryLogicExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type UnaryNumericExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewUnaryNumericExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *UnaryNumericExpressionContext {
+ var p = new(UnaryNumericExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *UnaryNumericExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *UnaryNumericExpressionContext) MINUS() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserMINUS, 0)
+}
+
+func (s *UnaryNumericExpressionContext) Expression() IExpressionContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExpressionContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *UnaryNumericExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitUnaryNumericExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type SubExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewSubExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *SubExpressionContext {
+ var p = new(SubExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *SubExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *SubExpressionContext) LR_BRACKET() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserLR_BRACKET, 0)
+}
+
+func (s *SubExpressionContext) Expression() IExpressionContext {
+ var t antlr.RuleContext
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExpressionContext); ok {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *SubExpressionContext) RR_BRACKET() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserRR_BRACKET, 0)
+}
+
+func (s *SubExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitSubExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+type BinaryAdditiveExpressionContext struct {
+ *ExpressionContext
+}
+
+func NewBinaryAdditiveExpressionContext(parser antlr.Parser, ctx antlr.ParserRuleContext) *BinaryAdditiveExpressionContext {
+ var p = new(BinaryAdditiveExpressionContext)
+
+ p.ExpressionContext = NewEmptyExpressionContext()
+ p.parser = parser
+ p.CopyFrom(ctx.(*ExpressionContext))
+
+ return p
+}
+
+func (s *BinaryAdditiveExpressionContext) GetRuleContext() antlr.RuleContext {
+ return s
+}
+
+func (s *BinaryAdditiveExpressionContext) AllExpression() []IExpressionContext {
+ children := s.GetChildren()
+ len := 0
+ for _, ctx := range children {
+ if _, ok := ctx.(IExpressionContext); ok {
+ len++
+ }
+ }
+
+ tst := make([]IExpressionContext, len)
+ i := 0
+ for _, ctx := range children {
+ if t, ok := ctx.(IExpressionContext); ok {
+ tst[i] = t.(IExpressionContext)
+ i++
+ }
+ }
+
+ return tst
+}
+
+func (s *BinaryAdditiveExpressionContext) Expression(i int) IExpressionContext {
+ var t antlr.RuleContext
+ j := 0
+ for _, ctx := range s.GetChildren() {
+ if _, ok := ctx.(IExpressionContext); ok {
+ if j == i {
+ t = ctx.(antlr.RuleContext)
+ break
+ }
+ j++
+ }
+ }
+
+ if t == nil {
+ return nil
+ }
+
+ return t.(IExpressionContext)
+}
+
+func (s *BinaryAdditiveExpressionContext) PLUS() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserPLUS, 0)
+}
+
+func (s *BinaryAdditiveExpressionContext) MINUS() antlr.TerminalNode {
+ return s.GetToken(CESQLParserParserMINUS, 0)
+}
+
+func (s *BinaryAdditiveExpressionContext) Accept(visitor antlr.ParseTreeVisitor) interface{} {
+ switch t := visitor.(type) {
+ case CESQLParserVisitor:
+ return t.VisitBinaryAdditiveExpression(s)
+
+ default:
+ return t.VisitChildren(s)
+ }
+}
+
+func (p *CESQLParserParser) Expression() (localctx IExpressionContext) {
+ return p.expression(0)
+}
+
+func (p *CESQLParserParser) expression(_p int) (localctx IExpressionContext) {
+ this := p
+ _ = this
+
+ var _parentctx antlr.ParserRuleContext = p.GetParserRuleContext()
+ _parentState := p.GetState()
+ localctx = NewExpressionContext(p, p.GetParserRuleContext(), _parentState)
+ var _prevctx IExpressionContext = localctx
+ var _ antlr.ParserRuleContext = _prevctx // TODO: To prevent unused variable warning.
+ _startState := 2
+ p.EnterRecursionRule(localctx, 2, CESQLParserParserRULE_expression, _p)
+ var _la int
+
+ defer func() {
+ p.UnrollRecursionContexts(_parentctx)
+ }()
+
+ defer func() {
+ if err := recover(); err != nil {
+ if v, ok := err.(antlr.RecognitionException); ok {
+ localctx.SetException(v)
+ p.GetErrorHandler().ReportError(p, v)
+ p.GetErrorHandler().Recover(p, v)
+ } else {
+ panic(err)
+ }
+ }
+ }()
+
+ var _alt int
+
+ p.EnterOuterAlt(localctx, 1)
+ p.SetState(38)
+ p.GetErrorHandler().Sync(p)
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 0, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewFunctionInvocationExpressionContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+
+ {
+ p.SetState(24)
+ p.FunctionIdentifier()
+ }
+ {
+ p.SetState(25)
+ p.FunctionParameterList()
+ }
+
+ case 2:
+ localctx = NewUnaryLogicExpressionContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+ {
+ p.SetState(27)
+ p.Match(CESQLParserParserNOT)
+ }
+ {
+ p.SetState(28)
+ p.expression(11)
+ }
+
+ case 3:
+ localctx = NewUnaryNumericExpressionContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+ {
+ p.SetState(29)
+ p.Match(CESQLParserParserMINUS)
+ }
+ {
+ p.SetState(30)
+ p.expression(10)
+ }
+
+ case 4:
+ localctx = NewExistsExpressionContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+ {
+ p.SetState(31)
+ p.Match(CESQLParserParserEXISTS)
+ }
+ {
+ p.SetState(32)
+ p.Identifier()
+ }
+
+ case 5:
+ localctx = NewSubExpressionContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+ {
+ p.SetState(33)
+ p.Match(CESQLParserParserLR_BRACKET)
+ }
+ {
+ p.SetState(34)
+ p.expression(0)
+ }
+ {
+ p.SetState(35)
+ p.Match(CESQLParserParserRR_BRACKET)
+ }
+
+ case 6:
+ localctx = NewAtomExpressionContext(p, localctx)
+ p.SetParserRuleContext(localctx)
+ _prevctx = localctx
+ {
+ p.SetState(37)
+ p.Atom()
+ }
+
+ }
+ p.GetParserRuleContext().SetStop(p.GetTokenStream().LT(-1))
+ p.SetState(66)
+ p.GetErrorHandler().Sync(p)
+ _alt = p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 4, p.GetParserRuleContext())
+
+ for _alt != 2 && _alt != antlr.ATNInvalidAltNumber {
+ if _alt == 1 {
+ if p.GetParseListeners() != nil {
+ p.TriggerExitRuleEvent()
+ }
+ _prevctx = localctx
+ p.SetState(64)
+ p.GetErrorHandler().Sync(p)
+ switch p.GetInterpreter().AdaptivePredict(p.GetTokenStream(), 3, p.GetParserRuleContext()) {
+ case 1:
+ localctx = NewBinaryMultiplicativeExpressionContext(p, NewExpressionContext(p, _parentctx, _parentState))
+ p.PushNewRecursionContext(localctx, _startState, CESQLParserParserRULE_expression)
+ p.SetState(40)
+
+ if !(p.Precpred(p.GetParserRuleContext(), 6)) {
+ panic(antlr.NewFailedPredicateException(p, "p.Precpred(p.GetParserRuleContext(), 6)", ""))
+ }
+ {
+ p.SetState(41)
+ _la = p.GetTokenStream().LA(1)
+
+ if !(((_la)&-(0x1f+1)) == 0 && ((int64(1)< maxArity {
+ maxArity = a
+ }
+ }
+ if maxArity >= function.Arity() {
+ return errors.New("cannot add the variadic function, " +
+ "because there is already another function defined with the same name and same or greater arity")
+ }
+
+ item.variadicFunction = function
+ return nil
+ } else {
+ if _, ok := item.fixedArgsFunctions[function.Arity()]; ok {
+ return errors.New("cannot add the function, " +
+ "because there is already another function defined with the same arity and same name")
+ }
+
+ item.fixedArgsFunctions[function.Arity()] = function
+ return nil
+ }
+}
+
+// AddFunction registers a user defined function in the global function table.
+func AddFunction(fn cesql.Function) error {
+ return globalFunctionTable.AddFunction(fn)
+}
+
+func (table functionTable) ResolveFunction(name string, args int) cesql.Function {
+ item := table[strings.ToUpper(name)]
+ if item == nil {
+ return nil
+ }
+
+ if fn, ok := item.fixedArgsFunctions[args]; ok {
+ return fn
+ }
+
+ if item.variadicFunction == nil || item.variadicFunction.Arity() > args {
+ return nil
+ }
+
+ return item.variadicFunction
+}
+
+var globalFunctionTable = functionTable{}
+
+func init() {
+ for _, fn := range []cesql.Function{
+ function.IntFunction,
+ function.BoolFunction,
+ function.StringFunction,
+ function.IsBoolFunction,
+ function.IsIntFunction,
+ function.AbsFunction,
+ function.LengthFunction,
+ function.ConcatFunction,
+ function.ConcatWSFunction,
+ function.LowerFunction,
+ function.UpperFunction,
+ function.TrimFunction,
+ function.LeftFunction,
+ function.RightFunction,
+ function.SubstringFunction,
+ function.SubstringWithLengthFunction,
+ } {
+ if err := globalFunctionTable.AddFunction(fn); err != nil {
+ panic(err)
+ }
+ }
+}
+
+func ResolveFunction(name string, args int) cesql.Function {
+ return globalFunctionTable.ResolveFunction(name, args)
+}
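+
+// Illustrative usage (not part of the upstream file): the global table maps an
+// upper-cased function name plus argument count to a cesql.Function, so
+// resolving a built-in such as ABS with one argument is a plain lookup; nil
+// means no function with that name and arity is registered.
+//
+//	if fn := ResolveFunction("ABS", 1); fn != nil {
+//		// fn is the built-in ABS function registered in init() above.
+//	}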
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/types.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/types.go
new file mode 100644
index 000000000..629b7a18d
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/types.go
@@ -0,0 +1,62 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package v2
+
+type Type uint8
+
+const (
+ StringType Type = iota
+ IntegerType
+ BooleanType
+ AnyType
+)
+
+func TypePtr(t Type) *Type {
+ return &t
+}
+
+func (t Type) IsSameType(val interface{}) bool {
+ return TypeFromVal(val) == t
+}
+
+func (t Type) String() string {
+ switch t {
+ case IntegerType:
+ return "Integer"
+ case BooleanType:
+ return "Boolean"
+ case StringType:
+ return "String"
+ }
+ return "Any"
+}
+
+func (t Type) ZeroValue() interface{} {
+ switch t {
+ case StringType:
+ return ""
+ case IntegerType:
+ return 0
+ case BooleanType:
+ return false
+ case AnyType:
+ // by default, return false
+ return false
+ }
+ return false
+}
+
+func TypeFromVal(val interface{}) Type {
+ switch val.(type) {
+ case string:
+ return StringType
+ case int32:
+ return IntegerType
+ case bool:
+ return BooleanType
+ }
+ return AnyType
+}
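+
+// Illustrative usage (not part of the upstream file): only string, int32 and
+// bool map to concrete CESQL types; every other Go value is treated as AnyType.
+//
+//	_ = TypeFromVal(int32(42))         // IntegerType
+//	_ = TypeFromVal("on")              // StringType
+//	_ = BooleanType.IsSameType("true") // false: "true" is a Go string, not a bool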
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/utils/casting.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/utils/casting.go
new file mode 100644
index 000000000..d8053adb3
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/utils/casting.go
@@ -0,0 +1,76 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package utils
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+
+ cesql "github.com/cloudevents/sdk-go/sql/v2"
+ sqlerrors "github.com/cloudevents/sdk-go/sql/v2/errors"
+)
+
+func Cast(val interface{}, target cesql.Type) (interface{}, error) {
+ if target.IsSameType(val) {
+ return val, nil
+ }
+ switch target {
+ case cesql.StringType:
+ switch val.(type) {
+ case int32:
+ return strconv.Itoa(int(val.(int32))), nil
+ case bool:
+ if val.(bool) {
+ return "true", nil
+ } else {
+ return "false", nil
+ }
+ }
+ // Casting to string is always defined
+ return fmt.Sprintf("%v", val), nil
+ case cesql.IntegerType:
+ switch val.(type) {
+ case string:
+ v, err := strconv.ParseInt(val.(string), 10, 32)
+ if err != nil {
+ err = sqlerrors.NewCastError(fmt.Errorf("cannot cast from String to Integer: %w", err))
+ }
+ return int32(v), err
+ case bool:
+ if val.(bool) {
+ return int32(1), nil
+ }
+ return int32(0), nil
+ }
+ return 0, sqlerrors.NewCastError(fmt.Errorf("undefined cast from %v to %v", cesql.TypeFromVal(val), target))
+ case cesql.BooleanType:
+ switch val.(type) {
+ case string:
+ lowerCase := strings.ToLower(val.(string))
+ if lowerCase == "true" {
+ return true, nil
+ } else if lowerCase == "false" {
+ return false, nil
+ }
+ return false, sqlerrors.NewCastError(fmt.Errorf("cannot cast String to Boolean, actual value: %v", val))
+ case int32:
+ if val.(int32) == 0 {
+ return false, nil
+ }
+ return true, nil
+ }
+ return false, sqlerrors.NewCastError(fmt.Errorf("undefined cast from %v to %v", cesql.TypeFromVal(val), target))
+ }
+
+ // AnyType doesn't need casting
+ return val, nil
+}
+
+func CanCast(val interface{}, target cesql.Type) bool {
+ _, err := Cast(val, target)
+ return err == nil
+}
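+
+// Illustrative usage (not part of the upstream file): Cast applies the CESQL
+// casting rules implemented above, while CanCast only reports whether the cast
+// would succeed.
+//
+//	v, err := Cast("42", cesql.IntegerType)   // int32(42), nil
+//	ok := CanCast("maybe", cesql.BooleanType) // false: neither "true" nor "false"
+//	_, _, _ = v, err, ok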
diff --git a/vendor/github.com/cloudevents/sdk-go/sql/v2/utils/event.go b/vendor/github.com/cloudevents/sdk-go/sql/v2/utils/event.go
new file mode 100644
index 000000000..8525d4bf4
--- /dev/null
+++ b/vendor/github.com/cloudevents/sdk-go/sql/v2/utils/event.go
@@ -0,0 +1,67 @@
+/*
+ Copyright 2021 The CloudEvents Authors
+ SPDX-License-Identifier: Apache-2.0
+*/
+
+package utils
+
+import (
+ "fmt"
+ "time"
+
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+ "github.com/cloudevents/sdk-go/v2/binding/spec"
+ "github.com/cloudevents/sdk-go/v2/types"
+)
+
+func GetAttribute(event cloudevents.Event, attributeName string) interface{} {
+ var val interface{}
+
+ if a := spec.V1.Attribute(attributeName); a != nil { // Standard attribute
+ val = a.Get(event.Context)
+ } else {
+ val = event.Extensions()[attributeName]
+ }
+
+ if val == nil {
+ return nil
+ }
+
+ // Type coercion
+ switch val.(type) {
+ case bool, int32, string:
+ return val
+ case int8:
+ return int32(val.(int8))
+ case uint8:
+ return int32(val.(uint8))
+ case int16:
+ return int32(val.(int16))
+ case uint16:
+ return int32(val.(uint16))
+ case uint32:
+ return int32(val.(uint32))
+ case int64:
+ return int32(val.(int64))
+ case uint64:
+ return int32(val.(uint64))
+ case time.Time:
+ return val.(time.Time).Format(time.RFC3339Nano)
+ case []byte:
+ return types.FormatBinary(val.([]byte))
+ }
+ return fmt.Sprintf("%v", val)
+}
+
+func ContainsAttribute(event cloudevents.Event, attributeName string) bool {
+ if attributeName == "specversion" || attributeName == "id" || attributeName == "source" || attributeName == "type" {
+ return true
+ }
+
+ if attr := spec.V1.Attribute(attributeName); attr != nil {
+ return attr.Get(event.Context) != nil
+ }
+
+ _, ok := event.Extensions()[attributeName]
+ return ok
+}
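+
+// Illustrative usage (not part of the upstream file): attribute lookup covers
+// both standard context attributes and extensions, with values coerced to
+// string, int32 or bool as described above.
+//
+//	if ContainsAttribute(event, "subject") {
+//		subject := GetAttribute(event, "subject") // returned as a string
+//		_ = subject
+//	}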
diff --git a/vendor/github.com/robfig/cron/v3/.gitignore b/vendor/github.com/robfig/cron/v3/.gitignore
new file mode 100644
index 000000000..00268614f
--- /dev/null
+++ b/vendor/github.com/robfig/cron/v3/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/robfig/cron/v3/.travis.yml b/vendor/github.com/robfig/cron/v3/.travis.yml
new file mode 100644
index 000000000..4f2ee4d97
--- /dev/null
+++ b/vendor/github.com/robfig/cron/v3/.travis.yml
@@ -0,0 +1 @@
+language: go
diff --git a/vendor/github.com/robfig/cron/v3/LICENSE b/vendor/github.com/robfig/cron/v3/LICENSE
new file mode 100644
index 000000000..3a0f627ff
--- /dev/null
+++ b/vendor/github.com/robfig/cron/v3/LICENSE
@@ -0,0 +1,21 @@
+Copyright (C) 2012 Rob Figueiredo
+All Rights Reserved.
+
+MIT LICENSE
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/robfig/cron/v3/README.md b/vendor/github.com/robfig/cron/v3/README.md
new file mode 100644
index 000000000..984c537c0
--- /dev/null
+++ b/vendor/github.com/robfig/cron/v3/README.md
@@ -0,0 +1,125 @@
+[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron)
+[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron)
+
+# cron
+
+Cron V3 has been released!
+
+To download the specific tagged release, run:
+
+ go get github.com/robfig/cron/v3@v3.0.0
+
+Import it in your program as:
+
+ import "github.com/robfig/cron/v3"
+
+It requires Go 1.11 or later due to usage of Go Modules.
+
+Refer to the documentation here:
+http://godoc.org/github.com/robfig/cron
+
+The rest of this document describes the advances in v3 and a list of
+breaking changes for users that wish to upgrade from an earlier version.
+
+## Upgrading to v3 (June 2019)
+
+cron v3 is a major upgrade to the library that addresses all outstanding bugs,
+feature requests, and rough edges. It is based on a merge of master which
+contains various fixes to issues found over the years and the v2 branch which
+contains some backwards-incompatible features like the ability to remove cron
+jobs. In addition, v3 adds support for Go Modules, cleans up rough edges like
+the timezone support, and fixes a number of bugs.
+
+New features:
+
+- Support for Go modules. Callers must now import this library as
+ `github.com/robfig/cron/v3`, instead of `gopkg.in/...`
+
+- Fixed bugs:
+ - 0f01e6b parser: fix combining of Dow and Dom (#70)
+ - dbf3220 adjust times when rolling the clock forward to handle non-existent midnight (#157)
+ - eeecf15 spec_test.go: ensure an error is returned on 0 increment (#144)
+ - 70971dc cron.Entries(): update request for snapshot to include a reply channel (#97)
+ - 1cba5e6 cron: fix: removing a job causes the next scheduled job to run too late (#206)
+
+- Standard cron spec parsing by default (first field is "minute"), with an easy
+ way to opt into the seconds field (quartz-compatible). Note, however, that the
+ year field (optional in Quartz) is not supported.
+
+- Extensible, key/value logging via an interface that complies with
+ the https://github.com/go-logr/logr project.
+
+- The new Chain & JobWrapper types allow you to install "interceptors" to add
+ cross-cutting behavior like the following:
+ - Recover any panics from jobs
+ - Delay a job's execution if the previous run hasn't completed yet
+ - Skip a job's execution if the previous run hasn't completed yet
+ - Log each job's invocations
+ - Notification when jobs are completed
+
+It is backwards incompatible with both v1 and v2. These updates are required:
+
+- The v1 branch accepted an optional seconds field at the beginning of the cron
+ spec. This is non-standard and has led to a lot of confusion. The new default
+ parser conforms to the standard as described by [the Cron wikipedia page].
+
+ UPDATING: To retain the old behavior, construct your Cron with a custom
+ parser:
+
+ // Seconds field, required
+ cron.New(cron.WithSeconds())
+
+ // Seconds field, optional
+ cron.New(
+ cron.WithParser(
+ cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor))
+
+- The Cron type now accepts functional options on construction rather than the
+ previous ad-hoc behavior modification mechanisms (setting a field, calling a setter).
+
+ UPDATING: Code that sets Cron.ErrorLogger or calls Cron.SetLocation must be
+ updated to provide those values on construction.
+
+- CRON_TZ is now the recommended way to specify the timezone of a single
+ schedule, which is sanctioned by the specification. The legacy "TZ=" prefix
+ will continue to be supported since it is unambiguous and easy to support.
+
+ UPDATING: No update is required.
+
+- By default, cron will no longer recover panics in jobs that it runs.
+ Recovering can be surprising (see issue #192) and seems to be at odds with
+ typical behavior of libraries. Relatedly, the `cron.WithPanicLogger` option
+ has been removed to accommodate the more general JobWrapper type.
+
+ UPDATING: To opt into panic recovery and configure the panic logger:
+
+ cron.New(cron.WithChain(
+ cron.Recover(logger), // or use cron.DefaultLogger
+ ))
+
+- In adding support for https://github.com/go-logr/logr, `cron.WithVerboseLogger` was
+ removed, since it is duplicative with the leveled logging.
+
+ UPDATING: Callers should use `WithLogger` and specify a logger that does not
+ discard `Info` logs. For convenience, one is provided that wraps `*log.Logger`:
+
+ cron.New(
+ cron.WithLogger(cron.VerbosePrintfLogger(logger)))
+
+
+### Background - Cron spec format
+
+There are two cron spec formats in common usage:
+
+- The "standard" cron format, described on [the Cron wikipedia page] and used by
+ the cron Linux system utility.
+
+- The cron format used by [the Quartz Scheduler], commonly used for scheduled
+ jobs in Java software.
+
+[the Cron wikipedia page]: https://en.wikipedia.org/wiki/Cron
+[the Quartz Scheduler]: http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/tutorial-lesson-06.html
+
+The original version of this package included an optional "seconds" field, which
+made it incompatible with both of these formats. Now, the "standard" format is
+the default format accepted, and the Quartz format is opt-in.
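+
+For example (illustrative), "every day at 04:30" is written `30 4 * * *` in the
+standard format accepted by default; with the opt-in seconds field enabled via
+`cron.WithSeconds()`, the same schedule becomes `0 30 4 * * *`.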
diff --git a/vendor/github.com/robfig/cron/v3/chain.go b/vendor/github.com/robfig/cron/v3/chain.go
new file mode 100644
index 000000000..9565b418e
--- /dev/null
+++ b/vendor/github.com/robfig/cron/v3/chain.go
@@ -0,0 +1,92 @@
+package cron
+
+import (
+ "fmt"
+ "runtime"
+ "sync"
+ "time"
+)
+
+// JobWrapper decorates the given Job with some behavior.
+type JobWrapper func(Job) Job
+
+// Chain is a sequence of JobWrappers that decorates submitted jobs with
+// cross-cutting behaviors like logging or synchronization.
+type Chain struct {
+ wrappers []JobWrapper
+}
+
+// NewChain returns a Chain consisting of the given JobWrappers.
+func NewChain(c ...JobWrapper) Chain {
+ return Chain{c}
+}
+
+// Then decorates the given job with all JobWrappers in the chain.
+//
+// This:
+// NewChain(m1, m2, m3).Then(job)
+// is equivalent to:
+// m1(m2(m3(job)))
+func (c Chain) Then(j Job) Job {
+ for i := range c.wrappers {
+ j = c.wrappers[len(c.wrappers)-i-1](j)
+ }
+ return j
+}
+
+// Recover panics in wrapped jobs and log them with the provided logger.
+func Recover(logger Logger) JobWrapper {
+ return func(j Job) Job {
+ return FuncJob(func() {
+ defer func() {
+ if r := recover(); r != nil {
+ const size = 64 << 10
+ buf := make([]byte, size)
+ buf = buf[:runtime.Stack(buf, false)]
+ err, ok := r.(error)
+ if !ok {
+ err = fmt.Errorf("%v", r)
+ }
+ logger.Error(err, "panic", "stack", "...\n"+string(buf))
+ }
+ }()
+ j.Run()
+ })
+ }
+}
+
+// DelayIfStillRunning serializes jobs, delaying subsequent runs until the
+// previous one is complete. Jobs running after a delay of more than a minute
+// have the delay logged at Info.
+func DelayIfStillRunning(logger Logger) JobWrapper {
+ return func(j Job) Job {
+ var mu sync.Mutex
+ return FuncJob(func() {
+ start := time.Now()
+ mu.Lock()
+ defer mu.Unlock()
+ if dur := time.Since(start); dur > time.Minute {
+ logger.Info("delay", "duration", dur)
+ }
+ j.Run()
+ })
+ }
+}
+
+// SkipIfStillRunning skips an invocation of the Job if a previous invocation is
+// still running. It logs skips to the given logger at Info level.
+func SkipIfStillRunning(logger Logger) JobWrapper {
+ return func(j Job) Job {
+ var ch = make(chan struct{}, 1)
+ ch <- struct{}{}
+ return FuncJob(func() {
+ select {
+ case v := <-ch:
+ j.Run()
+ ch <- v
+ default:
+ logger.Info("skip")
+ }
+ })
+ }
+}
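+
+// Illustrative usage (not part of the upstream file): wrappers are installed
+// per Cron instance through the WithChain option and are applied to every
+// submitted job, outermost wrapper first.
+//
+//	c := cron.New(cron.WithChain(
+//		cron.Recover(cron.DefaultLogger),
+//		cron.SkipIfStillRunning(cron.DefaultLogger),
+//	))
+//	_ = c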
diff --git a/vendor/github.com/robfig/cron/v3/constantdelay.go b/vendor/github.com/robfig/cron/v3/constantdelay.go
new file mode 100644
index 000000000..cd6e7b1be
--- /dev/null
+++ b/vendor/github.com/robfig/cron/v3/constantdelay.go
@@ -0,0 +1,27 @@
+package cron
+
+import "time"
+
+// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
+// It does not support jobs more frequent than once a second.
+type ConstantDelaySchedule struct {
+ Delay time.Duration
+}
+
+// Every returns a crontab Schedule that activates once every duration.
+// Delays of less than a second are not supported (will round up to 1 second).
+// Any fields less than a Second are truncated.
+func Every(duration time.Duration) ConstantDelaySchedule {
+ if duration < time.Second {
+ duration = time.Second
+ }
+ return ConstantDelaySchedule{
+ Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
+ }
+}
+
+// Next returns the next time this should be run.
+// This rounds so that the next activation time will be on the second.
+func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
+ return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
+}
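+
+// Illustrative usage (not part of the upstream file): a ConstantDelaySchedule
+// can be registered directly with Cron.Schedule, bypassing the spec parser.
+//
+//	c := cron.New()
+//	c.Schedule(cron.Every(5*time.Minute), cron.FuncJob(func() {
+//		// runs roughly every five minutes, aligned to the second
+//	}))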
diff --git a/vendor/github.com/robfig/cron/v3/cron.go b/vendor/github.com/robfig/cron/v3/cron.go
new file mode 100644
index 000000000..c7e917665
--- /dev/null
+++ b/vendor/github.com/robfig/cron/v3/cron.go
@@ -0,0 +1,355 @@
+package cron
+
+import (
+ "context"
+ "sort"
+ "sync"
+ "time"
+)
+
+// Cron keeps track of any number of entries, invoking the associated func as
+// specified by the schedule. It may be started, stopped, and the entries may
+// be inspected while running.
+type Cron struct {
+ entries []*Entry
+ chain Chain
+ stop chan struct{}
+ add chan *Entry
+ remove chan EntryID
+ snapshot chan chan []Entry
+ running bool
+ logger Logger
+ runningMu sync.Mutex
+ location *time.Location
+ parser ScheduleParser
+ nextID EntryID
+ jobWaiter sync.WaitGroup
+}
+
+// ScheduleParser is an interface for schedule spec parsers that return a Schedule
+type ScheduleParser interface {
+ Parse(spec string) (Schedule, error)
+}
+
+// Job is an interface for submitted cron jobs.
+type Job interface {
+ Run()
+}
+
+// Schedule describes a job's duty cycle.
+type Schedule interface {
+ // Next returns the next activation time, later than the given time.
+ // Next is invoked initially, and then each time the job is run.
+ Next(time.Time) time.Time
+}
+
+// EntryID identifies an entry within a Cron instance
+type EntryID int
+
+// Entry consists of a schedule and the func to execute on that schedule.
+type Entry struct {
+ // ID is the cron-assigned ID of this entry, which may be used to look up a
+ // snapshot or remove it.
+ ID EntryID
+
+ // Schedule on which this job should be run.
+ Schedule Schedule
+
+ // Next time the job will run, or the zero time if Cron has not been
+ // started or this entry's schedule is unsatisfiable
+ Next time.Time
+
+ // Prev is the last time this job was run, or the zero time if never.
+ Prev time.Time
+
+ // WrappedJob is the thing to run when the Schedule is activated.
+ WrappedJob Job
+
+ // Job is the thing that was submitted to cron.
+ // It is kept around so that user code that needs to get at the job later,
+ // e.g. via Entries() can do so.
+ Job Job
+}
+
+// Valid returns true if this is not the zero entry.
+func (e Entry) Valid() bool { return e.ID != 0 }
+
+// byTime is a wrapper for sorting the entry array by time
+// (with zero time at the end).
+type byTime []*Entry
+
+func (s byTime) Len() int { return len(s) }
+func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s byTime) Less(i, j int) bool {
+ // Two zero times should return false.
+ // Otherwise, zero is "greater" than any other time.
+ // (To sort it at the end of the list.)
+ if s[i].Next.IsZero() {
+ return false
+ }
+ if s[j].Next.IsZero() {
+ return true
+ }
+ return s[i].Next.Before(s[j].Next)
+}
+
+// New returns a new Cron job runner, modified by the given options.
+//
+// Available Settings
+//
+// Time Zone
+// Description: The time zone in which schedules are interpreted
+// Default: time.Local
+//
+// Parser
+// Description: Parser converts cron spec strings into cron.Schedules.
+// Default: Accepts this spec: https://en.wikipedia.org/wiki/Cron
+//
+// Chain
+// Description: Wrap submitted jobs to customize behavior.
+// Default: A chain that recovers panics and logs them to stderr.
+//
+// See "cron.With*" to modify the default behavior.
+func New(opts ...Option) *Cron {
+ c := &Cron{
+ entries: nil,
+ chain: NewChain(),
+ add: make(chan *Entry),
+ stop: make(chan struct{}),
+ snapshot: make(chan chan []Entry),
+ remove: make(chan EntryID),
+ running: false,
+ runningMu: sync.Mutex{},
+ logger: DefaultLogger,
+ location: time.Local,
+ parser: standardParser,
+ }
+ for _, opt := range opts {
+ opt(c)
+ }
+ return c
+}
+
+// FuncJob is a wrapper that turns a func() into a cron.Job
+type FuncJob func()
+
+func (f FuncJob) Run() { f() }
+
+// AddFunc adds a func to the Cron to be run on the given schedule.
+// The spec is parsed using the time zone of this Cron instance as the default.
+// An opaque ID is returned that can be used to later remove it.
+func (c *Cron) AddFunc(spec string, cmd func()) (EntryID, error) {
+ return c.AddJob(spec, FuncJob(cmd))
+}
+
+// AddJob adds a Job to the Cron to be run on the given schedule.
+// The spec is parsed using the time zone of this Cron instance as the default.
+// An opaque ID is returned that can be used to later remove it.
+func (c *Cron) AddJob(spec string, cmd Job) (EntryID, error) {
+ schedule, err := c.parser.Parse(spec)
+ if err != nil {
+ return 0, err
+ }
+ return c.Schedule(schedule, cmd), nil
+}
+
+// Schedule adds a Job to the Cron to be run on the given schedule.
+// The job is wrapped with the configured Chain.
+func (c *Cron) Schedule(schedule Schedule, cmd Job) EntryID {
+ c.runningMu.Lock()
+ defer c.runningMu.Unlock()
+ c.nextID++
+ entry := &Entry{
+ ID: c.nextID,
+ Schedule: schedule,
+ WrappedJob: c.chain.Then(cmd),
+ Job: cmd,
+ }
+ if !c.running {
+ c.entries = append(c.entries, entry)
+ } else {
+ c.add <- entry
+ }
+ return entry.ID
+}
+
+// Entries returns a snapshot of the cron entries.
+func (c *Cron) Entries() []Entry {
+ c.runningMu.Lock()
+ defer c.runningMu.Unlock()
+ if c.running {
+ replyChan := make(chan []Entry, 1)
+ c.snapshot <- replyChan
+ return <-replyChan
+ }
+ return c.entrySnapshot()
+}
+
+// Location gets the time zone location
+func (c *Cron) Location() *time.Location {
+ return c.location
+}
+
+// Entry returns a snapshot of the given entry, or nil if it couldn't be found.
+func (c *Cron) Entry(id EntryID) Entry {
+ for _, entry := range c.Entries() {
+ if id == entry.ID {
+ return entry
+ }
+ }
+ return Entry{}
+}
+
+// Remove an entry from being run in the future.
+func (c *Cron) Remove(id EntryID) {
+ c.runningMu.Lock()
+ defer c.runningMu.Unlock()
+ if c.running {
+ c.remove <- id
+ } else {
+ c.removeEntry(id)
+ }
+}
+
+// Start the cron scheduler in its own goroutine, or no-op if already started.
+func (c *Cron) Start() {
+ c.runningMu.Lock()
+ defer c.runningMu.Unlock()
+ if c.running {
+ return
+ }
+ c.running = true
+ go c.run()
+}
+
+// Run the cron scheduler, or no-op if already running.
+func (c *Cron) Run() {
+ c.runningMu.Lock()
+ if c.running {
+ c.runningMu.Unlock()
+ return
+ }
+ c.running = true
+ c.runningMu.Unlock()
+ c.run()
+}
+
+// run the scheduler. This is private just due to the need to synchronize
+// access to the 'running' state variable.
+func (c *Cron) run() {
+ c.logger.Info("start")
+
+ // Figure out the next activation times for each entry.
+ now := c.now()
+ for _, entry := range c.entries {
+ entry.Next = entry.Schedule.Next(now)
+ c.logger.Info("schedule", "now", now, "entry", entry.ID, "next", entry.Next)
+ }
+
+ for {
+ // Determine the next entry to run.
+ sort.Sort(byTime(c.entries))
+
+ var timer *time.Timer
+ if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
+ // If there are no entries yet, just sleep - it still handles new entries
+ // and stop requests.
+ timer = time.NewTimer(100000 * time.Hour)
+ } else {
+ timer = time.NewTimer(c.entries[0].Next.Sub(now))
+ }
+
+ for {
+ select {
+ case now = <-timer.C:
+ now = now.In(c.location)
+ c.logger.Info("wake", "now", now)
+
+ // Run every entry whose next time was less than now
+ for _, e := range c.entries {
+ if e.Next.After(now) || e.Next.IsZero() {
+ break
+ }
+ c.startJob(e.WrappedJob)
+ e.Prev = e.Next
+ e.Next = e.Schedule.Next(now)
+ c.logger.Info("run", "now", now, "entry", e.ID, "next", e.Next)
+ }
+
+ case newEntry := <-c.add:
+ timer.Stop()
+ now = c.now()
+ newEntry.Next = newEntry.Schedule.Next(now)
+ c.entries = append(c.entries, newEntry)
+ c.logger.Info("added", "now", now, "entry", newEntry.ID, "next", newEntry.Next)
+
+ case replyChan := <-c.snapshot:
+ replyChan <- c.entrySnapshot()
+ continue
+
+ case <-c.stop:
+ timer.Stop()
+ c.logger.Info("stop")
+ return
+
+ case id := <-c.remove:
+ timer.Stop()
+ now = c.now()
+ c.removeEntry(id)
+ c.logger.Info("removed", "entry", id)
+ }
+
+ break
+ }
+ }
+}
+
+// startJob runs the given job in a new goroutine.
+func (c *Cron) startJob(j Job) {
+ c.jobWaiter.Add(1)
+ go func() {
+ defer c.jobWaiter.Done()
+ j.Run()
+ }()
+}
+
+// now returns current time in c location
+func (c *Cron) now() time.Time {
+ return time.Now().In(c.location)
+}
+
+// Stop stops the cron scheduler if it is running; otherwise it does nothing.
+// A context is returned so the caller can wait for running jobs to complete.
+func (c *Cron) Stop() context.Context {
+ c.runningMu.Lock()
+ defer c.runningMu.Unlock()
+ if c.running {
+ c.stop <- struct{}{}
+ c.running = false
+ }
+ ctx, cancel := context.WithCancel(context.Background())
+ go func() {
+ c.jobWaiter.Wait()
+ cancel()
+ }()
+ return ctx
+}
+
+// entrySnapshot returns a copy of the current cron entry list.
+func (c *Cron) entrySnapshot() []Entry {
+ var entries = make([]Entry, len(c.entries))
+ for i, e := range c.entries {
+ entries[i] = *e
+ }
+ return entries
+}
+
+func (c *Cron) removeEntry(id EntryID) {
+ var entries []*Entry
+ for _, e := range c.entries {
+ if e.ID != id {
+ entries = append(entries, e)
+ }
+ }
+ c.entries = entries
+}
diff --git a/vendor/github.com/robfig/cron/v3/doc.go b/vendor/github.com/robfig/cron/v3/doc.go
new file mode 100644
index 000000000..fa5d08b4d
--- /dev/null
+++ b/vendor/github.com/robfig/cron/v3/doc.go
@@ -0,0 +1,231 @@
+/*
+Package cron implements a cron spec parser and job runner.
+
+Installation
+
+To download the specific tagged release, run:
+
+ go get github.com/robfig/cron/v3@v3.0.0
+
+Import it in your program as:
+
+ import "github.com/robfig/cron/v3"
+
+It requires Go 1.11 or later due to usage of Go Modules.
+
+Usage
+
+Callers may register Funcs to be invoked on a given schedule. Cron will run
+them in their own goroutines.
+
+ c := cron.New()
+ c.AddFunc("30 * * * *", func() { fmt.Println("Every hour on the half hour") })
+ c.AddFunc("30 3-6,20-23 * * *", func() { fmt.Println(".. in the range 3-6am, 8-11pm") })
+ c.AddFunc("CRON_TZ=Asia/Tokyo 30 04 * * *", func() { fmt.Println("Runs at 04:30 Tokyo time every day") })
+ c.AddFunc("@hourly", func() { fmt.Println("Every hour, starting an hour from now") })
+ c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty, starting an hour thirty from now") })
+ c.Start()
+ ..
+ // Funcs are invoked in their own goroutine, asynchronously.
+ ...
+ // Funcs may also be added to a running Cron
+ c.AddFunc("@daily", func() { fmt.Println("Every day") })
+ ..
+ // Inspect the cron job entries' next and previous run times.
+ inspect(c.Entries())
+ ..
+ c.Stop() // Stop the scheduler (does not stop any jobs already running).
+
+CRON Expression Format
+
+A cron expression represents a set of times, using 5 space-separated fields.
+
+ Field name | Mandatory? | Allowed values | Allowed special characters
+ ---------- | ---------- | -------------- | --------------------------
+ Minutes | Yes | 0-59 | * / , -
+ Hours | Yes | 0-23 | * / , -
+ Day of month | Yes | 1-31 | * / , - ?
+ Month | Yes | 1-12 or JAN-DEC | * / , -
+ Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
+
+Month and Day-of-week field values are case insensitive. "SUN", "Sun", and
+"sun" are equally accepted.
+
+The specific interpretation of the format is based on the Cron Wikipedia page:
+https://en.wikipedia.org/wiki/Cron
+
+Alternative Formats
+
+Alternative Cron expression formats support other fields like seconds. You can
+implement that by creating a custom Parser as follows.
+
+ cron.New(
+ cron.WithParser(
+ cron.NewParser(
+ cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor)))
+
+Since adding Seconds is the most common modification to the standard cron spec,
+cron provides a builtin function to do that, which is equivalent to the custom
+parser you saw earlier, except that its seconds field is REQUIRED:
+
+ cron.New(cron.WithSeconds())
+
+That emulates Quartz, the most popular alternative Cron schedule format:
+http://www.quartz-scheduler.org/documentation/quartz-2.x/tutorials/crontrigger.html
+
+Special Characters
+
+Asterisk ( * )
+
+The asterisk indicates that the cron expression will match for all values of the
+field; e.g., using an asterisk in the 5th field (month) would indicate every
+month.
+
+Slash ( / )
+
+Slashes are used to describe increments of ranges. For example 3-59/15 in the
+1st field (minutes) would indicate the 3rd minute of the hour and every 15
+minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
+that is, an increment over the largest possible range of the field. The form
+"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
+increment until the end of that specific range. It does not wrap around.
+
+Comma ( , )
+
+Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
+the 5th field (day of week) would mean Mondays, Wednesdays and Fridays.
+
+Hyphen ( - )
+
+Hyphens are used to define ranges. For example, 9-17 would indicate every
+hour between 9am and 5pm inclusive.
+
+Question mark ( ? )
+
+Question mark may be used instead of '*' for leaving either day-of-month or
+day-of-week blank.
+
+Predefined schedules
+
+You may use one of several pre-defined schedules in place of a cron expression.
+
+ Entry | Description | Equivalent To
+ ----- | ----------- | -------------
+ @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 1 1 *
+ @monthly | Run once a month, midnight, first of month | 0 0 1 * *
+ @weekly | Run once a week, midnight between Sat/Sun | 0 0 * * 0
+ @daily (or @midnight) | Run once a day, midnight | 0 0 * * *
+ @hourly | Run once an hour, beginning of hour | 0 * * * *
+
+Intervals
+
+You may also schedule a job to execute at fixed intervals, starting at the time it's added
+or cron is run. This is supported by formatting the cron spec like this:
+
+	@every <duration>
+
+where "duration" is a string accepted by time.ParseDuration
+(http://golang.org/pkg/time/#ParseDuration).
+
+For example, "@every 1h30m10s" would indicate a schedule that activates after
+1 hour, 30 minutes, 10 seconds, and then every interval after that.
+
+Note: The interval does not take the job runtime into account. For example,
+if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
+it will have only 2 minutes of idle time between each run.
+
+Time zones
+
+By default, all interpretation and scheduling is done in the machine's local
+time zone (time.Local). You can specify a different time zone on construction:
+
+ cron.New(
+ cron.WithLocation(time.UTC))
+
+Individual cron schedules may also override the time zone they are to be
+interpreted in by providing an additional space-separated field at the beginning
+of the cron spec, of the form "CRON_TZ=Asia/Tokyo".
+
+For example:
+
+ # Runs at 6am in time.Local
+ cron.New().AddFunc("0 6 * * ?", ...)
+
+ # Runs at 6am in America/New_York
+ nyc, _ := time.LoadLocation("America/New_York")
+ c := cron.New(cron.WithLocation(nyc))
+ c.AddFunc("0 6 * * ?", ...)
+
+ # Runs at 6am in Asia/Tokyo
+ cron.New().AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
+
+ # Runs at 6am in Asia/Tokyo
+ c := cron.New(cron.WithLocation(nyc))
+ c.SetLocation("America/New_York")
+ c.AddFunc("CRON_TZ=Asia/Tokyo 0 6 * * ?", ...)
+
+The prefix "TZ=(TIME ZONE)" is also supported for legacy compatibility.
+
+Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
+not be run!
+
+Job Wrappers
+
+A Cron runner may be configured with a chain of job wrappers to add
+cross-cutting functionality to all submitted jobs. For example, they may be used
+to achieve the following effects:
+
+ - Recover any panics from jobs (activated by default)
+ - Delay a job's execution if the previous run hasn't completed yet
+ - Skip a job's execution if the previous run hasn't completed yet
+ - Log each job's invocations
+
+Install wrappers for all jobs added to a cron using the `cron.WithChain` option:
+
+ cron.New(cron.WithChain(
+ cron.SkipIfStillRunning(logger),
+ ))
+
+Install wrappers for individual jobs by explicitly wrapping them:
+
+ job = cron.NewChain(
+ cron.SkipIfStillRunning(logger),
+ ).Then(job)
+
+Thread safety
+
+Since the Cron service runs concurrently with the calling code, some amount of
+care must be taken to ensure proper synchronization.
+
+All cron methods are designed to be correctly synchronized as long as the caller
+ensures that invocations have a clear happens-before ordering between them.
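+
+For example, it is safe to add a job from a goroutine started after Start has
+returned, since starting the goroutine establishes that ordering:
+
+	c := cron.New()
+	c.Start()
+	go func() {
+		c.AddFunc("@hourly", func() { fmt.Println("added from another goroutine") })
+	}()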
+
+Logging
+
+Cron defines a Logger interface that is a subset of the one defined in
+github.com/go-logr/logr. It has two logging levels (Info and Error), and
+parameters are key/value pairs. This makes it possible for cron logging to plug
+into structured logging systems. An adapter, [Verbose]PrintfLogger, is provided
+to wrap the standard library *log.Logger.
+
+For additional insight into Cron operations, verbose logging may be activated
+which will record job runs, scheduling decisions, and added or removed jobs.
+Activate it with a one-off logger as follows:
+
+ cron.New(
+ cron.WithLogger(
+ cron.VerbosePrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))))
+
+
+Implementation
+
+Cron entries are stored in an array, sorted by their next activation time. Cron
+sleeps until the next job is due to be run.
+
+Upon waking:
+ - it runs each entry that is active on that second
+ - it calculates the next run times for the jobs that were run
+ - it re-sorts the array of entries by next activation time.
+ - it goes to sleep until the soonest job.
+*/
+package cron
diff --git a/vendor/github.com/robfig/cron/v3/logger.go b/vendor/github.com/robfig/cron/v3/logger.go
new file mode 100644
index 000000000..b4efcc053
--- /dev/null
+++ b/vendor/github.com/robfig/cron/v3/logger.go
@@ -0,0 +1,86 @@
+package cron
+
+import (
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+ "time"
+)
+
+// DefaultLogger is used by Cron if none is specified.
+var DefaultLogger Logger = PrintfLogger(log.New(os.Stdout, "cron: ", log.LstdFlags))
+
+// DiscardLogger can be used by callers to discard all log messages.
+var DiscardLogger Logger = PrintfLogger(log.New(ioutil.Discard, "", 0))
+
+// Logger is the interface used in this package for logging, so that any backend
+// can be plugged in. It is a subset of the github.com/go-logr/logr interface.
+type Logger interface {
+ // Info logs routine messages about cron's operation.
+ Info(msg string, keysAndValues ...interface{})
+ // Error logs an error condition.
+ Error(err error, msg string, keysAndValues ...interface{})
+}
+
+// PrintfLogger wraps a Printf-based logger (such as the standard library "log")
+// into an implementation of the Logger interface which logs errors only.
+func PrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
+ return printfLogger{l, false}
+}
+
+// VerbosePrintfLogger wraps a Printf-based logger (such as the standard library
+// "log") into an implementation of the Logger interface which logs everything.
+func VerbosePrintfLogger(l interface{ Printf(string, ...interface{}) }) Logger {
+ return printfLogger{l, true}
+}
+
+type printfLogger struct {
+ logger interface{ Printf(string, ...interface{}) }
+ logInfo bool
+}
+
+func (pl printfLogger) Info(msg string, keysAndValues ...interface{}) {
+ if pl.logInfo {
+ keysAndValues = formatTimes(keysAndValues)
+ pl.logger.Printf(
+ formatString(len(keysAndValues)),
+ append([]interface{}{msg}, keysAndValues...)...)
+ }
+}
+
+func (pl printfLogger) Error(err error, msg string, keysAndValues ...interface{}) {
+ keysAndValues = formatTimes(keysAndValues)
+ pl.logger.Printf(
+ formatString(len(keysAndValues)+2),
+ append([]interface{}{msg, "error", err}, keysAndValues...)...)
+}
+
+// formatString returns a logfmt-like format string for the number of
+// key/values.
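+// For example, formatString(4) returns "%s, %v=%v, %v=%v".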
+func formatString(numKeysAndValues int) string {
+ var sb strings.Builder
+ sb.WriteString("%s")
+ if numKeysAndValues > 0 {
+ sb.WriteString(", ")
+ }
+ for i := 0; i < numKeysAndValues/2; i++ {
+ if i > 0 {
+ sb.WriteString(", ")
+ }
+ sb.WriteString("%v=%v")
+ }
+ return sb.String()
+}
+
+// formatTimes formats any time.Time values as RFC3339.
+func formatTimes(keysAndValues []interface{}) []interface{} {
+ var formattedArgs []interface{}
+ for _, arg := range keysAndValues {
+ if t, ok := arg.(time.Time); ok {
+ arg = t.Format(time.RFC3339)
+ }
+ formattedArgs = append(formattedArgs, arg)
+ }
+ return formattedArgs
+}
diff --git a/vendor/github.com/robfig/cron/v3/option.go b/vendor/github.com/robfig/cron/v3/option.go
new file mode 100644
index 000000000..09e4278e7
--- /dev/null
+++ b/vendor/github.com/robfig/cron/v3/option.go
@@ -0,0 +1,45 @@
+package cron
+
+import (
+ "time"
+)
+
+// Option represents a modification to the default behavior of a Cron.
+type Option func(*Cron)
+
+// WithLocation overrides the timezone of the cron instance.
+func WithLocation(loc *time.Location) Option {
+ return func(c *Cron) {
+ c.location = loc
+ }
+}
+
+// WithSeconds overrides the parser used for interpreting job schedules to
+// include a seconds field as the first one.
+func WithSeconds() Option {
+ return WithParser(NewParser(
+ Second | Minute | Hour | Dom | Month | Dow | Descriptor,
+ ))
+}
+
+// WithParser overrides the parser used for interpreting job schedules.
+func WithParser(p ScheduleParser) Option {
+ return func(c *Cron) {
+ c.parser = p
+ }
+}
+
+// WithChain specifies Job wrappers to apply to all jobs added to this cron.
+// Refer to the Chain* functions in this package for provided wrappers.
+func WithChain(wrappers ...JobWrapper) Option {
+ return func(c *Cron) {
+ c.chain = NewChain(wrappers...)
+ }
+}
+
+// WithLogger uses the provided logger.
+func WithLogger(logger Logger) Option {
+ return func(c *Cron) {
+ c.logger = logger
+ }
+}
diff --git a/vendor/github.com/robfig/cron/v3/parser.go b/vendor/github.com/robfig/cron/v3/parser.go
new file mode 100644
index 000000000..3cf8879f7
--- /dev/null
+++ b/vendor/github.com/robfig/cron/v3/parser.go
@@ -0,0 +1,434 @@
+package cron
+
+import (
+ "fmt"
+ "math"
+ "strconv"
+ "strings"
+ "time"
+)
+
+// Configuration options for creating a parser. Most options specify which
+// fields should be included, while others enable features. If a field is not
+// included the parser will assume a default value. These options do not change
+// the order fields are parsed in.
+type ParseOption int
+
+const (
+ Second ParseOption = 1 << iota // Seconds field, default 0
+ SecondOptional // Optional seconds field, default 0
+ Minute // Minutes field, default 0
+ Hour // Hours field, default 0
+ Dom // Day of month field, default *
+ Month // Month field, default *
+ Dow // Day of week field, default *
+ DowOptional // Optional day of week field, default *
+ Descriptor // Allow descriptors such as @monthly, @weekly, etc.
+)
+
+var places = []ParseOption{
+ Second,
+ Minute,
+ Hour,
+ Dom,
+ Month,
+ Dow,
+}
+
+var defaults = []string{
+ "0",
+ "0",
+ "0",
+ "*",
+ "*",
+ "*",
+}
+
+// A custom Parser that can be configured.
+type Parser struct {
+ options ParseOption
+}
+
+// NewParser creates a Parser with custom options.
+//
+// It panics if more than one Optional is given, since it would be impossible to
+// correctly infer which optional is provided or missing in general.
+//
+// Examples
+//
+// // Standard parser without descriptors
+// specParser := NewParser(Minute | Hour | Dom | Month | Dow)
+// sched, err := specParser.Parse("0 0 15 */3 *")
+//
+// // Same as above, just excludes time fields
+// subsParser := NewParser(Dom | Month | Dow)
+//  sched, err := subsParser.Parse("15 */3 *")
+//
+// // Same as above, just makes Dow optional
+// subsParser := NewParser(Dom | Month | DowOptional)
+//  sched, err := subsParser.Parse("15 */3")
+//
+func NewParser(options ParseOption) Parser {
+ optionals := 0
+ if options&DowOptional > 0 {
+ optionals++
+ }
+ if options&SecondOptional > 0 {
+ optionals++
+ }
+ if optionals > 1 {
+ panic("multiple optionals may not be configured")
+ }
+ return Parser{options}
+}
+
+// Parse returns a new crontab schedule representing the given spec.
+// It returns a descriptive error if the spec is not valid.
+// It accepts crontab specs and features configured by NewParser.
+func (p Parser) Parse(spec string) (Schedule, error) {
+ if len(spec) == 0 {
+ return nil, fmt.Errorf("empty spec string")
+ }
+
+ // Extract timezone if present
+ var loc = time.Local
+ if strings.HasPrefix(spec, "TZ=") || strings.HasPrefix(spec, "CRON_TZ=") {
+ var err error
+ i := strings.Index(spec, " ")
+ eq := strings.Index(spec, "=")
+ if loc, err = time.LoadLocation(spec[eq+1 : i]); err != nil {
+ return nil, fmt.Errorf("provided bad location %s: %v", spec[eq+1:i], err)
+ }
+ spec = strings.TrimSpace(spec[i:])
+ }
+
+ // Handle named schedules (descriptors), if configured
+ if strings.HasPrefix(spec, "@") {
+ if p.options&Descriptor == 0 {
+ return nil, fmt.Errorf("parser does not accept descriptors: %v", spec)
+ }
+ return parseDescriptor(spec, loc)
+ }
+
+ // Split on whitespace.
+ fields := strings.Fields(spec)
+
+ // Validate & fill in any omitted or optional fields
+ var err error
+ fields, err = normalizeFields(fields, p.options)
+ if err != nil {
+ return nil, err
+ }
+
+ field := func(field string, r bounds) uint64 {
+ if err != nil {
+ return 0
+ }
+ var bits uint64
+ bits, err = getField(field, r)
+ return bits
+ }
+
+ var (
+ second = field(fields[0], seconds)
+ minute = field(fields[1], minutes)
+ hour = field(fields[2], hours)
+ dayofmonth = field(fields[3], dom)
+ month = field(fields[4], months)
+ dayofweek = field(fields[5], dow)
+ )
+ if err != nil {
+ return nil, err
+ }
+
+ return &SpecSchedule{
+ Second: second,
+ Minute: minute,
+ Hour: hour,
+ Dom: dayofmonth,
+ Month: month,
+ Dow: dayofweek,
+ Location: loc,
+ }, nil
+}
+
+// normalizeFields takes a subset of the time fields and returns the full set
+// with defaults (zeroes) populated for unset fields.
+//
+// As part of performing this function, it also validates that the provided
+// fields are compatible with the configured options.
+func normalizeFields(fields []string, options ParseOption) ([]string, error) {
+ // Validate optionals & add their field to options
+ optionals := 0
+ if options&SecondOptional > 0 {
+ options |= Second
+ optionals++
+ }
+ if options&DowOptional > 0 {
+ options |= Dow
+ optionals++
+ }
+ if optionals > 1 {
+ return nil, fmt.Errorf("multiple optionals may not be configured")
+ }
+
+ // Figure out how many fields we need
+ max := 0
+ for _, place := range places {
+ if options&place > 0 {
+ max++
+ }
+ }
+ min := max - optionals
+
+ // Validate number of fields
+ if count := len(fields); count < min || count > max {
+ if min == max {
+ return nil, fmt.Errorf("expected exactly %d fields, found %d: %s", min, count, fields)
+ }
+ return nil, fmt.Errorf("expected %d to %d fields, found %d: %s", min, max, count, fields)
+ }
+
+ // Populate the optional field if not provided
+ if min < max && len(fields) == min {
+ switch {
+ case options&DowOptional > 0:
+ fields = append(fields, defaults[5]) // TODO: improve access to default
+ case options&SecondOptional > 0:
+ fields = append([]string{defaults[0]}, fields...)
+ default:
+ return nil, fmt.Errorf("unknown optional field")
+ }
+ }
+
+ // Populate all fields not part of options with their defaults
+ n := 0
+ expandedFields := make([]string, len(places))
+ copy(expandedFields, defaults)
+ for i, place := range places {
+ if options&place > 0 {
+ expandedFields[i] = fields[n]
+ n++
+ }
+ }
+ return expandedFields, nil
+}
+
+var standardParser = NewParser(
+ Minute | Hour | Dom | Month | Dow | Descriptor,
+)
+
+// ParseStandard returns a new crontab schedule representing the given
+// standardSpec (https://en.wikipedia.org/wiki/Cron). It requires 5 entries
+// representing: minute, hour, day of month, month and day of week, in that
+// order. It returns a descriptive error if the spec is not valid.
+//
+// It accepts
+// - Standard crontab specs, e.g. "* * * * ?"
+// - Descriptors, e.g. "@midnight", "@every 1h30m"
+func ParseStandard(standardSpec string) (Schedule, error) {
+ return standardParser.Parse(standardSpec)
+}
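+
+// For example, a parsed schedule may be registered directly with Cron.Schedule
+// (a usage sketch; c is a *Cron as returned by New):
+//
+//	sched, err := cron.ParseStandard("30 4 * * *")
+//	if err != nil {
+//		// handle the invalid spec
+//	}
+//	id := c.Schedule(sched, cron.FuncJob(func() { fmt.Println("04:30 daily") }))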
+
+// getField returns an Int with the bits set representing all of the times that
+// the field represents or error parsing field value. A "field" is a comma-separated
+// list of "ranges".
+func getField(field string, r bounds) (uint64, error) {
+ var bits uint64
+ ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' })
+ for _, expr := range ranges {
+ bit, err := getRange(expr, r)
+ if err != nil {
+ return bits, err
+ }
+ bits |= bit
+ }
+ return bits, nil
+}
+
+// getRange returns the bits indicated by the given expression:
+// number | number "-" number [ "/" number ]
+// or error parsing range.
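+// For example, "3-59/15" yields the bits for 3, 18, 33, and 48.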
+func getRange(expr string, r bounds) (uint64, error) {
+ var (
+ start, end, step uint
+ rangeAndStep = strings.Split(expr, "/")
+ lowAndHigh = strings.Split(rangeAndStep[0], "-")
+ singleDigit = len(lowAndHigh) == 1
+ err error
+ )
+
+ var extra uint64
+ if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
+ start = r.min
+ end = r.max
+ extra = starBit
+ } else {
+ start, err = parseIntOrName(lowAndHigh[0], r.names)
+ if err != nil {
+ return 0, err
+ }
+ switch len(lowAndHigh) {
+ case 1:
+ end = start
+ case 2:
+ end, err = parseIntOrName(lowAndHigh[1], r.names)
+ if err != nil {
+ return 0, err
+ }
+ default:
+ return 0, fmt.Errorf("too many hyphens: %s", expr)
+ }
+ }
+
+ switch len(rangeAndStep) {
+ case 1:
+ step = 1
+ case 2:
+ step, err = mustParseInt(rangeAndStep[1])
+ if err != nil {
+ return 0, err
+ }
+
+ // Special handling: "N/step" means "N-max/step".
+ if singleDigit {
+ end = r.max
+ }
+ if step > 1 {
+ extra = 0
+ }
+ default:
+ return 0, fmt.Errorf("too many slashes: %s", expr)
+ }
+
+ if start < r.min {
+ return 0, fmt.Errorf("beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
+ }
+ if end > r.max {
+ return 0, fmt.Errorf("end of range (%d) above maximum (%d): %s", end, r.max, expr)
+ }
+ if start > end {
+ return 0, fmt.Errorf("beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
+ }
+ if step == 0 {
+ return 0, fmt.Errorf("step of range should be a positive number: %s", expr)
+ }
+
+ return getBits(start, end, step) | extra, nil
+}
+
+// parseIntOrName returns the (possibly-named) integer contained in expr.
+func parseIntOrName(expr string, names map[string]uint) (uint, error) {
+ if names != nil {
+ if namedInt, ok := names[strings.ToLower(expr)]; ok {
+ return namedInt, nil
+ }
+ }
+ return mustParseInt(expr)
+}
+
+// mustParseInt parses the given expression as an int or returns an error.
+func mustParseInt(expr string) (uint, error) {
+ num, err := strconv.Atoi(expr)
+ if err != nil {
+ return 0, fmt.Errorf("failed to parse int from %s: %s", expr, err)
+ }
+ if num < 0 {
+ return 0, fmt.Errorf("negative number (%d) not allowed: %s", num, expr)
+ }
+
+ return uint(num), nil
+}
+
+// getBits sets all bits in the range [min, max], modulo the given step size.
+func getBits(min, max, step uint) uint64 {
+ var bits uint64
+
+ // If step is 1, use shifts.
+ if step == 1 {
+ return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
+ }
+
+ // Else, use a simple loop.
+ for i := min; i <= max; i += step {
+ bits |= 1 << i
+ }
+ return bits
+}
+
+// all returns all bits within the given bounds. (plus the star bit)
+func all(r bounds) uint64 {
+ return getBits(r.min, r.max, 1) | starBit
+}
+
+// parseDescriptor returns a predefined schedule for the expression, or error if none matches.
+func parseDescriptor(descriptor string, loc *time.Location) (Schedule, error) {
+ switch descriptor {
+ case "@yearly", "@annually":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: 1 << hours.min,
+ Dom: 1 << dom.min,
+ Month: 1 << months.min,
+ Dow: all(dow),
+ Location: loc,
+ }, nil
+
+ case "@monthly":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: 1 << hours.min,
+ Dom: 1 << dom.min,
+ Month: all(months),
+ Dow: all(dow),
+ Location: loc,
+ }, nil
+
+ case "@weekly":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: 1 << hours.min,
+ Dom: all(dom),
+ Month: all(months),
+ Dow: 1 << dow.min,
+ Location: loc,
+ }, nil
+
+ case "@daily", "@midnight":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: 1 << hours.min,
+ Dom: all(dom),
+ Month: all(months),
+ Dow: all(dow),
+ Location: loc,
+ }, nil
+
+ case "@hourly":
+ return &SpecSchedule{
+ Second: 1 << seconds.min,
+ Minute: 1 << minutes.min,
+ Hour: all(hours),
+ Dom: all(dom),
+ Month: all(months),
+ Dow: all(dow),
+ Location: loc,
+ }, nil
+
+ }
+
+ const every = "@every "
+ if strings.HasPrefix(descriptor, every) {
+ duration, err := time.ParseDuration(descriptor[len(every):])
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse duration %s: %s", descriptor, err)
+ }
+ return Every(duration), nil
+ }
+
+ return nil, fmt.Errorf("unrecognized descriptor: %s", descriptor)
+}
diff --git a/vendor/github.com/robfig/cron/v3/spec.go b/vendor/github.com/robfig/cron/v3/spec.go
new file mode 100644
index 000000000..fa1e241e5
--- /dev/null
+++ b/vendor/github.com/robfig/cron/v3/spec.go
@@ -0,0 +1,188 @@
+package cron
+
+import "time"
+
+// SpecSchedule specifies a duty cycle (to the second granularity), based on a
+// traditional crontab specification. It is computed initially and stored as bit sets.
+type SpecSchedule struct {
+ Second, Minute, Hour, Dom, Month, Dow uint64
+
+ // Override location for this schedule.
+ Location *time.Location
+}
+
+// bounds provides a range of acceptable values (plus a map of name to value).
+type bounds struct {
+ min, max uint
+ names map[string]uint
+}
+
+// The bounds for each field.
+var (
+ seconds = bounds{0, 59, nil}
+ minutes = bounds{0, 59, nil}
+ hours = bounds{0, 23, nil}
+ dom = bounds{1, 31, nil}
+ months = bounds{1, 12, map[string]uint{
+ "jan": 1,
+ "feb": 2,
+ "mar": 3,
+ "apr": 4,
+ "may": 5,
+ "jun": 6,
+ "jul": 7,
+ "aug": 8,
+ "sep": 9,
+ "oct": 10,
+ "nov": 11,
+ "dec": 12,
+ }}
+ dow = bounds{0, 6, map[string]uint{
+ "sun": 0,
+ "mon": 1,
+ "tue": 2,
+ "wed": 3,
+ "thu": 4,
+ "fri": 5,
+ "sat": 6,
+ }}
+)
+
+const (
+ // Set the top bit if a star was included in the expression.
+ starBit = 1 << 63
+)
+
+// Next returns the next time this schedule is activated, greater than the given
+// time. If no time can be found to satisfy the schedule, return the zero time.
+func (s *SpecSchedule) Next(t time.Time) time.Time {
+ // General approach
+ //
+ // For Month, Day, Hour, Minute, Second:
+ // Check if the time value matches. If yes, continue to the next field.
+ // If the field doesn't match the schedule, then increment the field until it matches.
+ // While incrementing the field, a wrap-around brings it back to the beginning
+ // of the field list (since it is necessary to re-verify previous field
+ // values)
+
+ // Convert the given time into the schedule's timezone, if one is specified.
+ // Save the original timezone so we can convert back after we find a time.
+ // Note that schedules without a time zone specified (time.Local) are treated
+ // as local to the time provided.
+ origLocation := t.Location()
+ loc := s.Location
+ if loc == time.Local {
+ loc = t.Location()
+ }
+ if s.Location != time.Local {
+ t = t.In(s.Location)
+ }
+
+ // Start at the earliest possible time (the upcoming second).
+ t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
+
+ // This flag indicates whether a field has been incremented.
+ added := false
+
+ // If no time is found within five years, return zero.
+ yearLimit := t.Year() + 5
+
+WRAP:
+ if t.Year() > yearLimit {
+ return time.Time{}
+ }
+
+ // Find the first applicable month.
+ // If it's this month, then do nothing.
+	for 1<<uint(t.Month())&s.Month == 0 {
+		// If we have to add a month, reset the other parts to 0.
+		if !added {
+			added = true
+			// Otherwise, set the date at the beginning (since the current time is irrelevant).
+			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, loc)
+		}
+		t = t.AddDate(0, 1, 0)
+
+		// Wrapped around.
+		if t.Month() == time.January {
+			goto WRAP
+		}
+	}
+
+	// Now get a day in that month.
+	//
+	// NOTE: This causes issues for daylight savings regimes where midnight does
+	// not exist. For example: Sao Paulo has DST that transforms midnight on
+	// 11/3 into 1am. Handle that by noticing when the Hour ends up != 0.
+	for !dayMatches(s, t) {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, loc)
+		}
+		t = t.AddDate(0, 0, 1)
+		// Notice if the hour is no longer midnight due to DST.
+		// Add an hour if it's 23, subtract an hour if it's 1.
+		if t.Hour() != 0 {
+			if t.Hour() > 12 {
+				t = t.Add(time.Duration(24-t.Hour()) * time.Hour)
+			} else {
+				t = t.Add(time.Duration(-t.Hour()) * time.Hour)
+			}
+		}
+
+		if t.Day() == 1 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Hour())&s.Hour == 0 {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, loc)
+		}
+		t = t.Add(1 * time.Hour)
+
+		if t.Hour() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Minute())&s.Minute == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Minute)
+		}
+		t = t.Add(1 * time.Minute)
+
+		if t.Minute() == 0 {
+			goto WRAP
+		}
+	}
+
+	for 1<<uint(t.Second())&s.Second == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Second)
+		}
+		t = t.Add(1 * time.Second)
+
+		if t.Second() == 0 {
+			goto WRAP
+		}
+	}
+
+	return t.In(origLocation)
+}
+
+// dayMatches returns true if the schedule's day-of-week and day-of-month
+// restrictions are satisfied by the given time.
+func dayMatches(s *SpecSchedule, t time.Time) bool {
+	var (
+		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
+		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
+	)
+	if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
+		return domMatch && dowMatch
+	}
+	return domMatch || dowMatch
+}
diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
index 904b57e01..28cd99c7f 100644
--- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
+++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go
@@ -16,7 +16,7 @@ Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To
choose, you can pass the `New` functions from the different SHA packages to
pbkdf2.Key.
*/
-package pbkdf2 // import "golang.org/x/crypto/pbkdf2"
+package pbkdf2
import (
"crypto/hmac"
diff --git a/vendor/golang.org/x/mod/module/module.go b/vendor/golang.org/x/mod/module/module.go
index cac1a899e..2a364b229 100644
--- a/vendor/golang.org/x/mod/module/module.go
+++ b/vendor/golang.org/x/mod/module/module.go
@@ -506,7 +506,6 @@ var badWindowsNames = []string{
"PRN",
"AUX",
"NUL",
- "COM0",
"COM1",
"COM2",
"COM3",
@@ -516,7 +515,6 @@ var badWindowsNames = []string{
"COM7",
"COM8",
"COM9",
- "LPT0",
"LPT1",
"LPT2",
"LPT3",
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 98a49c6b6..61f511f97 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -827,10 +827,6 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
cc.henc.SetMaxDynamicTableSizeLimit(t.maxEncoderHeaderTableSize())
cc.peerMaxHeaderTableSize = initialHeaderTableSize
- if t.AllowHTTP {
- cc.nextStreamID = 3
- }
-
if cs, ok := c.(connectionStater); ok {
state := cs.ConnectionState()
cc.tlsState = &state
diff --git a/vendor/golang.org/x/sys/unix/mremap.go b/vendor/golang.org/x/sys/unix/mremap.go
index fd45fe529..3a5e776f8 100644
--- a/vendor/golang.org/x/sys/unix/mremap.go
+++ b/vendor/golang.org/x/sys/unix/mremap.go
@@ -50,3 +50,8 @@ func (m *mremapMmapper) Mremap(oldData []byte, newLength int, flags int) (data [
func Mremap(oldData []byte, newLength int, flags int) (data []byte, err error) {
return mapper.Mremap(oldData, newLength, flags)
}
+
+func MremapPtr(oldAddr unsafe.Pointer, oldSize uintptr, newAddr unsafe.Pointer, newSize uintptr, flags int) (ret unsafe.Pointer, err error) {
+ xaddr, err := mapper.mremap(uintptr(oldAddr), oldSize, newSize, flags, uintptr(newAddr))
+ return unsafe.Pointer(xaddr), err
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 59542a897..4cc7b0059 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -542,6 +542,18 @@ func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
}
}
+//sys pthread_chdir_np(path string) (err error)
+
+func PthreadChdir(path string) (err error) {
+ return pthread_chdir_np(path)
+}
+
+//sys pthread_fchdir_np(fd int) (err error)
+
+func PthreadFchdir(fd int) (err error) {
+ return pthread_fchdir_np(fd)
+}
+
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/golang.org/x/sys/unix/syscall_unix.go
index 77081de8c..4e92e5aa4 100644
--- a/vendor/golang.org/x/sys/unix/syscall_unix.go
+++ b/vendor/golang.org/x/sys/unix/syscall_unix.go
@@ -154,6 +154,15 @@ func Munmap(b []byte) (err error) {
return mapper.Munmap(b)
}
+func MmapPtr(fd int, offset int64, addr unsafe.Pointer, length uintptr, prot int, flags int) (ret unsafe.Pointer, err error) {
+ xaddr, err := mapper.mmap(uintptr(addr), length, prot, flags, fd, offset)
+ return unsafe.Pointer(xaddr), err
+}
+
+func MunmapPtr(addr unsafe.Pointer, length uintptr) (err error) {
+ return mapper.munmap(uintptr(addr), length)
+}
+
func Read(fd int, p []byte) (n int, err error) {
n, err = read(fd, p)
if raceenabled {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index ccb02f240..07642c308 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func pthread_chdir_np(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pthread_chdir_np_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pthread_fchdir_np(fd int) (err error) {
+ _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pthread_fchdir_np_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index 8b8bb2840..923e08cb7 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
+TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pthread_chdir_np(SB)
+GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB)
+
+TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pthread_fchdir_np(SB)
+GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
+
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 1b40b997b..7d73dda64 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -760,6 +760,39 @@ var libc_sysctl_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func pthread_chdir_np(path string) (err error) {
+ var _p0 *byte
+ _p0, err = BytePtrFromString(path)
+ if err != nil {
+ return
+ }
+ _, _, e1 := syscall_syscall(libc_pthread_chdir_np_trampoline_addr, uintptr(unsafe.Pointer(_p0)), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pthread_chdir_np_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pthread_chdir_np pthread_chdir_np "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pthread_fchdir_np(fd int) (err error) {
+ _, _, e1 := syscall_syscall(libc_pthread_fchdir_np_trampoline_addr, uintptr(fd), 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pthread_fchdir_np_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pthread_fchdir_np pthread_fchdir_np "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error) {
_, _, e1 := syscall_syscall6(libc_sendfile_trampoline_addr, uintptr(infd), uintptr(outfd), uintptr(offset), uintptr(unsafe.Pointer(len)), uintptr(hdtr), uintptr(flags))
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index 08362c1ab..057700111 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -228,6 +228,16 @@ TEXT libc_sysctl_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_sysctl_trampoline_addr(SB), RODATA, $8
DATA ·libc_sysctl_trampoline_addr(SB)/8, $libc_sysctl_trampoline<>(SB)
+TEXT libc_pthread_chdir_np_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pthread_chdir_np(SB)
+GLOBL ·libc_pthread_chdir_np_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pthread_chdir_np_trampoline_addr(SB)/8, $libc_pthread_chdir_np_trampoline<>(SB)
+
+TEXT libc_pthread_fchdir_np_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pthread_fchdir_np(SB)
+GLOBL ·libc_pthread_fchdir_np_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pthread_fchdir_np_trampoline_addr(SB)/8, $libc_pthread_fchdir_np_trampoline<>(SB)
+
TEXT libc_sendfile_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_sendfile(SB)
GLOBL ·libc_sendfile_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/windows/security_windows.go b/vendor/golang.org/x/sys/windows/security_windows.go
index 6f7d2ac70..97651b5bd 100644
--- a/vendor/golang.org/x/sys/windows/security_windows.go
+++ b/vendor/golang.org/x/sys/windows/security_windows.go
@@ -894,7 +894,7 @@ type ACL struct {
aclRevision byte
sbz1 byte
aclSize uint16
- aceCount uint16
+ AceCount uint16
sbz2 uint16
}
@@ -1087,6 +1087,27 @@ type EXPLICIT_ACCESS struct {
Trustee TRUSTEE
}
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header
+type ACE_HEADER struct {
+ AceType uint8
+ AceFlags uint8
+ AceSize uint16
+}
+
+// https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-access_allowed_ace
+type ACCESS_ALLOWED_ACE struct {
+ Header ACE_HEADER
+ Mask ACCESS_MASK
+ SidStart uint32
+}
+
+const (
+ // Constants for AceType
+ // https://learn.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-ace_header
+ ACCESS_ALLOWED_ACE_TYPE = 0
+ ACCESS_DENIED_ACE_TYPE = 1
+)
+
// This type is the union inside of TRUSTEE and must be created using one of the TrusteeValueFrom* functions.
type TrusteeValue uintptr
@@ -1158,6 +1179,7 @@ type OBJECTS_AND_NAME struct {
//sys makeSelfRelativeSD(absoluteSD *SECURITY_DESCRIPTOR, selfRelativeSD *SECURITY_DESCRIPTOR, selfRelativeSDSize *uint32) (err error) = advapi32.MakeSelfRelativeSD
//sys setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCESS, oldACL *ACL, newACL **ACL) (ret error) = advapi32.SetEntriesInAclW
+//sys GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) = advapi32.GetAce
// Control returns the security descriptor control bits.
func (sd *SECURITY_DESCRIPTOR) Control() (control SECURITY_DESCRIPTOR_CONTROL, revision uint32, err error) {
diff --git a/vendor/golang.org/x/sys/windows/zsyscall_windows.go b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
index 9f73df75b..eba761018 100644
--- a/vendor/golang.org/x/sys/windows/zsyscall_windows.go
+++ b/vendor/golang.org/x/sys/windows/zsyscall_windows.go
@@ -91,6 +91,7 @@ var (
procEnumServicesStatusExW = modadvapi32.NewProc("EnumServicesStatusExW")
procEqualSid = modadvapi32.NewProc("EqualSid")
procFreeSid = modadvapi32.NewProc("FreeSid")
+ procGetAce = modadvapi32.NewProc("GetAce")
procGetLengthSid = modadvapi32.NewProc("GetLengthSid")
procGetNamedSecurityInfoW = modadvapi32.NewProc("GetNamedSecurityInfoW")
procGetSecurityDescriptorControl = modadvapi32.NewProc("GetSecurityDescriptorControl")
@@ -1224,6 +1225,14 @@ func setEntriesInAcl(countExplicitEntries uint32, explicitEntries *EXPLICIT_ACCE
return
}
+func GetAce(acl *ACL, aceIndex uint32, pAce **ACCESS_ALLOWED_ACE) (ret error) {
+ r0, _, _ := syscall.Syscall(procGetAce.Addr(), 3, uintptr(unsafe.Pointer(acl)), uintptr(aceIndex), uintptr(unsafe.Pointer(pAce)))
+ if r0 == 0 {
+ ret = GetLastError()
+ }
+ return
+}
+
func SetKernelObjectSecurity(handle Handle, securityInformation SECURITY_INFORMATION, securityDescriptor *SECURITY_DESCRIPTOR) (err error) {
r1, _, e1 := syscall.Syscall(procSetKernelObjectSecurity.Addr(), 3, uintptr(handle), uintptr(securityInformation), uintptr(unsafe.Pointer(securityDescriptor)))
if r1 == 0 {
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
index 2c4c4e232..6e34df461 100644
--- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
+++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
@@ -106,8 +106,21 @@ func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Nod
// Does augmented child strictly contain [start, end)?
if augPos <= start && end <= augEnd {
- _, isToken := child.(tokenNode)
- return isToken || visit(child)
+ if is[tokenNode](child) {
+ return true
+ }
+
+ // childrenOf elides the FuncType node beneath FuncDecl.
+ // Add it back here for TypeParams, Params, Results,
+			// all FieldLists. But we don't add it back for the "func" token
+			// even though it is in the tree at FuncDecl.Type.Func.
+ if decl, ok := node.(*ast.FuncDecl); ok {
+ if fields, ok := child.(*ast.FieldList); ok && fields != decl.Recv {
+ path = append(path, decl.Type)
+ }
+ }
+
+ return visit(child)
}
// Does [start, end) overlap multiple children?
@@ -313,6 +326,8 @@ func childrenOf(n ast.Node) []ast.Node {
//
// As a workaround, we inline the case for FuncType
// here and order things correctly.
+ // We also need to insert the elided FuncType just
+ // before the 'visit' recursion.
//
children = nil // discard ast.Walk(FuncDecl) info subtrees
children = append(children, tok(n.Type.Func, len("func")))
@@ -632,3 +647,8 @@ func NodeDescription(n ast.Node) string {
}
panic(fmt.Sprintf("unexpected node type: %T", n))
}
+
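+// is reports whether x can be type-asserted to T.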
+func is[T any](x any) bool {
+ _, ok := x.(T)
+ return ok
+}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go
index 919d5305a..6bdcf70ac 100644
--- a/vendor/golang.org/x/tools/go/ast/astutil/util.go
+++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go
@@ -7,6 +7,7 @@ package astutil
import "go/ast"
// Unparen returns e with any enclosing parentheses stripped.
+// TODO(adonovan): use go1.22's ast.Unparen.
func Unparen(e ast.Expr) ast.Expr {
for {
p, ok := e.(*ast.ParenExpr)
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index af0ee6c61..2e59ff855 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -200,12 +200,14 @@ func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io
return
}
-func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
- log := i.Logf
- if log == nil {
- log = func(string, ...interface{}) {}
+// logf logs if i.Logf is non-nil.
+func (i *Invocation) logf(format string, args ...any) {
+ if i.Logf != nil {
+ i.Logf(format, args...)
}
+}
+func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
goArgs := []string{i.Verb}
appendModFile := func() {
@@ -277,7 +279,12 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
cmd.Dir = i.WorkingDir
}
- defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
+ debugStr := cmdDebugStr(cmd)
+ i.logf("starting %v", debugStr)
+ start := time.Now()
+ defer func() {
+ i.logf("%s for %v", time.Since(start), debugStr)
+ }()
return runCmdContext(ctx, cmd)
}
@@ -514,7 +521,7 @@ func WriteOverlays(overlay map[string][]byte) (filename string, cleanup func(),
for k, v := range overlay {
// Use a unique basename for each file (001-foo.go),
// to avoid creating nested directories.
- base := fmt.Sprintf("%d-%s.go", 1+len(overlays), filepath.Base(k))
+ base := fmt.Sprintf("%d-%s", 1+len(overlays), filepath.Base(k))
filename := filepath.Join(dir, base)
err := os.WriteFile(filename, v, 0666)
if err != nil {
diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go
index 4569313a0..dc7d50a7a 100644
--- a/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -27,6 +27,7 @@ import (
"unicode"
"unicode/utf8"
+ "golang.org/x/sync/errgroup"
"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/gocommand"
@@ -365,9 +366,7 @@ func (p *pass) load() ([]*ImportFix, bool) {
if p.loadRealPackageNames {
err := p.loadPackageNames(append(imports, p.candidates...))
if err != nil {
- if p.env.Logf != nil {
- p.env.Logf("loading package names: %v", err)
- }
+ p.env.logf("loading package names: %v", err)
return nil, false
}
}
@@ -563,7 +562,14 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
// fixImports adds and removes imports from f so that all its references are
// satisfied and there are no unused imports.
-func fixImports(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
+//
+// This is declared as a variable rather than a function so goimports can
+// easily be extended by adding a file with an init function.
+//
+// DO NOT REMOVE: used internally at Google.
+var fixImports = fixImportsDefault
+
+func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
fixes, err := getFixes(context.Background(), fset, f, filename, env)
if err != nil {
return err
@@ -580,9 +586,7 @@ func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename st
return nil, err
}
srcDir := filepath.Dir(abs)
- if env.Logf != nil {
- env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
- }
+ env.logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
// First pass: looking only at f, and using the naive algorithm to
// derive package names from import paths, see if the file is already
@@ -1014,16 +1018,26 @@ func (e *ProcessEnv) GetResolver() (Resolver, error) {
// already know the view type.
if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
e.resolver = newGopathResolver(e)
+ e.logf("created gopath resolver")
} else if r, err := newModuleResolver(e, e.ModCache); err != nil {
e.resolverErr = err
+ e.logf("failed to create module resolver: %v", err)
} else {
e.resolver = Resolver(r)
+ e.logf("created module resolver")
}
}
return e.resolver, e.resolverErr
}
+// logf logs if e.Logf is non-nil.
+func (e *ProcessEnv) logf(format string, args ...any) {
+ if e.Logf != nil {
+ e.Logf(format, args...)
+ }
+}
+
// buildContext returns the build.Context to use for matching files.
//
// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform
@@ -1127,8 +1141,8 @@ type Resolver interface {
// scan works with callback to search for packages. See scanCallback for details.
scan(ctx context.Context, callback *scanCallback) error
- // loadExports returns the set of exported symbols in the package at dir.
- // loadExports may be called concurrently.
+ // loadExports returns the package name and set of exported symbols in the
+ // package at dir. loadExports may be called concurrently.
loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error)
// scoreImportPath returns the relevance for an import path.
@@ -1205,54 +1219,52 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
imp *ImportInfo
pkg *packageInfo
}
- results := make(chan result, len(refs))
+ results := make([]*result, len(refs))
- ctx, cancel := context.WithCancel(ctx)
- var wg sync.WaitGroup
- defer func() {
- cancel()
- wg.Wait()
- }()
- var (
- firstErr error
- firstErrOnce sync.Once
- )
- for pkgName, symbols := range refs {
- wg.Add(1)
- go func(pkgName string, symbols map[string]bool) {
- defer wg.Done()
+ g, ctx := errgroup.WithContext(ctx)
+
+ searcher := symbolSearcher{
+ logf: pass.env.logf,
+ srcDir: pass.srcDir,
+ xtest: strings.HasSuffix(pass.f.Name.Name, "_test"),
+ loadExports: resolver.loadExports,
+ }
- found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols)
+ i := 0
+ for pkgName, symbols := range refs {
+ index := i // claim an index in results
+ i++
+ pkgName := pkgName
+ symbols := symbols
+ g.Go(func() error {
+ found, err := searcher.search(ctx, found[pkgName], pkgName, symbols)
if err != nil {
- firstErrOnce.Do(func() {
- firstErr = err
- cancel()
- })
- return
+ return err
}
-
if found == nil {
- return // No matching package.
+ return nil // No matching package.
}
imp := &ImportInfo{
ImportPath: found.importPathShort,
}
-
pkg := &packageInfo{
name: pkgName,
exports: symbols,
}
- results <- result{imp, pkg}
- }(pkgName, symbols)
+ results[index] = &result{imp, pkg}
+ return nil
+ })
+ }
+ if err := g.Wait(); err != nil {
+ return err
}
- go func() {
- wg.Wait()
- close(results)
- }()
- for result := range results {
+ for _, result := range results {
+ if result == nil {
+ continue
+ }
// Don't offer completions that would shadow predeclared
// names, such as github.com/coreos/etcd/error.
if types.Universe.Lookup(result.pkg.name) != nil { // predeclared
@@ -1266,7 +1278,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
}
pass.addCandidate(result.imp, result.pkg)
}
- return firstErr
+ return nil
}
// notIdentifier reports whether ch is an invalid identifier character.
@@ -1610,9 +1622,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
fullFile := filepath.Join(dir, fi.Name())
f, err := parser.ParseFile(fset, fullFile, nil, 0)
if err != nil {
- if env.Logf != nil {
- env.Logf("error parsing %v: %v", fullFile, err)
- }
+ env.logf("error parsing %v: %v", fullFile, err)
continue
}
if f.Name.Name == "documentation" {
@@ -1648,9 +1658,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
}
sortSymbols(exports)
- if env.Logf != nil {
- env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports)
- }
+ env.logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports)
return pkgName, exports, nil
}
@@ -1660,25 +1668,39 @@ func sortSymbols(syms []stdlib.Symbol) {
})
}
-// findImport searches for a package with the given symbols.
-// If no package is found, findImport returns ("", false, nil)
-func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
+// A symbolSearcher searches for a package with a set of symbols, among a set
+// of candidates. See [symbolSearcher.search].
+//
+// The search occurs within the scope of a single file, with context captured
+// in srcDir and xtest.
+type symbolSearcher struct {
+ logf func(string, ...any)
+ srcDir string // directory containing the file
+	xtest       bool   // if set, the containing file is an x_test file
+ loadExports func(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error)
+}
+
+// search searches the provided candidates for a package containing all
+// exported symbols.
+//
+// If successful, returns the resulting package.
+func (s *symbolSearcher) search(ctx context.Context, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
// Sort the candidates by their import package length,
// assuming that shorter package names are better than long
// ones. Note that this sorts by the de-vendored name, so
// there's no "penalty" for vendoring.
sort.Sort(byDistanceOrImportPathShortLength(candidates))
- if pass.env.Logf != nil {
+ if s.logf != nil {
for i, c := range candidates {
- pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
+ s.logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
}
}
- resolver, err := pass.env.GetResolver()
- if err != nil {
- return nil, err
- }
- // Collect exports for packages with matching names.
+	// Arrange rescv so that we can await results in order of relevance
+ // and exit as soon as we find the first match.
+ //
+ // Search with bounded concurrency, returning as soon as the first result
+ // among rescv is non-nil.
rescv := make([]chan *pkg, len(candidates))
for i := range candidates {
rescv[i] = make(chan *pkg, 1)
@@ -1686,6 +1708,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
const maxConcurrentPackageImport = 4
loadExportsSem := make(chan struct{}, maxConcurrentPackageImport)
+ // Ensure that all work is completed at exit.
ctx, cancel := context.WithCancel(ctx)
var wg sync.WaitGroup
defer func() {
@@ -1693,6 +1716,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
wg.Wait()
}()
+ // Start the search.
wg.Add(1)
go func() {
defer wg.Done()
@@ -1703,55 +1727,67 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
return
}
+ i := i
+ c := c
wg.Add(1)
- go func(c pkgDistance, resc chan<- *pkg) {
+ go func() {
defer func() {
<-loadExportsSem
wg.Done()
}()
-
- if pass.env.Logf != nil {
- pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
+ if s.logf != nil {
+ s.logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
}
- // If we're an x_test, load the package under test's test variant.
- includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir
- _, exports, err := resolver.loadExports(ctx, c.pkg, includeTest)
+ pkg, err := s.searchOne(ctx, c, symbols)
if err != nil {
- if pass.env.Logf != nil {
- pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
- }
- resc <- nil
- return
- }
-
- exportsMap := make(map[string]bool, len(exports))
- for _, sym := range exports {
- exportsMap[sym.Name] = true
- }
-
- // If it doesn't have the right
- // symbols, send nil to mean no match.
- for symbol := range symbols {
- if !exportsMap[symbol] {
- resc <- nil
- return
+ if s.logf != nil && ctx.Err() == nil {
+ s.logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
}
+ pkg = nil
}
- resc <- c.pkg
- }(c, rescv[i])
+ rescv[i] <- pkg // may be nil
+ }()
}
}()
+ // Await the first (best) result.
for _, resc := range rescv {
- pkg := <-resc
- if pkg == nil {
- continue
+ select {
+ case r := <-resc:
+ if r != nil {
+ return r, nil
+ }
+ case <-ctx.Done():
+ return nil, ctx.Err()
}
- return pkg, nil
}
return nil, nil
}
+func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols map[string]bool) (*pkg, error) {
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+ // If we're considering the package under test from an x_test, load the
+ // test variant.
+ includeTest := s.xtest && c.pkg.dir == s.srcDir
+ _, exports, err := s.loadExports(ctx, c.pkg, includeTest)
+ if err != nil {
+ return nil, err
+ }
+
+ exportsMap := make(map[string]bool, len(exports))
+ for _, sym := range exports {
+ exportsMap[sym.Name] = true
+ }
+ for symbol := range symbols {
+ if !exportsMap[symbol] {
+ return nil, nil // no match
+ }
+ }
+ return c.pkg, nil
+}
+
// pkgIsCandidate reports whether pkg is a candidate for satisfying the
// finding which package pkgIdent in the file named by filename is trying
// to refer to.
@@ -1771,58 +1807,24 @@ func pkgIsCandidate(filename string, refs references, pkg *pkg) bool {
}
// Speed optimization to minimize disk I/O:
- // the last two components on disk must contain the
- // package name somewhere.
//
- // This permits mismatch naming like directory
- // "go-foo" being package "foo", or "pkg.v3" being "pkg",
- // or directory "google.golang.org/api/cloudbilling/v1"
- // being package "cloudbilling", but doesn't
- // permit a directory "foo" to be package
- // "bar", which is strongly discouraged
- // anyway. There's no reason goimports needs
- // to be slow just to accommodate that.
+ // Use the matchesPath heuristic to filter to package paths that could
+ // reasonably match a dangling reference.
+ //
+ // This permits mismatched naming like directory "go-foo" being package "foo",
+ // or "pkg.v3" being "pkg", or directory
+ // "google.golang.org/api/cloudbilling/v1" being package "cloudbilling", but
+ // doesn't permit a directory "foo" to be package "bar", which is strongly
+ // discouraged anyway. There's no reason goimports needs to be slow just to
+ // accommodate that.
for pkgIdent := range refs {
- lastTwo := lastTwoComponents(pkg.importPathShort)
- if strings.Contains(lastTwo, pkgIdent) {
- return true
- }
- if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) {
- lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
- if strings.Contains(lastTwo, pkgIdent) {
- return true
- }
- }
- }
- return false
-}
-
-func hasHyphenOrUpperASCII(s string) bool {
- for i := 0; i < len(s); i++ {
- b := s[i]
- if b == '-' || ('A' <= b && b <= 'Z') {
+ if matchesPath(pkgIdent, pkg.importPathShort) {
return true
}
}
return false
}
-func lowerASCIIAndRemoveHyphen(s string) (ret string) {
- buf := make([]byte, 0, len(s))
- for i := 0; i < len(s); i++ {
- b := s[i]
- switch {
- case b == '-':
- continue
- case 'A' <= b && b <= 'Z':
- buf = append(buf, b+('a'-'A'))
- default:
- buf = append(buf, b)
- }
- }
- return string(buf)
-}
-
// canUse reports whether the package in dir is usable from filename,
// respecting the Go "internal" and "vendor" visibility rules.
func canUse(filename, dir string) bool {
@@ -1863,19 +1865,84 @@ func canUse(filename, dir string) bool {
return !strings.Contains(relSlash, "/vendor/") && !strings.Contains(relSlash, "/internal/") && !strings.HasSuffix(relSlash, "/internal")
}
-// lastTwoComponents returns at most the last two path components
-// of v, using either / or \ as the path separator.
-func lastTwoComponents(v string) string {
+// matchesPath reports whether ident may match a potential package name
+// referred to by path, using heuristics to filter out unidiomatic package
+// names.
+//
+// Specifically, it checks whether either of the last two '/'- or '\'-delimited
+// path segments matches the identifier. The segment-matching heuristic must
+// allow for various conventions around segment naming, including go-foo,
+// foo-go, and foo.v3. To handle all of these, matching considers both (1) the
+// entire segment, ignoring '-' and '.', and (2) the last subsegment
+// separated by '-' or '.'. So the segment foo-go matches all of the following
+// identifiers: foo, go, and foogo. All matches are case insensitive (for ASCII
+// identifiers).
+//
+// See the docstring for [pkgIsCandidate] for an explanation of how this
+// heuristic filters potential candidate packages.
+func matchesPath(ident, path string) bool {
+ // Ignore case, for ASCII.
+ lowerIfASCII := func(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+ }
+
+ // match reports whether path[start:end] matches ident, ignoring [.-].
+ match := func(start, end int) bool {
+ ii := len(ident) - 1 // current byte in ident
+ pi := end - 1 // current byte in path
+ for ; pi >= start && ii >= 0; pi-- {
+ pb := path[pi]
+ if pb == '-' || pb == '.' {
+ continue
+ }
+ pb = lowerIfASCII(pb)
+ ib := lowerIfASCII(ident[ii])
+ if pb != ib {
+ return false
+ }
+ ii--
+ }
+ return ii < 0 && pi < start // all bytes matched
+ }
+
+ // segmentEnd and subsegmentEnd hold the end points of the current segment
+ // and subsegment intervals.
+ segmentEnd := len(path)
+ subsegmentEnd := len(path)
+
+ // Count slashes; we only care about the last two segments.
nslash := 0
- for i := len(v) - 1; i >= 0; i-- {
- if v[i] == '/' || v[i] == '\\' {
+
+ for i := len(path) - 1; i >= 0; i-- {
+ switch b := path[i]; b {
+ // TODO(rfindley): we handle backslashes here only because the previous
+ // heuristic handled backslashes. This is perhaps overly defensive, but is
+ // the result of many lessons regarding Chesterton's fence and the
+ // goimports codebase.
+ //
+ // However, this function is only ever called with something called an
+ // 'importPath'. Is it possible that this is a real import path, and
+ // therefore we need only consider forward slashes?
+ case '/', '\\':
+ if match(i+1, segmentEnd) || match(i+1, subsegmentEnd) {
+ return true
+ }
nslash++
if nslash == 2 {
- return v[i:]
+ return false // did not match above
+ }
+ segmentEnd, subsegmentEnd = i, i // reset
+ case '-', '.':
+ if match(i+1, subsegmentEnd) {
+ return true
}
+ subsegmentEnd = i
}
}
- return v
+ return match(0, segmentEnd) || match(0, subsegmentEnd)
}
type visitFn func(node ast.Node) ast.Visitor
diff --git a/vendor/golang.org/x/tools/internal/imports/mod.go b/vendor/golang.org/x/tools/internal/imports/mod.go
index 82fe644a1..91221fda3 100644
--- a/vendor/golang.org/x/tools/internal/imports/mod.go
+++ b/vendor/golang.org/x/tools/internal/imports/mod.go
@@ -265,9 +265,7 @@ func (r *ModuleResolver) initAllMods() error {
return err
}
if mod.Dir == "" {
- if r.env.Logf != nil {
- r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path)
- }
+ r.env.logf("module %v has not been downloaded and will be ignored", mod.Path)
// Can't do anything with a module that's not downloaded.
continue
}
@@ -766,9 +764,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
}
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
if err != nil {
- if r.env.Logf != nil {
- r.env.Logf("decoding module cache path %q: %v", subdir, err)
- }
+ r.env.logf("decoding module cache path %q: %v", subdir, err)
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index fd6892075..a928acf29 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -23,6 +23,7 @@ var PackageSymbols = map[string][]Symbol{
{"ErrWriteAfterClose", Var, 0},
{"ErrWriteTooLong", Var, 0},
{"FileInfoHeader", Func, 1},
+ {"FileInfoNames", Type, 23},
{"Format", Type, 10},
{"FormatGNU", Const, 10},
{"FormatPAX", Const, 10},
@@ -820,6 +821,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*ConnectionState).ExportKeyingMaterial", Method, 11},
{"(*Dialer).Dial", Method, 15},
{"(*Dialer).DialContext", Method, 15},
+ {"(*ECHRejectionError).Error", Method, 23},
{"(*QUICConn).Close", Method, 21},
{"(*QUICConn).ConnectionState", Method, 21},
{"(*QUICConn).HandleData", Method, 21},
@@ -827,6 +829,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*QUICConn).SendSessionTicket", Method, 21},
{"(*QUICConn).SetTransportParameters", Method, 21},
{"(*QUICConn).Start", Method, 21},
+ {"(*QUICConn).StoreSession", Method, 23},
{"(*SessionState).Bytes", Method, 21},
{"(AlertError).Error", Method, 21},
{"(ClientAuthType).String", Method, 15},
@@ -877,6 +880,8 @@ var PackageSymbols = map[string][]Symbol{
{"Config.ClientSessionCache", Field, 3},
{"Config.CurvePreferences", Field, 3},
{"Config.DynamicRecordSizingDisabled", Field, 7},
+ {"Config.EncryptedClientHelloConfigList", Field, 23},
+ {"Config.EncryptedClientHelloRejectionVerify", Field, 23},
{"Config.GetCertificate", Field, 4},
{"Config.GetClientCertificate", Field, 8},
{"Config.GetConfigForClient", Field, 8},
@@ -902,6 +907,7 @@ var PackageSymbols = map[string][]Symbol{
{"ConnectionState", Type, 0},
{"ConnectionState.CipherSuite", Field, 0},
{"ConnectionState.DidResume", Field, 1},
+ {"ConnectionState.ECHAccepted", Field, 23},
{"ConnectionState.HandshakeComplete", Field, 0},
{"ConnectionState.NegotiatedProtocol", Field, 0},
{"ConnectionState.NegotiatedProtocolIsMutual", Field, 0},
@@ -925,6 +931,8 @@ var PackageSymbols = map[string][]Symbol{
{"ECDSAWithP384AndSHA384", Const, 8},
{"ECDSAWithP521AndSHA512", Const, 8},
{"ECDSAWithSHA1", Const, 10},
+ {"ECHRejectionError", Type, 23},
+ {"ECHRejectionError.RetryConfigList", Field, 23},
{"Ed25519", Const, 13},
{"InsecureCipherSuites", Func, 14},
{"Listen", Func, 0},
@@ -943,6 +951,7 @@ var PackageSymbols = map[string][]Symbol{
{"ParseSessionState", Func, 21},
{"QUICClient", Func, 21},
{"QUICConfig", Type, 21},
+ {"QUICConfig.EnableStoreSessionEvent", Field, 23},
{"QUICConfig.TLSConfig", Field, 21},
{"QUICConn", Type, 21},
{"QUICEncryptionLevel", Type, 21},
@@ -954,16 +963,20 @@ var PackageSymbols = map[string][]Symbol{
{"QUICEvent.Data", Field, 21},
{"QUICEvent.Kind", Field, 21},
{"QUICEvent.Level", Field, 21},
+ {"QUICEvent.SessionState", Field, 23},
{"QUICEvent.Suite", Field, 21},
{"QUICEventKind", Type, 21},
{"QUICHandshakeDone", Const, 21},
{"QUICNoEvent", Const, 21},
{"QUICRejectedEarlyData", Const, 21},
+ {"QUICResumeSession", Const, 23},
{"QUICServer", Func, 21},
{"QUICSessionTicketOptions", Type, 21},
{"QUICSessionTicketOptions.EarlyData", Field, 21},
+ {"QUICSessionTicketOptions.Extra", Field, 23},
{"QUICSetReadSecret", Const, 21},
{"QUICSetWriteSecret", Const, 21},
+ {"QUICStoreSession", Const, 23},
{"QUICTransportParameters", Const, 21},
{"QUICTransportParametersRequired", Const, 21},
{"QUICWriteData", Const, 21},
@@ -1036,6 +1049,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*Certificate).Verify", Method, 0},
{"(*Certificate).VerifyHostname", Method, 0},
{"(*CertificateRequest).CheckSignature", Method, 5},
+ {"(*OID).UnmarshalBinary", Method, 23},
+ {"(*OID).UnmarshalText", Method, 23},
{"(*RevocationList).CheckSignatureFrom", Method, 19},
{"(CertificateInvalidError).Error", Method, 0},
{"(ConstraintViolationError).Error", Method, 0},
@@ -1043,6 +1058,8 @@ var PackageSymbols = map[string][]Symbol{
{"(InsecureAlgorithmError).Error", Method, 6},
{"(OID).Equal", Method, 22},
{"(OID).EqualASN1OID", Method, 22},
+ {"(OID).MarshalBinary", Method, 23},
+ {"(OID).MarshalText", Method, 23},
{"(OID).String", Method, 22},
{"(PublicKeyAlgorithm).String", Method, 10},
{"(SignatureAlgorithm).String", Method, 6},
@@ -1196,6 +1213,7 @@ var PackageSymbols = map[string][]Symbol{
{"ParseCertificates", Func, 0},
{"ParseDERCRL", Func, 0},
{"ParseECPrivateKey", Func, 1},
+ {"ParseOID", Func, 23},
{"ParsePKCS1PrivateKey", Func, 0},
{"ParsePKCS1PublicKey", Func, 10},
{"ParsePKCS8PrivateKey", Func, 0},
@@ -2541,6 +2559,7 @@ var PackageSymbols = map[string][]Symbol{
{"PT_NOTE", Const, 0},
{"PT_NULL", Const, 0},
{"PT_OPENBSD_BOOTDATA", Const, 16},
+ {"PT_OPENBSD_NOBTCFI", Const, 23},
{"PT_OPENBSD_RANDOMIZE", Const, 16},
{"PT_OPENBSD_WXNEEDED", Const, 16},
{"PT_PAX_FLAGS", Const, 16},
@@ -3620,13 +3639,16 @@ var PackageSymbols = map[string][]Symbol{
{"STT_COMMON", Const, 0},
{"STT_FILE", Const, 0},
{"STT_FUNC", Const, 0},
+ {"STT_GNU_IFUNC", Const, 23},
{"STT_HIOS", Const, 0},
{"STT_HIPROC", Const, 0},
{"STT_LOOS", Const, 0},
{"STT_LOPROC", Const, 0},
{"STT_NOTYPE", Const, 0},
{"STT_OBJECT", Const, 0},
+ {"STT_RELC", Const, 23},
{"STT_SECTION", Const, 0},
+ {"STT_SRELC", Const, 23},
{"STT_TLS", Const, 0},
{"STV_DEFAULT", Const, 0},
{"STV_HIDDEN", Const, 0},
@@ -4544,11 +4566,14 @@ var PackageSymbols = map[string][]Symbol{
{"URLEncoding", Var, 0},
},
"encoding/binary": {
+ {"Append", Func, 23},
{"AppendByteOrder", Type, 19},
{"AppendUvarint", Func, 19},
{"AppendVarint", Func, 19},
{"BigEndian", Var, 0},
{"ByteOrder", Type, 0},
+ {"Decode", Func, 23},
+ {"Encode", Func, 23},
{"LittleEndian", Var, 0},
{"MaxVarintLen16", Const, 0},
{"MaxVarintLen32", Const, 0},
@@ -5308,6 +5333,7 @@ var PackageSymbols = map[string][]Symbol{
{"ParenExpr.Rparen", Field, 0},
{"ParenExpr.X", Field, 0},
{"Pkg", Const, 0},
+ {"Preorder", Func, 23},
{"Print", Func, 0},
{"RECV", Const, 0},
{"RangeStmt", Type, 0},
@@ -5898,7 +5924,12 @@ var PackageSymbols = map[string][]Symbol{
},
"go/types": {
{"(*Alias).Obj", Method, 22},
+ {"(*Alias).Origin", Method, 23},
+ {"(*Alias).Rhs", Method, 23},
+ {"(*Alias).SetTypeParams", Method, 23},
{"(*Alias).String", Method, 22},
+ {"(*Alias).TypeArgs", Method, 23},
+ {"(*Alias).TypeParams", Method, 23},
{"(*Alias).Underlying", Method, 22},
{"(*ArgumentError).Error", Method, 18},
{"(*ArgumentError).Unwrap", Method, 18},
@@ -5943,6 +5974,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Func).Pkg", Method, 5},
{"(*Func).Pos", Method, 5},
{"(*Func).Scope", Method, 5},
+ {"(*Func).Signature", Method, 23},
{"(*Func).String", Method, 5},
{"(*Func).Type", Method, 5},
{"(*Info).ObjectOf", Method, 5},
@@ -6992,6 +7024,12 @@ var PackageSymbols = map[string][]Symbol{
{"TempFile", Func, 0},
{"WriteFile", Func, 0},
},
+ "iter": {
+ {"Pull", Func, 23},
+ {"Pull2", Func, 23},
+ {"Seq", Type, 23},
+ {"Seq2", Type, 23},
+ },
"log": {
{"(*Logger).Fatal", Method, 0},
{"(*Logger).Fatalf", Method, 0},
@@ -7222,11 +7260,16 @@ var PackageSymbols = map[string][]Symbol{
{"Writer", Type, 0},
},
"maps": {
+ {"All", Func, 23},
{"Clone", Func, 21},
+ {"Collect", Func, 23},
{"Copy", Func, 21},
{"DeleteFunc", Func, 21},
{"Equal", Func, 21},
{"EqualFunc", Func, 21},
+ {"Insert", Func, 23},
+ {"Keys", Func, 23},
+ {"Values", Func, 23},
},
"math": {
{"Abs", Func, 0},
@@ -7617,6 +7660,7 @@ var PackageSymbols = map[string][]Symbol{
},
"math/rand/v2": {
{"(*ChaCha8).MarshalBinary", Method, 22},
+ {"(*ChaCha8).Read", Method, 23},
{"(*ChaCha8).Seed", Method, 22},
{"(*ChaCha8).Uint64", Method, 22},
{"(*ChaCha8).UnmarshalBinary", Method, 22},
@@ -7636,6 +7680,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Rand).NormFloat64", Method, 22},
{"(*Rand).Perm", Method, 22},
{"(*Rand).Shuffle", Method, 22},
+ {"(*Rand).Uint", Method, 23},
{"(*Rand).Uint32", Method, 22},
{"(*Rand).Uint32N", Method, 22},
{"(*Rand).Uint64", Method, 22},
@@ -7663,6 +7708,7 @@ var PackageSymbols = map[string][]Symbol{
{"Rand", Type, 22},
{"Shuffle", Func, 22},
{"Source", Type, 22},
+ {"Uint", Func, 23},
{"Uint32", Func, 22},
{"Uint32N", Func, 22},
{"Uint64", Func, 22},
@@ -7743,6 +7789,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*DNSError).Error", Method, 0},
{"(*DNSError).Temporary", Method, 0},
{"(*DNSError).Timeout", Method, 0},
+ {"(*DNSError).Unwrap", Method, 23},
{"(*Dialer).Dial", Method, 1},
{"(*Dialer).DialContext", Method, 7},
{"(*Dialer).MultipathTCP", Method, 21},
@@ -7809,6 +7856,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*TCPConn).RemoteAddr", Method, 0},
{"(*TCPConn).SetDeadline", Method, 0},
{"(*TCPConn).SetKeepAlive", Method, 0},
+ {"(*TCPConn).SetKeepAliveConfig", Method, 23},
{"(*TCPConn).SetKeepAlivePeriod", Method, 2},
{"(*TCPConn).SetLinger", Method, 0},
{"(*TCPConn).SetNoDelay", Method, 0},
@@ -7922,6 +7970,7 @@ var PackageSymbols = map[string][]Symbol{
{"DNSError.IsTimeout", Field, 0},
{"DNSError.Name", Field, 0},
{"DNSError.Server", Field, 0},
+ {"DNSError.UnwrapErr", Field, 23},
{"DefaultResolver", Var, 8},
{"Dial", Func, 0},
{"DialIP", Func, 0},
@@ -7937,6 +7986,7 @@ var PackageSymbols = map[string][]Symbol{
{"Dialer.DualStack", Field, 2},
{"Dialer.FallbackDelay", Field, 5},
{"Dialer.KeepAlive", Field, 3},
+ {"Dialer.KeepAliveConfig", Field, 23},
{"Dialer.LocalAddr", Field, 1},
{"Dialer.Resolver", Field, 8},
{"Dialer.Timeout", Field, 1},
@@ -7989,10 +8039,16 @@ var PackageSymbols = map[string][]Symbol{
{"Interfaces", Func, 0},
{"InvalidAddrError", Type, 0},
{"JoinHostPort", Func, 0},
+ {"KeepAliveConfig", Type, 23},
+ {"KeepAliveConfig.Count", Field, 23},
+ {"KeepAliveConfig.Enable", Field, 23},
+ {"KeepAliveConfig.Idle", Field, 23},
+ {"KeepAliveConfig.Interval", Field, 23},
{"Listen", Func, 0},
{"ListenConfig", Type, 11},
{"ListenConfig.Control", Field, 11},
{"ListenConfig.KeepAlive", Field, 13},
+ {"ListenConfig.KeepAliveConfig", Field, 23},
{"ListenIP", Func, 0},
{"ListenMulticastUDP", Func, 0},
{"ListenPacket", Func, 0},
@@ -8081,6 +8137,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Request).Context", Method, 7},
{"(*Request).Cookie", Method, 0},
{"(*Request).Cookies", Method, 0},
+ {"(*Request).CookiesNamed", Method, 23},
{"(*Request).FormFile", Method, 0},
{"(*Request).FormValue", Method, 0},
{"(*Request).MultipartReader", Method, 0},
@@ -8148,7 +8205,9 @@ var PackageSymbols = map[string][]Symbol{
{"Cookie.HttpOnly", Field, 0},
{"Cookie.MaxAge", Field, 0},
{"Cookie.Name", Field, 0},
+ {"Cookie.Partitioned", Field, 23},
{"Cookie.Path", Field, 0},
+ {"Cookie.Quoted", Field, 23},
{"Cookie.Raw", Field, 0},
{"Cookie.RawExpires", Field, 0},
{"Cookie.SameSite", Field, 11},
@@ -8225,7 +8284,9 @@ var PackageSymbols = map[string][]Symbol{
{"NoBody", Var, 8},
{"NotFound", Func, 0},
{"NotFoundHandler", Func, 0},
+ {"ParseCookie", Func, 23},
{"ParseHTTPVersion", Func, 0},
+ {"ParseSetCookie", Func, 23},
{"ParseTime", Func, 1},
{"Post", Func, 0},
{"PostForm", Func, 0},
@@ -8252,6 +8313,7 @@ var PackageSymbols = map[string][]Symbol{
{"Request.Host", Field, 0},
{"Request.Method", Field, 0},
{"Request.MultipartForm", Field, 0},
+ {"Request.Pattern", Field, 23},
{"Request.PostForm", Field, 1},
{"Request.Proto", Field, 0},
{"Request.ProtoMajor", Field, 0},
@@ -8453,6 +8515,7 @@ var PackageSymbols = map[string][]Symbol{
{"DefaultRemoteAddr", Const, 0},
{"NewRecorder", Func, 0},
{"NewRequest", Func, 7},
+ {"NewRequestWithContext", Func, 23},
{"NewServer", Func, 0},
{"NewTLSServer", Func, 0},
{"NewUnstartedServer", Func, 0},
@@ -8917,6 +8980,7 @@ var PackageSymbols = map[string][]Symbol{
{"Chown", Func, 0},
{"Chtimes", Func, 0},
{"Clearenv", Func, 0},
+ {"CopyFS", Func, 23},
{"Create", Func, 0},
{"CreateTemp", Func, 16},
{"DevNull", Const, 0},
@@ -9150,6 +9214,7 @@ var PackageSymbols = map[string][]Symbol{
{"IsLocal", Func, 20},
{"Join", Func, 0},
{"ListSeparator", Const, 0},
+ {"Localize", Func, 23},
{"Match", Func, 0},
{"Rel", Func, 0},
{"Separator", Const, 0},
@@ -9232,6 +9297,8 @@ var PackageSymbols = map[string][]Symbol{
{"(Value).Pointer", Method, 0},
{"(Value).Recv", Method, 0},
{"(Value).Send", Method, 0},
+ {"(Value).Seq", Method, 23},
+ {"(Value).Seq2", Method, 23},
{"(Value).Set", Method, 0},
{"(Value).SetBool", Method, 0},
{"(Value).SetBytes", Method, 0},
@@ -9314,6 +9381,7 @@ var PackageSymbols = map[string][]Symbol{
{"SelectSend", Const, 1},
{"SendDir", Const, 0},
{"Slice", Const, 0},
+ {"SliceAt", Func, 23},
{"SliceHeader", Type, 0},
{"SliceHeader.Cap", Field, 0},
{"SliceHeader.Data", Field, 0},
@@ -9655,6 +9723,7 @@ var PackageSymbols = map[string][]Symbol{
{"BuildSetting", Type, 18},
{"BuildSetting.Key", Field, 18},
{"BuildSetting.Value", Field, 18},
+ {"CrashOptions", Type, 23},
{"FreeOSMemory", Func, 1},
{"GCStats", Type, 1},
{"GCStats.LastGC", Field, 1},
@@ -9672,6 +9741,7 @@ var PackageSymbols = map[string][]Symbol{
{"PrintStack", Func, 0},
{"ReadBuildInfo", Func, 12},
{"ReadGCStats", Func, 1},
+ {"SetCrashOutput", Func, 23},
{"SetGCPercent", Func, 1},
{"SetMaxStack", Func, 2},
{"SetMaxThreads", Func, 2},
@@ -9742,10 +9812,15 @@ var PackageSymbols = map[string][]Symbol{
{"WithRegion", Func, 11},
},
"slices": {
+ {"All", Func, 23},
+ {"AppendSeq", Func, 23},
+ {"Backward", Func, 23},
{"BinarySearch", Func, 21},
{"BinarySearchFunc", Func, 21},
+ {"Chunk", Func, 23},
{"Clip", Func, 21},
{"Clone", Func, 21},
+ {"Collect", Func, 23},
{"Compact", Func, 21},
{"CompactFunc", Func, 21},
{"Compare", Func, 21},
@@ -9767,11 +9842,16 @@ var PackageSymbols = map[string][]Symbol{
{"MaxFunc", Func, 21},
{"Min", Func, 21},
{"MinFunc", Func, 21},
+ {"Repeat", Func, 23},
{"Replace", Func, 21},
{"Reverse", Func, 21},
{"Sort", Func, 21},
{"SortFunc", Func, 21},
{"SortStableFunc", Func, 21},
+ {"Sorted", Func, 23},
+ {"SortedFunc", Func, 23},
+ {"SortedStableFunc", Func, 23},
+ {"Values", Func, 23},
},
"sort": {
{"(Float64Slice).Len", Method, 0},
@@ -9936,10 +10016,14 @@ var PackageSymbols = map[string][]Symbol{
{"TrimSpace", Func, 0},
{"TrimSuffix", Func, 1},
},
+ "structs": {
+ {"HostLayout", Type, 23},
+ },
"sync": {
{"(*Cond).Broadcast", Method, 0},
{"(*Cond).Signal", Method, 0},
{"(*Cond).Wait", Method, 0},
+ {"(*Map).Clear", Method, 23},
{"(*Map).CompareAndDelete", Method, 20},
{"(*Map).CompareAndSwap", Method, 20},
{"(*Map).Delete", Method, 9},
@@ -9986,13 +10070,17 @@ var PackageSymbols = map[string][]Symbol{
{"(*Bool).Store", Method, 19},
{"(*Bool).Swap", Method, 19},
{"(*Int32).Add", Method, 19},
+ {"(*Int32).And", Method, 23},
{"(*Int32).CompareAndSwap", Method, 19},
{"(*Int32).Load", Method, 19},
+ {"(*Int32).Or", Method, 23},
{"(*Int32).Store", Method, 19},
{"(*Int32).Swap", Method, 19},
{"(*Int64).Add", Method, 19},
+ {"(*Int64).And", Method, 23},
{"(*Int64).CompareAndSwap", Method, 19},
{"(*Int64).Load", Method, 19},
+ {"(*Int64).Or", Method, 23},
{"(*Int64).Store", Method, 19},
{"(*Int64).Swap", Method, 19},
{"(*Pointer).CompareAndSwap", Method, 19},
@@ -10000,18 +10088,24 @@ var PackageSymbols = map[string][]Symbol{
{"(*Pointer).Store", Method, 19},
{"(*Pointer).Swap", Method, 19},
{"(*Uint32).Add", Method, 19},
+ {"(*Uint32).And", Method, 23},
{"(*Uint32).CompareAndSwap", Method, 19},
{"(*Uint32).Load", Method, 19},
+ {"(*Uint32).Or", Method, 23},
{"(*Uint32).Store", Method, 19},
{"(*Uint32).Swap", Method, 19},
{"(*Uint64).Add", Method, 19},
+ {"(*Uint64).And", Method, 23},
{"(*Uint64).CompareAndSwap", Method, 19},
{"(*Uint64).Load", Method, 19},
+ {"(*Uint64).Or", Method, 23},
{"(*Uint64).Store", Method, 19},
{"(*Uint64).Swap", Method, 19},
{"(*Uintptr).Add", Method, 19},
+ {"(*Uintptr).And", Method, 23},
{"(*Uintptr).CompareAndSwap", Method, 19},
{"(*Uintptr).Load", Method, 19},
+ {"(*Uintptr).Or", Method, 23},
{"(*Uintptr).Store", Method, 19},
{"(*Uintptr).Swap", Method, 19},
{"(*Value).CompareAndSwap", Method, 17},
@@ -10023,6 +10117,11 @@ var PackageSymbols = map[string][]Symbol{
{"AddUint32", Func, 0},
{"AddUint64", Func, 0},
{"AddUintptr", Func, 0},
+ {"AndInt32", Func, 23},
+ {"AndInt64", Func, 23},
+ {"AndUint32", Func, 23},
+ {"AndUint64", Func, 23},
+ {"AndUintptr", Func, 23},
{"Bool", Type, 19},
{"CompareAndSwapInt32", Func, 0},
{"CompareAndSwapInt64", Func, 0},
@@ -10038,6 +10137,11 @@ var PackageSymbols = map[string][]Symbol{
{"LoadUint32", Func, 0},
{"LoadUint64", Func, 0},
{"LoadUintptr", Func, 0},
+ {"OrInt32", Func, 23},
+ {"OrInt64", Func, 23},
+ {"OrUint32", Func, 23},
+ {"OrUint64", Func, 23},
+ {"OrUintptr", Func, 23},
{"Pointer", Type, 19},
{"StoreInt32", Func, 0},
{"StoreInt64", Func, 0},
@@ -16200,6 +16304,7 @@ var PackageSymbols = map[string][]Symbol{
{"WSAEACCES", Const, 2},
{"WSAECONNABORTED", Const, 9},
{"WSAECONNRESET", Const, 3},
+ {"WSAENOPROTOOPT", Const, 23},
{"WSAEnumProtocols", Func, 2},
{"WSAID_CONNECTEX", Var, 1},
{"WSAIoctl", Func, 0},
@@ -17284,6 +17389,7 @@ var PackageSymbols = map[string][]Symbol{
{"Encode", Func, 0},
{"EncodeRune", Func, 0},
{"IsSurrogate", Func, 0},
+ {"RuneLen", Func, 23},
},
"unicode/utf8": {
{"AppendRune", Func, 18},
@@ -17306,6 +17412,11 @@ var PackageSymbols = map[string][]Symbol{
{"ValidRune", Func, 1},
{"ValidString", Func, 0},
},
+ "unique": {
+ {"(Handle).Value", Method, 23},
+ {"Handle", Type, 23},
+ {"Make", Func, 23},
+ },
"unsafe": {
{"Add", Func, 0},
{"Alignof", Func, 0},
diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md
index ab0fbb79b..b572707c6 100644
--- a/vendor/google.golang.org/grpc/README.md
+++ b/vendor/google.golang.org/grpc/README.md
@@ -10,7 +10,7 @@ RPC framework that puts mobile and HTTP/2 first. For more information see the
## Prerequisites
-- **[Go][]**: any one of the **three latest major** [releases][go-releases].
+- **[Go][]**: any one of the **two latest major** [releases][go-releases].
## Installation
diff --git a/vendor/google.golang.org/grpc/pickfirst.go b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
similarity index 89%
rename from vendor/google.golang.org/grpc/pickfirst.go
rename to vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
index 885362661..07527603f 100644
--- a/vendor/google.golang.org/grpc/pickfirst.go
+++ b/vendor/google.golang.org/grpc/balancer/pickfirst/pickfirst.go
@@ -16,26 +16,36 @@
*
*/
-package grpc
+// Package pickfirst contains the pick_first load balancing policy.
+package pickfirst
import (
"encoding/json"
"errors"
"fmt"
+ "math/rand"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/grpclog"
+ "google.golang.org/grpc/internal"
internalgrpclog "google.golang.org/grpc/internal/grpclog"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/pretty"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
)
+func init() {
+ balancer.Register(pickfirstBuilder{})
+ internal.ShuffleAddressListForTesting = func(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
+}
+
+var logger = grpclog.Component("pick-first-lb")
+
const (
- // PickFirstBalancerName is the name of the pick_first balancer.
- PickFirstBalancerName = "pick_first"
- logPrefix = "[pick-first-lb %p] "
+ // Name is the name of the pick_first balancer.
+ Name = "pick_first"
+ logPrefix = "[pick-first-lb %p] "
)
type pickfirstBuilder struct{}
@@ -47,7 +57,7 @@ func (pickfirstBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
}
func (pickfirstBuilder) Name() string {
- return PickFirstBalancerName
+ return Name
}
type pfConfig struct {
@@ -93,6 +103,12 @@ func (b *pickfirstBalancer) ResolverError(err error) {
})
}
+type Shuffler interface {
+ ShuffleAddressListForTesting(n int, swap func(i, j int))
+}
+
+func ShuffleAddressListForTesting(n int, swap func(i, j int)) { rand.Shuffle(n, swap) }
+
func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState) error {
if len(state.ResolverState.Addresses) == 0 && len(state.ResolverState.Endpoints) == 0 {
// The resolver reported an empty address list. Treat it like an error by
@@ -124,7 +140,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
// within each endpoint. - A61
if cfg.ShuffleAddressList {
endpoints = append([]resolver.Endpoint{}, endpoints...)
- grpcrand.Shuffle(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
+ internal.ShuffleAddressListForTesting.(func(int, func(int, int)))(len(endpoints), func(i, j int) { endpoints[i], endpoints[j] = endpoints[j], endpoints[i] })
}
// "Flatten the list by concatenating the ordered list of addresses for each
@@ -145,7 +161,7 @@ func (b *pickfirstBalancer) UpdateClientConnState(state balancer.ClientConnState
addrs = state.ResolverState.Addresses
if cfg.ShuffleAddressList {
addrs = append([]resolver.Address{}, addrs...)
- grpcrand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
+ rand.Shuffle(len(addrs), func(i, j int) { addrs[i], addrs[j] = addrs[j], addrs[i] })
}
}
diff --git a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
index f7031ad22..260255d31 100644
--- a/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
+++ b/vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
@@ -22,12 +22,12 @@
package roundrobin
import (
+ "math/rand"
"sync/atomic"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
"google.golang.org/grpc/grpclog"
- "google.golang.org/grpc/internal/grpcrand"
)
// Name is the name of round_robin balancer.
@@ -60,7 +60,7 @@ func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
// Start at a random index, as the same RR balancer rebuilds a new
// picker when SubConn states change, and we don't want to apply excess
// load to the first server in the list.
- next: uint32(grpcrand.Intn(len(scs))),
+ next: uint32(rand.Intn(len(scs))),
}
}
diff --git a/vendor/google.golang.org/grpc/balancer_wrapper.go b/vendor/google.golang.org/grpc/balancer_wrapper.go
index af39b8a4c..4161fdf47 100644
--- a/vendor/google.golang.org/grpc/balancer_wrapper.go
+++ b/vendor/google.golang.org/grpc/balancer_wrapper.go
@@ -198,6 +198,10 @@ func (ccb *ccBalancerWrapper) UpdateAddresses(sc balancer.SubConn, addrs []resol
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
ccb.cc.mu.Lock()
defer ccb.cc.mu.Unlock()
+ if ccb.cc.conns == nil {
+ // The CC has been closed; ignore this update.
+ return
+ }
ccb.mu.Lock()
if ccb.closed {
diff --git a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
index 1afb1e84a..63c639e4f 100644
--- a/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
+++ b/vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
@@ -18,7 +18,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.33.0
+// protoc-gen-go v1.34.1
// protoc v4.25.2
// source: grpc/binlog/v1/binarylog.proto
diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go
index 2359f94b8..423be7b43 100644
--- a/vendor/google.golang.org/grpc/clientconn.go
+++ b/vendor/google.golang.org/grpc/clientconn.go
@@ -31,6 +31,7 @@ import (
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/balancer/base"
+ "google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/connectivity"
"google.golang.org/grpc/internal"
@@ -72,6 +73,8 @@ var (
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
// service config.
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
+ // PickFirstBalancerName is the name of the pick_first balancer.
+ PickFirstBalancerName = pickfirst.Name
)
// The following errors are returned from Dial and DialContext
@@ -152,6 +155,16 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
for _, opt := range opts {
opt.apply(&cc.dopts)
}
+
+ // Determine the resolver to use.
+ if err := cc.initParsedTargetAndResolverBuilder(); err != nil {
+ return nil, err
+ }
+
+ for _, opt := range globalPerTargetDialOptions {
+ opt.DialOptionForTarget(cc.parsedTarget.URL).apply(&cc.dopts)
+ }
+
chainUnaryClientInterceptors(cc)
chainStreamClientInterceptors(cc)
@@ -160,7 +173,7 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
}
if cc.dopts.defaultServiceConfigRawJSON != nil {
- scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON)
+ scpr := parseServiceConfig(*cc.dopts.defaultServiceConfigRawJSON, cc.dopts.maxCallAttempts)
if scpr.Err != nil {
return nil, fmt.Errorf("%s: %v", invalidDefaultServiceConfigErrPrefix, scpr.Err)
}
@@ -168,25 +181,16 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error)
}
cc.mkp = cc.dopts.copts.KeepaliveParams
- // Register ClientConn with channelz.
- cc.channelzRegistration(target)
-
- // TODO: Ideally it should be impossible to error from this function after
- // channelz registration. This will require removing some channelz logs
- // from the following functions that can error. Errors can be returned to
- // the user, and successful logs can be emitted here, after the checks have
- // passed and channelz is subsequently registered.
-
- // Determine the resolver to use.
- if err := cc.parseTargetAndFindResolver(); err != nil {
- channelz.RemoveEntry(cc.channelz.ID)
- return nil, err
- }
- if err = cc.determineAuthority(); err != nil {
- channelz.RemoveEntry(cc.channelz.ID)
+ if err = cc.initAuthority(); err != nil {
return nil, err
}
+ // Register ClientConn with channelz. Note that this is only done once
+ // channel creation can no longer fail.
+ cc.channelzRegistration(target)
+ channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", cc.parsedTarget)
+ channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
+
cc.csMgr = newConnectivityStateManager(cc.ctx, cc.channelz)
cc.pickerWrapper = newPickerWrapper(cc.dopts.copts.StatsHandlers)
@@ -587,11 +591,11 @@ type ClientConn struct {
// The following are initialized at dial time, and are read-only after that.
target string // User's dial target.
- parsedTarget resolver.Target // See parseTargetAndFindResolver().
- authority string // See determineAuthority().
+ parsedTarget resolver.Target // See initParsedTargetAndResolverBuilder().
+ authority string // See initAuthority().
dopts dialOptions // Default and user specified dial options.
channelz *channelz.Channel // Channelz object.
- resolverBuilder resolver.Builder // See parseTargetAndFindResolver().
+ resolverBuilder resolver.Builder // See initParsedTargetAndResolverBuilder().
idlenessMgr *idle.Manager
// The following provide their own synchronization, and therefore don't
@@ -692,8 +696,7 @@ func (cc *ClientConn) waitForResolvedAddrs(ctx context.Context) error {
var emptyServiceConfig *ServiceConfig
func init() {
- balancer.Register(pickfirstBuilder{})
- cfg := parseServiceConfig("{}")
+ cfg := parseServiceConfig("{}", defaultMaxCallAttempts)
if cfg.Err != nil {
panic(fmt.Sprintf("impossible error parsing empty service config: %v", cfg.Err))
}
@@ -1673,22 +1676,19 @@ func (cc *ClientConn) connectionError() error {
return cc.lastConnectionError
}
-// parseTargetAndFindResolver parses the user's dial target and stores the
-// parsed target in `cc.parsedTarget`.
+// initParsedTargetAndResolverBuilder parses the user's dial target and stores
+// the parsed target in `cc.parsedTarget`.
//
// The resolver to use is determined based on the scheme in the parsed target
// and the same is stored in `cc.resolverBuilder`.
//
// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
-func (cc *ClientConn) parseTargetAndFindResolver() error {
- channelz.Infof(logger, cc.channelz, "original dial target is: %q", cc.target)
+func (cc *ClientConn) initParsedTargetAndResolverBuilder() error {
+ logger.Infof("original dial target is: %q", cc.target)
var rb resolver.Builder
parsedTarget, err := parseTarget(cc.target)
- if err != nil {
- channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", cc.target, err)
- } else {
- channelz.Infof(logger, cc.channelz, "parsed dial target is: %#v", parsedTarget)
+ if err == nil {
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb != nil {
cc.parsedTarget = parsedTarget
@@ -1707,15 +1707,12 @@ func (cc *ClientConn) parseTargetAndFindResolver() error {
defScheme = resolver.GetDefaultScheme()
}
- channelz.Infof(logger, cc.channelz, "fallback to scheme %q", defScheme)
canonicalTarget := defScheme + ":///" + cc.target
parsedTarget, err = parseTarget(canonicalTarget)
if err != nil {
- channelz.Infof(logger, cc.channelz, "dial target %q parse failed: %v", canonicalTarget, err)
return err
}
- channelz.Infof(logger, cc.channelz, "parsed dial target is: %+v", parsedTarget)
rb = cc.getResolver(parsedTarget.URL.Scheme)
if rb == nil {
return fmt.Errorf("could not get resolver for default scheme: %q", parsedTarget.URL.Scheme)
@@ -1805,7 +1802,7 @@ func encodeAuthority(authority string) string {
// credentials do not match the authority configured through the dial option.
//
// Doesn't grab cc.mu as this method is expected to be called only at Dial time.
-func (cc *ClientConn) determineAuthority() error {
+func (cc *ClientConn) initAuthority() error {
dopts := cc.dopts
// Historically, we had two options for users to specify the serverName or
// authority for a channel. One was through the transport credentials
@@ -1838,6 +1835,5 @@ func (cc *ClientConn) determineAuthority() error {
} else {
cc.authority = encodeAuthority(endpoint)
}
- channelz.Infof(logger, cc.channelz, "Channel authority set to %q", cc.authority)
return nil
}
diff --git a/vendor/google.golang.org/grpc/credentials/tls.go b/vendor/google.golang.org/grpc/credentials/tls.go
index 5dafd34ed..411435854 100644
--- a/vendor/google.golang.org/grpc/credentials/tls.go
+++ b/vendor/google.golang.org/grpc/credentials/tls.go
@@ -27,9 +27,13 @@ import (
"net/url"
"os"
+ "google.golang.org/grpc/grpclog"
credinternal "google.golang.org/grpc/internal/credentials"
+ "google.golang.org/grpc/internal/envconfig"
)
+var logger = grpclog.Component("credentials")
+
// TLSInfo contains the auth information for a TLS authenticated connection.
// It implements the AuthInfo interface.
type TLSInfo struct {
@@ -112,6 +116,22 @@ func (c *tlsCreds) ClientHandshake(ctx context.Context, authority string, rawCon
conn.Close()
return nil, nil, ctx.Err()
}
+
+ // The negotiated protocol can be either of the following:
+ // 1. h2: When the server supports ALPN. Only HTTP/2 can be negotiated since
+ // it is the only protocol advertised by the client during the handshake.
+ // The tls library ensures that the server chooses a protocol advertised
+ // by the client.
+ // 2. "" (empty string): If the server doesn't support ALPN. ALPN is a requirement
+ // for using HTTP/2 over TLS. We can terminate the connection immediately.
+ np := conn.ConnectionState().NegotiatedProtocol
+ if np == "" {
+ if envconfig.EnforceALPNEnabled {
+ conn.Close()
+ return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
+ }
+ logger.Warningf("Allowing TLS connection to server %q with ALPN disabled. TLS connections to servers with ALPN disabled will be disallowed in future grpc-go releases", cfg.ServerName)
+ }
tlsInfo := TLSInfo{
State: conn.ConnectionState(),
CommonAuthInfo: CommonAuthInfo{
@@ -131,8 +151,20 @@ func (c *tlsCreds) ServerHandshake(rawConn net.Conn) (net.Conn, AuthInfo, error)
conn.Close()
return nil, nil, err
}
+ cs := conn.ConnectionState()
+ // The negotiated application protocol can be empty only if the client doesn't
+ // support ALPN. In such cases, we can close the connection since ALPN is required
+ // for using HTTP/2 over TLS.
+ if cs.NegotiatedProtocol == "" {
+ if envconfig.EnforceALPNEnabled {
+ conn.Close()
+ return nil, nil, fmt.Errorf("credentials: cannot check peer: missing selected ALPN property")
+ } else if logger.V(2) {
+ logger.Info("Allowing TLS connection from client with ALPN disabled. TLS connections with ALPN disabled will be disallowed in future grpc-go releases")
+ }
+ }
tlsInfo := TLSInfo{
- State: conn.ConnectionState(),
+ State: cs,
CommonAuthInfo: CommonAuthInfo{
SecurityLevel: PrivacyAndIntegrity,
},
diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go
index 00273702b..f5453d48a 100644
--- a/vendor/google.golang.org/grpc/dialoptions.go
+++ b/vendor/google.golang.org/grpc/dialoptions.go
@@ -21,6 +21,7 @@ package grpc
import (
"context"
"net"
+ "net/url"
"time"
"google.golang.org/grpc/backoff"
@@ -36,6 +37,11 @@ import (
"google.golang.org/grpc/stats"
)
+const (
+ // https://github.com/grpc/proposal/blob/master/A6-client-retries.md#limits-on-retries-and-hedges
+ defaultMaxCallAttempts = 5
+)
+
func init() {
internal.AddGlobalDialOptions = func(opt ...DialOption) {
globalDialOptions = append(globalDialOptions, opt...)
@@ -43,6 +49,14 @@ func init() {
internal.ClearGlobalDialOptions = func() {
globalDialOptions = nil
}
+ internal.AddGlobalPerTargetDialOptions = func(opt any) {
+ if ptdo, ok := opt.(perTargetDialOption); ok {
+ globalPerTargetDialOptions = append(globalPerTargetDialOptions, ptdo)
+ }
+ }
+ internal.ClearGlobalPerTargetDialOptions = func() {
+ globalPerTargetDialOptions = nil
+ }
internal.WithBinaryLogger = withBinaryLogger
internal.JoinDialOptions = newJoinDialOption
internal.DisableGlobalDialOptions = newDisableGlobalDialOptions
@@ -80,6 +94,7 @@ type dialOptions struct {
idleTimeout time.Duration
recvBufferPool SharedBufferPool
defaultScheme string
+ maxCallAttempts int
}
// DialOption configures how we set up the connection.
@@ -89,6 +104,19 @@ type DialOption interface {
var globalDialOptions []DialOption
+// perTargetDialOption takes a parsed target and returns a dial option to apply.
+//
+// This gets called after NewClient() parses the target, and allows per-target
+// configuration to be set through a returned DialOption. The DialOption will not
+// take effect if it specifies a resolver builder, as that dial option is factored
+// in while parsing the target.
+type perTargetDialOption interface {
+ // DialOption returns a Dial Option to apply.
+ DialOptionForTarget(parsedTarget url.URL) DialOption
+}
+
+var globalPerTargetDialOptions []perTargetDialOption
+
// EmptyDialOption does not alter the dial configuration. It can be embedded in
// another structure to build custom dial options.
//
@@ -655,6 +683,7 @@ func defaultDialOptions() dialOptions {
idleTimeout: 30 * time.Minute,
recvBufferPool: nopBufferPool{},
defaultScheme: "dns",
+ maxCallAttempts: defaultMaxCallAttempts,
}
}
@@ -712,6 +741,23 @@ func WithIdleTimeout(d time.Duration) DialOption {
})
}
+// WithMaxCallAttempts returns a DialOption that configures the maximum number
+// of attempts per call (including retries and hedging) using the channel.
+// Service owners may specify a higher value for these parameters, but higher
+// values will be treated as equal to the maximum value by the client
+// implementation. This mitigates security concerns related to the service
+// config being transferred to the client via DNS.
+//
+// A value of 5 will be used if this dial option is not set or n < 2.
+func WithMaxCallAttempts(n int) DialOption {
+ return newFuncDialOption(func(o *dialOptions) {
+ if n < 2 {
+ n = defaultMaxCallAttempts
+ }
+ o.maxCallAttempts = n
+ })
+}
+
// WithRecvBufferPool returns a DialOption that configures the ClientConn
// to use the provided shared buffer pool for parsing incoming messages. Depending
// on the application's workload, this could result in reduced memory allocation.
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
index 6a93475a7..38b883507 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.33.0
+// protoc-gen-go v1.34.1
// protoc v4.25.2
// source: grpc/health/v1/health.proto
diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
index 8f793e6e8..51b736ba0 100644
--- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
+++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go
@@ -17,7 +17,7 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.3.0
+// - protoc-gen-go-grpc v1.4.0
// - protoc v4.25.2
// source: grpc/health/v1/health.proto
@@ -43,6 +43,10 @@ const (
// HealthClient is the client API for Health service.
//
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
+//
+// Health is gRPC's mechanism for checking whether a server is able to handle
+// RPCs. Its semantics are documented in
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
type HealthClient interface {
// Check gets the health of the specified service. If the requested service
// is unknown, the call will fail with status NOT_FOUND. If the caller does
@@ -126,6 +130,10 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) {
// HealthServer is the server API for Health service.
// All implementations should embed UnimplementedHealthServer
// for forward compatibility
+//
+// Health is gRPC's mechanism for checking whether a server is able to handle
+// RPCs. Its semantics are documented in
+// https://github.com/grpc/grpc/blob/master/doc/health-checking.md.
type HealthServer interface {
// Check gets the health of the specified service. If the requested service
// is unknown, the call will fail with status NOT_FOUND. If the caller does
diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
index fed1c011a..b15cf482d 100644
--- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go
+++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go
@@ -25,10 +25,10 @@ package backoff
import (
"context"
"errors"
+ "math/rand"
"time"
grpcbackoff "google.golang.org/grpc/backoff"
- "google.golang.org/grpc/internal/grpcrand"
)
// Strategy defines the methodology for backing off after a grpc connection
@@ -67,7 +67,7 @@ func (bc Exponential) Backoff(retries int) time.Duration {
}
// Randomize backoff delays so that if a cluster of requests start at
// the same time, they won't operate in lockstep.
- backoff *= 1 + bc.Config.Jitter*(grpcrand.Float64()*2-1)
+ backoff *= 1 + bc.Config.Jitter*(rand.Float64()*2-1)
if backoff < 0 {
return 0
}
diff --git a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
index 9c915d9e4..d90648713 100644
--- a/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
+++ b/vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
@@ -40,6 +40,12 @@ var (
// ALTSMaxConcurrentHandshakes is the maximum number of concurrent ALTS
// handshakes that can be performed.
ALTSMaxConcurrentHandshakes = uint64FromEnv("GRPC_ALTS_MAX_CONCURRENT_HANDSHAKES", 100, 1, 100)
+ // EnforceALPNEnabled is set if TLS connections to servers with ALPN disabled
+ // should be rejected. The HTTP/2 protocol requires ALPN to be enabled; this
+ // option is present for backward compatibility. This option may be overridden
+ // by setting the environment variable "GRPC_ENFORCE_ALPN_ENABLED" to "true"
+ // or "false".
+ EnforceALPNEnabled = boolFromEnv("GRPC_ENFORCE_ALPN_ENABLED", false)
)
func boolFromEnv(envVar string, def bool) bool {
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
deleted file mode 100644
index 0126d6b51..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand.go
+++ /dev/null
@@ -1,100 +0,0 @@
-//go:build !go1.21
-
-// TODO: when this file is deleted (after Go 1.20 support is dropped), delete
-// all of grpcrand and call the rand package directly.
-
-/*
- *
- * Copyright 2018 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcrand implements math/rand functions in a concurrent-safe way
-// with a global random source, independent of math/rand's global source.
-package grpcrand
-
-import (
- "math/rand"
- "sync"
- "time"
-)
-
-var (
- r = rand.New(rand.NewSource(time.Now().UnixNano()))
- mu sync.Mutex
-)
-
-// Int implements rand.Int on the grpcrand global source.
-func Int() int {
- mu.Lock()
- defer mu.Unlock()
- return r.Int()
-}
-
-// Int63n implements rand.Int63n on the grpcrand global source.
-func Int63n(n int64) int64 {
- mu.Lock()
- defer mu.Unlock()
- return r.Int63n(n)
-}
-
-// Intn implements rand.Intn on the grpcrand global source.
-func Intn(n int) int {
- mu.Lock()
- defer mu.Unlock()
- return r.Intn(n)
-}
-
-// Int31n implements rand.Int31n on the grpcrand global source.
-func Int31n(n int32) int32 {
- mu.Lock()
- defer mu.Unlock()
- return r.Int31n(n)
-}
-
-// Float64 implements rand.Float64 on the grpcrand global source.
-func Float64() float64 {
- mu.Lock()
- defer mu.Unlock()
- return r.Float64()
-}
-
-// Uint64 implements rand.Uint64 on the grpcrand global source.
-func Uint64() uint64 {
- mu.Lock()
- defer mu.Unlock()
- return r.Uint64()
-}
-
-// Uint32 implements rand.Uint32 on the grpcrand global source.
-func Uint32() uint32 {
- mu.Lock()
- defer mu.Unlock()
- return r.Uint32()
-}
-
-// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
-func ExpFloat64() float64 {
- mu.Lock()
- defer mu.Unlock()
- return r.ExpFloat64()
-}
-
-// Shuffle implements rand.Shuffle on the grpcrand global source.
-var Shuffle = func(n int, f func(int, int)) {
- mu.Lock()
- defer mu.Unlock()
- r.Shuffle(n, f)
-}
diff --git a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go b/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go
deleted file mode 100644
index c37299af1..000000000
--- a/vendor/google.golang.org/grpc/internal/grpcrand/grpcrand_go1.21.go
+++ /dev/null
@@ -1,73 +0,0 @@
-//go:build go1.21
-
-/*
- *
- * Copyright 2024 gRPC authors.
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- *
- */
-
-// Package grpcrand implements math/rand functions in a concurrent-safe way
-// with a global random source, independent of math/rand's global source.
-package grpcrand
-
-import "math/rand"
-
-// This implementation will be used for Go version 1.21 or newer.
-// For older versions, the original implementation with mutex will be used.
-
-// Int implements rand.Int on the grpcrand global source.
-func Int() int {
- return rand.Int()
-}
-
-// Int63n implements rand.Int63n on the grpcrand global source.
-func Int63n(n int64) int64 {
- return rand.Int63n(n)
-}
-
-// Intn implements rand.Intn on the grpcrand global source.
-func Intn(n int) int {
- return rand.Intn(n)
-}
-
-// Int31n implements rand.Int31n on the grpcrand global source.
-func Int31n(n int32) int32 {
- return rand.Int31n(n)
-}
-
-// Float64 implements rand.Float64 on the grpcrand global source.
-func Float64() float64 {
- return rand.Float64()
-}
-
-// Uint64 implements rand.Uint64 on the grpcrand global source.
-func Uint64() uint64 {
- return rand.Uint64()
-}
-
-// Uint32 implements rand.Uint32 on the grpcrand global source.
-func Uint32() uint32 {
- return rand.Uint32()
-}
-
-// ExpFloat64 implements rand.ExpFloat64 on the grpcrand global source.
-func ExpFloat64() float64 {
- return rand.ExpFloat64()
-}
-
-// Shuffle implements rand.Shuffle on the grpcrand global source.
-var Shuffle = func(n int, f func(int, int)) {
- rand.Shuffle(n, f)
-}
diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go
index 48d24bdb4..5d6653986 100644
--- a/vendor/google.golang.org/grpc/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/internal.go
@@ -106,6 +106,14 @@ var (
// This is used in the 1.0 release of gcp/observability, and thus must not be
// deleted or changed.
ClearGlobalDialOptions func()
+
+ // AddGlobalPerTargetDialOptions adds a PerTargetDialOption that will be
+ // configured for newly created ClientConns.
+ AddGlobalPerTargetDialOptions any // func (opt any)
+ // ClearGlobalPerTargetDialOptions clears the slice of global per-target
+ // dial options.
+ ClearGlobalPerTargetDialOptions func()
+
// JoinDialOptions combines the dial options passed as arguments into a
// single dial option.
JoinDialOptions any // func(...grpc.DialOption) grpc.DialOption
@@ -126,7 +134,8 @@ var (
// deleted or changed.
BinaryLogger any // func(binarylog.Logger) grpc.ServerOption
- // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a provided grpc.ClientConn
+ // SubscribeToConnectivityStateChanges adds a grpcsync.Subscriber to a
+ // provided grpc.ClientConn.
SubscribeToConnectivityStateChanges any // func(*grpc.ClientConn, grpcsync.Subscriber)
// NewXDSResolverWithConfigForTesting creates a new xds resolver builder using
@@ -184,25 +193,25 @@ var (
ChannelzTurnOffForTesting func()
- // TriggerXDSResourceNameNotFoundForTesting triggers the resource-not-found
- // error for a given resource type and name. This is usually triggered when
- // the associated watch timer fires. For testing purposes, having this
- // function makes events more predictable than relying on timer events.
- TriggerXDSResourceNameNotFoundForTesting any // func(func(xdsresource.Type, string), string, string) error
+ // TriggerXDSResourceNotFoundForTesting causes the provided xDS Client to
+ // invoke resource-not-found error for the given resource type and name.
+ TriggerXDSResourceNotFoundForTesting any // func(xdsclient.XDSClient, xdsresource.Type, string) error
- // TriggerXDSResourceNameNotFoundClient invokes the testing xDS Client
- // singleton to invoke resource not found for a resource type name and
- // resource name.
- TriggerXDSResourceNameNotFoundClient any // func(string, string) error
-
- // FromOutgoingContextRaw returns the un-merged, intermediary contents of metadata.rawMD.
+ // FromOutgoingContextRaw returns the un-merged, intermediary contents of
+ // metadata.rawMD.
FromOutgoingContextRaw any // func(context.Context) (metadata.MD, [][]string, bool)
- // UserSetDefaultScheme is set to true if the user has overridden the default resolver scheme.
+ // UserSetDefaultScheme is set to true if the user has overridden the
+ // default resolver scheme.
UserSetDefaultScheme bool = false
+
+ // ShuffleAddressListForTesting pseudo-randomizes the order of addresses. n
+ // is the number of elements. swap swaps the elements with indexes i and j.
+ ShuffleAddressListForTesting any // func(n int, swap func(i, j int))
)
-// HealthChecker defines the signature of the client-side LB channel health checking function.
+// HealthChecker defines the signature of the client-side LB channel health
+// checking function.
//
// The implementation is expected to create a health checking RPC stream by
// calling newStream(), watch for the health status of serviceName, and report
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
index f3f52a59a..4552db16b 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
@@ -24,6 +24,7 @@ import (
"context"
"encoding/json"
"fmt"
+ "math/rand"
"net"
"os"
"strconv"
@@ -35,7 +36,6 @@ import (
"google.golang.org/grpc/grpclog"
"google.golang.org/grpc/internal/backoff"
"google.golang.org/grpc/internal/envconfig"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/resolver/dns/internal"
"google.golang.org/grpc/resolver"
"google.golang.org/grpc/serviceconfig"
@@ -63,6 +63,8 @@ var (
func init() {
resolver.Register(NewBuilder())
internal.TimeAfterFunc = time.After
+ internal.TimeNowFunc = time.Now
+ internal.TimeUntilFunc = time.Until
internal.NewNetResolver = newNetResolver
internal.AddressDialer = addressDialer
}
@@ -209,12 +211,12 @@ func (d *dnsResolver) watcher() {
err = d.cc.UpdateState(*state)
}
- var waitTime time.Duration
+ var nextResolutionTime time.Time
if err == nil {
// Success resolving, wait for the next ResolveNow. However, also wait 30
// seconds at the very least to prevent constantly re-resolving.
backoffIndex = 1
- waitTime = MinResolutionInterval
+ nextResolutionTime = internal.TimeNowFunc().Add(MinResolutionInterval)
select {
case <-d.ctx.Done():
return
@@ -223,13 +225,13 @@ func (d *dnsResolver) watcher() {
} else {
// Poll on an error found in DNS Resolver or an error received from
// ClientConn.
- waitTime = backoff.DefaultExponential.Backoff(backoffIndex)
+ nextResolutionTime = internal.TimeNowFunc().Add(backoff.DefaultExponential.Backoff(backoffIndex))
backoffIndex++
}
select {
case <-d.ctx.Done():
return
- case <-internal.TimeAfterFunc(waitTime):
+ case <-internal.TimeAfterFunc(internal.TimeUntilFunc(nextResolutionTime)):
}
}
}
@@ -423,7 +425,7 @@ func chosenByPercentage(a *int) bool {
if a == nil {
return true
}
- return grpcrand.Intn(100)+1 <= *a
+ return rand.Intn(100)+1 <= *a
}
func canaryingSC(js string) string {
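The watcher now records an absolute next-resolution deadline instead of a relative wait, and converts it back to a duration only at the select, which is what lets tests inject a fake clock through the new hooks. A standalone sketch of the pattern; timeNow, timeUntil, and timeAfter stand in for the internal package variables and are assumptions of this example:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// Overridable clock hooks, mirroring the idea of internal.TimeNowFunc,
// internal.TimeUntilFunc and internal.TimeAfterFunc (names here are local).
var (
	timeNow   = time.Now
	timeUntil = time.Until
	timeAfter = time.After
)

func waitForNextResolution(ctx context.Context, resolved bool, backoff time.Duration) {
	var next time.Time
	if resolved {
		// Success: wait at least the minimum interval before re-resolving.
		next = timeNow().Add(30 * time.Second)
	} else {
		// Failure: wait out the current exponential backoff.
		next = timeNow().Add(backoff)
	}
	select {
	case <-ctx.Done():
	case <-timeAfter(timeUntil(next)):
		fmt.Println("time to re-resolve")
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	waitForNextResolution(ctx, true, 0) // returns once ctx times out
}
```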
diff --git a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
index a7ecaf8d5..c0eae4f5f 100644
--- a/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
+++ b/vendor/google.golang.org/grpc/internal/resolver/dns/internal/internal.go
@@ -51,11 +51,22 @@ var (
// The following vars are overridden from tests.
var (
// TimeAfterFunc is used by the DNS resolver to wait for the given duration
- // to elapse. In non-test code, this is implemented by time.After. In test
+ // to elapse. In non-test code, this is implemented by time.After. In test
// code, this can be used to control the amount of time the resolver is
// blocked waiting for the duration to elapse.
TimeAfterFunc func(time.Duration) <-chan time.Time
+ // TimeNowFunc is used by the DNS resolver to get the current time.
+ // In non-test code, this is implemented by time.Now. In test code,
+ // this can be used to control the current time for the resolver.
+ TimeNowFunc func() time.Time
+
+ // TimeUntilFunc is used by the DNS resolver to calculate the remaining
+ // wait time for re-resolution. In non-test code, this is implemented by
+ // time.Until. In test code, this can be used to control the remaining
+ // time for resolver to wait for re-resolution.
+ TimeUntilFunc func(time.Time) time.Duration
+
// NewNetResolver returns the net.Resolver instance for the given target.
NewNetResolver func(string) (NetResolver, error)
diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
index cab0e2d3d..b7091165b 100644
--- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go
+++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go
@@ -25,6 +25,7 @@ import (
"fmt"
"io"
"math"
+ "math/rand"
"net"
"net/http"
"strconv"
@@ -43,7 +44,6 @@ import (
"google.golang.org/grpc/codes"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcsync"
"google.golang.org/grpc/keepalive"
"google.golang.org/grpc/metadata"
@@ -1440,7 +1440,7 @@ func getJitter(v time.Duration) time.Duration {
}
// Generate a jitter between +/- 10% of the value.
r := int64(v / 10)
- j := grpcrand.Int63n(2*r) - r
+ j := rand.Int63n(2*r) - r
return time.Duration(j)
}
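getJitter draws a value uniformly from roughly ±10% of the input so that keepalive timers across connections do not fire in lockstep. A quick standalone check of that range; this is a re-implementation for illustration, not the vendored function itself:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitter reproduces the tail of getJitter: pick a duration uniformly
// in [-v/10, v/10).
func jitter(v time.Duration) time.Duration {
	r := int64(v / 10)
	return time.Duration(rand.Int63n(2*r) - r)
}

func main() {
	const maxConnectionAge = 2 * time.Hour
	j := jitter(maxConnectionAge)
	fmt.Printf("jitter %v lies within ±%v\n", j, maxConnectionAge/10)
}
```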
diff --git a/vendor/google.golang.org/grpc/metadata/metadata.go b/vendor/google.golang.org/grpc/metadata/metadata.go
index 6c01a9b35..1e9485fd6 100644
--- a/vendor/google.golang.org/grpc/metadata/metadata.go
+++ b/vendor/google.golang.org/grpc/metadata/metadata.go
@@ -90,21 +90,6 @@ func Pairs(kv ...string) MD {
return md
}
-// String implements the Stringer interface for pretty-printing a MD.
-// Ordering of the values is non-deterministic as it ranges over a map.
-func (md MD) String() string {
- var sb strings.Builder
- fmt.Fprintf(&sb, "MD{")
- for k, v := range md {
- if sb.Len() > 3 {
- fmt.Fprintf(&sb, ", ")
- }
- fmt.Fprintf(&sb, "%s=[%s]", k, strings.Join(v, ", "))
- }
- fmt.Fprintf(&sb, "}")
- return sb.String()
-}
-
// Len returns the number of items in md.
func (md MD) Len() int {
return len(md)
diff --git a/vendor/google.golang.org/grpc/picker_wrapper.go b/vendor/google.golang.org/grpc/picker_wrapper.go
index 56e8aba78..bdaa2130e 100644
--- a/vendor/google.golang.org/grpc/picker_wrapper.go
+++ b/vendor/google.golang.org/grpc/picker_wrapper.go
@@ -22,7 +22,7 @@ import (
"context"
"fmt"
"io"
- "sync"
+ "sync/atomic"
"google.golang.org/grpc/balancer"
"google.golang.org/grpc/codes"
@@ -33,35 +33,43 @@ import (
"google.golang.org/grpc/status"
)
+// pickerGeneration stores a picker and a channel used to signal that a picker
+// newer than this one is available.
+type pickerGeneration struct {
+ // picker is the picker produced by the LB policy. May be nil if a picker
+ // has never been produced.
+ picker balancer.Picker
+ // blockingCh is closed when the picker has been invalidated because there
+ // is a new one available.
+ blockingCh chan struct{}
+}
+
// pickerWrapper is a wrapper of balancer.Picker. It blocks on certain pick
// actions and unblock when there's a picker update.
type pickerWrapper struct {
- mu sync.Mutex
- done bool
- blockingCh chan struct{}
- picker balancer.Picker
+ // If pickerGen holds a nil pointer, the pickerWrapper is closed.
+ pickerGen atomic.Pointer[pickerGeneration]
statsHandlers []stats.Handler // to record blocking picker calls
}
func newPickerWrapper(statsHandlers []stats.Handler) *pickerWrapper {
- return &pickerWrapper{
- blockingCh: make(chan struct{}),
+ pw := &pickerWrapper{
statsHandlers: statsHandlers,
}
+ pw.pickerGen.Store(&pickerGeneration{
+ blockingCh: make(chan struct{}),
+ })
+ return pw
}
-// updatePicker is called by UpdateBalancerState. It unblocks all blocked pick.
+// updatePicker is called by UpdateState calls from the LB policy. It
+// unblocks all blocked pick.
func (pw *pickerWrapper) updatePicker(p balancer.Picker) {
- pw.mu.Lock()
- if pw.done {
- pw.mu.Unlock()
- return
- }
- pw.picker = p
- // pw.blockingCh should never be nil.
- close(pw.blockingCh)
- pw.blockingCh = make(chan struct{})
- pw.mu.Unlock()
+ old := pw.pickerGen.Swap(&pickerGeneration{
+ picker: p,
+ blockingCh: make(chan struct{}),
+ })
+ close(old.blockingCh)
}
// doneChannelzWrapper performs the following:
@@ -98,20 +106,17 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
var lastPickErr error
for {
- pw.mu.Lock()
- if pw.done {
- pw.mu.Unlock()
+ pg := pw.pickerGen.Load()
+ if pg == nil {
return nil, balancer.PickResult{}, ErrClientConnClosing
}
-
- if pw.picker == nil {
- ch = pw.blockingCh
+ if pg.picker == nil {
+ ch = pg.blockingCh
}
- if ch == pw.blockingCh {
+ if ch == pg.blockingCh {
// This could happen when either:
// - pw.picker is nil (the previous if condition), or
- // - has called pick on the current picker.
- pw.mu.Unlock()
+ // - we have already called pick on the current picker.
select {
case <-ctx.Done():
var errStr string
@@ -145,9 +150,8 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
}
}
- ch = pw.blockingCh
- p := pw.picker
- pw.mu.Unlock()
+ ch = pg.blockingCh
+ p := pg.picker
pickResult, err := p.Pick(info)
if err != nil {
@@ -197,24 +201,15 @@ func (pw *pickerWrapper) pick(ctx context.Context, failfast bool, info balancer.
}
func (pw *pickerWrapper) close() {
- pw.mu.Lock()
- defer pw.mu.Unlock()
- if pw.done {
- return
- }
- pw.done = true
- close(pw.blockingCh)
+ old := pw.pickerGen.Swap(nil)
+ close(old.blockingCh)
}
// reset clears the pickerWrapper and prepares it for being used again when idle
// mode is exited.
func (pw *pickerWrapper) reset() {
- pw.mu.Lock()
- defer pw.mu.Unlock()
- if pw.done {
- return
- }
- pw.blockingCh = make(chan struct{})
+ old := pw.pickerGen.Swap(&pickerGeneration{blockingCh: make(chan struct{})})
+ close(old.blockingCh)
}
// dropError is a wrapper error that indicates the LB policy wishes to drop the
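The rewrite replaces the mutex-guarded picker/blockingCh pair with a single atomically swapped generation: readers load one consistent snapshot, and closing the old generation's channel wakes any pick blocked on it. A stripped-down sketch of that generation-swap pattern; the type and variable names are local to the example:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// generation bundles a value with a channel that is closed when a newer
// generation replaces it, mirroring pickerGeneration in picker_wrapper.go.
type generation struct {
	value      string
	invalidate chan struct{}
}

type holder struct {
	gen atomic.Pointer[generation]
}

func newHolder() *holder {
	h := &holder{}
	h.gen.Store(&generation{invalidate: make(chan struct{})})
	return h
}

// update swaps in a new generation and wakes everyone blocked on the old one.
func (h *holder) update(v string) {
	old := h.gen.Swap(&generation{value: v, invalidate: make(chan struct{})})
	close(old.invalidate)
}

func main() {
	h := newHolder()
	first := h.gen.Load()

	done := make(chan struct{})
	go func() {
		<-first.invalidate // blocks until a newer generation arrives
		fmt.Println("woke up, new value:", h.gen.Load().value)
		close(done)
	}()

	h.update("picker-v2")
	<-done
}
```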
diff --git a/vendor/google.golang.org/grpc/resolver_wrapper.go b/vendor/google.golang.org/grpc/resolver_wrapper.go
index 9dcc9780f..c5fb45236 100644
--- a/vendor/google.golang.org/grpc/resolver_wrapper.go
+++ b/vendor/google.golang.org/grpc/resolver_wrapper.go
@@ -171,7 +171,7 @@ func (ccr *ccResolverWrapper) NewAddress(addrs []resolver.Address) {
// ParseServiceConfig is called by resolver implementations to parse a JSON
// representation of the service config.
func (ccr *ccResolverWrapper) ParseServiceConfig(scJSON string) *serviceconfig.ParseResult {
- return parseServiceConfig(scJSON)
+ return parseServiceConfig(scJSON, ccr.cc.dopts.maxCallAttempts)
}
// addChannelzTraceEvent adds a channelz trace event containing the new
diff --git a/vendor/google.golang.org/grpc/service_config.go b/vendor/google.golang.org/grpc/service_config.go
index 9da8fc802..2671c5ef6 100644
--- a/vendor/google.golang.org/grpc/service_config.go
+++ b/vendor/google.golang.org/grpc/service_config.go
@@ -26,6 +26,7 @@ import (
"time"
"google.golang.org/grpc/balancer"
+ "google.golang.org/grpc/balancer/pickfirst"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/internal"
"google.golang.org/grpc/internal/balancer/gracefulswitch"
@@ -163,9 +164,11 @@ type jsonSC struct {
}
func init() {
- internal.ParseServiceConfig = parseServiceConfig
+ internal.ParseServiceConfig = func(js string) *serviceconfig.ParseResult {
+ return parseServiceConfig(js, defaultMaxCallAttempts)
+ }
}
-func parseServiceConfig(js string) *serviceconfig.ParseResult {
+func parseServiceConfig(js string, maxAttempts int) *serviceconfig.ParseResult {
if len(js) == 0 {
return &serviceconfig.ParseResult{Err: fmt.Errorf("no JSON service config provided")}
}
@@ -183,12 +186,12 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
}
c := rsc.LoadBalancingConfig
if c == nil {
- name := PickFirstBalancerName
+ name := pickfirst.Name
if rsc.LoadBalancingPolicy != nil {
name = *rsc.LoadBalancingPolicy
}
if balancer.Get(name) == nil {
- name = PickFirstBalancerName
+ name = pickfirst.Name
}
cfg := []map[string]any{{name: struct{}{}}}
strCfg, err := json.Marshal(cfg)
@@ -218,7 +221,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
WaitForReady: m.WaitForReady,
Timeout: (*time.Duration)(m.Timeout),
}
- if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy); err != nil {
+ if mc.RetryPolicy, err = convertRetryPolicy(m.RetryPolicy, maxAttempts); err != nil {
logger.Warningf("grpc: unmarshalling service config %s: %v", js, err)
return &serviceconfig.ParseResult{Err: err}
}
@@ -264,7 +267,7 @@ func parseServiceConfig(js string) *serviceconfig.ParseResult {
return &serviceconfig.ParseResult{Config: &sc}
}
-func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPolicy, err error) {
+func convertRetryPolicy(jrp *jsonRetryPolicy, maxAttempts int) (p *internalserviceconfig.RetryPolicy, err error) {
if jrp == nil {
return nil, nil
}
@@ -278,17 +281,16 @@ func convertRetryPolicy(jrp *jsonRetryPolicy) (p *internalserviceconfig.RetryPol
return nil, nil
}
+ if jrp.MaxAttempts < maxAttempts {
+ maxAttempts = jrp.MaxAttempts
+ }
rp := &internalserviceconfig.RetryPolicy{
- MaxAttempts: jrp.MaxAttempts,
+ MaxAttempts: maxAttempts,
InitialBackoff: time.Duration(jrp.InitialBackoff),
MaxBackoff: time.Duration(jrp.MaxBackoff),
BackoffMultiplier: jrp.BackoffMultiplier,
RetryableStatusCodes: make(map[codes.Code]bool),
}
- if rp.MaxAttempts > 5 {
- // TODO(retry): Make the max maxAttempts configurable.
- rp.MaxAttempts = 5
- }
for _, code := range jrp.RetryableStatusCodes {
rp.RetryableStatusCodes[code] = true
}
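convertRetryPolicy now clamps the JSON maxAttempts against a per-channel limit passed down from parseServiceConfig, replacing the old hard-coded cap of 5; the effective value is simply the smaller of the two. A tiny sketch of that arithmetic (the default ceiling of 5 comes from the removed code path; how a channel overrides it is an assumption of this example):

```go
package main

import "fmt"

// effectiveMaxAttempts mirrors the clamp in convertRetryPolicy: the service
// config may ask for any number of attempts, but it can never exceed the
// channel's configured ceiling.
func effectiveMaxAttempts(fromServiceConfig, channelLimit int) int {
	if fromServiceConfig < channelLimit {
		return fromServiceConfig
	}
	return channelLimit
}

func main() {
	fmt.Println(effectiveMaxAttempts(10, 5)) // 5: the channel ceiling wins
	fmt.Println(effectiveMaxAttempts(3, 5))  // 3: the service config wins
}
```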
diff --git a/vendor/google.golang.org/grpc/stream.go b/vendor/google.golang.org/grpc/stream.go
index b54563e81..8051ef5b5 100644
--- a/vendor/google.golang.org/grpc/stream.go
+++ b/vendor/google.golang.org/grpc/stream.go
@@ -23,6 +23,7 @@ import (
"errors"
"io"
"math"
+ "math/rand"
"strconv"
"sync"
"time"
@@ -34,7 +35,6 @@ import (
"google.golang.org/grpc/internal/balancerload"
"google.golang.org/grpc/internal/binarylog"
"google.golang.org/grpc/internal/channelz"
- "google.golang.org/grpc/internal/grpcrand"
"google.golang.org/grpc/internal/grpcutil"
imetadata "google.golang.org/grpc/internal/metadata"
iresolver "google.golang.org/grpc/internal/resolver"
@@ -699,7 +699,7 @@ func (a *csAttempt) shouldRetry(err error) (bool, error) {
if max := float64(rp.MaxBackoff); cur > max {
cur = max
}
- dur = time.Duration(grpcrand.Int63n(int64(cur)))
+ dur = time.Duration(rand.Int63n(int64(cur)))
cs.numRetriesSincePushback++
}
diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go
index e1806e760..bafaef99b 100644
--- a/vendor/google.golang.org/grpc/version.go
+++ b/vendor/google.golang.org/grpc/version.go
@@ -19,4 +19,4 @@
package grpc
// Version is the current grpc version.
-const Version = "1.64.0"
+const Version = "1.65.0"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go
new file mode 100644
index 000000000..82a473bb1
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/rand/rand.go
@@ -0,0 +1,127 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package rand provides utilities related to randomization.
+package rand
+
+import (
+ "math/rand"
+ "sync"
+ "time"
+)
+
+var rng = struct {
+ sync.Mutex
+ rand *rand.Rand
+}{
+ rand: rand.New(rand.NewSource(time.Now().UnixNano())),
+}
+
+// Int returns a non-negative pseudo-random int.
+func Int() int {
+ rng.Lock()
+ defer rng.Unlock()
+ return rng.rand.Int()
+}
+
+// Intn generates an integer in range [0,max).
+// By design this should panic if input is invalid, <= 0.
+func Intn(max int) int {
+ rng.Lock()
+ defer rng.Unlock()
+ return rng.rand.Intn(max)
+}
+
+// IntnRange generates an integer in range [min,max).
+// By design this should panic if input is invalid, <= 0.
+func IntnRange(min, max int) int {
+ rng.Lock()
+ defer rng.Unlock()
+ return rng.rand.Intn(max-min) + min
+}
+
+// IntnRange generates an int64 integer in range [min,max).
+// By design this should panic if input is invalid, <= 0.
+func Int63nRange(min, max int64) int64 {
+ rng.Lock()
+ defer rng.Unlock()
+ return rng.rand.Int63n(max-min) + min
+}
+
+// Seed seeds the rng with the provided seed.
+func Seed(seed int64) {
+ rng.Lock()
+ defer rng.Unlock()
+
+ rng.rand = rand.New(rand.NewSource(seed))
+}
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n)
+// from the default Source.
+func Perm(n int) []int {
+ rng.Lock()
+ defer rng.Unlock()
+ return rng.rand.Perm(n)
+}
+
+const (
+ // We omit vowels from the set of available characters to reduce the chances
+ // of "bad words" being formed.
+ alphanums = "bcdfghjklmnpqrstvwxz2456789"
+ // No. of bits required to index into alphanums string.
+ alphanumsIdxBits = 5
+ // Mask used to extract last alphanumsIdxBits of an int.
+ alphanumsIdxMask = 1<<alphanumsIdxBits - 1
+ // No. of random letters we can extract from a single int63.
+ maxAlphanumsPerInt = 63 / alphanumsIdxBits
+)
+
+// String generates a random alphanumeric string, without vowels, which is n
+// characters long. This will panic if n is less than zero.
+// How the random string is created:
+// - we generate random int63's
+// - from each int63, we are extracting multiple random letters by bit-shifting and masking
+// - if some index is out of range of alphanums we neglect it (unlikely to happen multiple times in a row)
+func String(n int) string {
+ b := make([]byte, n)
+ rng.Lock()
+ defer rng.Unlock()
+
+ randomInt63 := rng.rand.Int63()
+ remaining := maxAlphanumsPerInt
+ for i := 0; i < n; {
+ if remaining == 0 {
+ randomInt63, remaining = rng.rand.Int63(), maxAlphanumsPerInt
+ }
+ if idx := int(randomInt63 & alphanumsIdxMask); idx < len(alphanums) {
+ b[i] = alphanums[idx]
+ i++
+ }
+ randomInt63 >>= alphanumsIdxBits
+ remaining--
+ }
+ return string(b)
+}
+
+// SafeEncodeString encodes s using the same characters as rand.String. This reduces the chances of bad words and
+// ensures that strings generated from hash functions appear consistent throughout the API.
+func SafeEncodeString(s string) string {
+ r := make([]byte, len(s))
+ for i, b := range []rune(s) {
+ r[i] = alphanums[(int(b) % len(alphanums))]
+ }
+ return string(r)
+}
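The vendored rand package wraps math/rand behind a mutex and exposes helpers used for generating object-name suffixes. A short usage sketch, assuming the vendored import path is on the module path; the printed values are random by design:

```go
package main

import (
	"fmt"

	utilrand "k8s.io/apimachinery/pkg/util/rand"
)

func main() {
	// A five-character, vowel-free suffix such as "x7mkz".
	fmt.Println(utilrand.String(5))

	// Re-encode an arbitrary string into the same safe alphabet.
	fmt.Println(utilrand.SafeEncodeString("abc123"))

	// Integer helpers are safe for concurrent use because the package
	// serializes access to its shared *rand.Rand.
	fmt.Println(utilrand.IntnRange(10, 20)) // in [10, 20)
}
```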
diff --git a/vendor/k8s.io/apiserver/LICENSE b/vendor/k8s.io/apiserver/LICENSE
new file mode 100644
index 000000000..d64569567
--- /dev/null
+++ b/vendor/k8s.io/apiserver/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/k8s.io/apiserver/pkg/storage/names/generate.go b/vendor/k8s.io/apiserver/pkg/storage/names/generate.go
new file mode 100644
index 000000000..0b8afff0e
--- /dev/null
+++ b/vendor/k8s.io/apiserver/pkg/storage/names/generate.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2014 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package names
+
+import (
+ "fmt"
+
+ utilrand "k8s.io/apimachinery/pkg/util/rand"
+)
+
+// NameGenerator generates names for objects. Some backends may have more information
+// available to guide selection of new names and this interface hides those details.
+type NameGenerator interface {
+ // GenerateName generates a valid name from the base name, adding a random suffix to
+ // the base. If base is valid, the returned name must also be valid. The generator is
+ // responsible for knowing the maximum valid name length.
+ GenerateName(base string) string
+}
+
+// simpleNameGenerator generates random names.
+type simpleNameGenerator struct{}
+
+// SimpleNameGenerator is a generator that returns the name plus a random suffix of five alphanumerics
+// when a name is requested. The string is guaranteed to not exceed the length of a standard Kubernetes
+// name (63 characters)
+var SimpleNameGenerator NameGenerator = simpleNameGenerator{}
+
+const (
+ // TODO: make this flexible for non-core resources with alternate naming rules.
+ maxNameLength = 63
+ randomLength = 5
+ MaxGeneratedNameLength = maxNameLength - randomLength
+)
+
+func (simpleNameGenerator) GenerateName(base string) string {
+ if len(base) > MaxGeneratedNameLength {
+ base = base[:MaxGeneratedNameLength]
+ }
+ return fmt.Sprintf("%s%s", base, utilrand.String(randomLength))
+}
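GenerateName truncates the base to 58 characters so the five-character random suffix keeps the result within the 63-character Kubernetes name limit. A quick sketch of that behaviour, assuming the vendored apiserver package is importable; the suffix in the output is random:

```go
package main

import (
	"fmt"
	"strings"

	"k8s.io/apiserver/pkg/storage/names"
)

func main() {
	// Typical use: "mybroker-" becomes something like "mybroker-x7mkz".
	fmt.Println(names.SimpleNameGenerator.GenerateName("mybroker-"))

	// Long bases are truncated to MaxGeneratedNameLength (58) before the
	// suffix is appended, so the result never exceeds 63 characters.
	long := strings.Repeat("a", 70)
	name := names.SimpleNameGenerator.GenerateName(long)
	fmt.Println(len(name)) // 63
}
```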
diff --git a/vendor/knative.dev/eventing/pkg/adapter/v2/cloudevents.go b/vendor/knative.dev/eventing/pkg/adapter/v2/cloudevents.go
index 0a17fa55f..21d039560 100644
--- a/vendor/knative.dev/eventing/pkg/adapter/v2/cloudevents.go
+++ b/vendor/knative.dev/eventing/pkg/adapter/v2/cloudevents.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/adapter/v2/configurator_configmap.go b/vendor/knative.dev/eventing/pkg/adapter/v2/configurator_configmap.go
index 630b236cd..f20633778 100644
--- a/vendor/knative.dev/eventing/pkg/adapter/v2/configurator_configmap.go
+++ b/vendor/knative.dev/eventing/pkg/adapter/v2/configurator_configmap.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/adapter/v2/configurator_environment.go b/vendor/knative.dev/eventing/pkg/adapter/v2/configurator_environment.go
index 631760744..372b236a6 100644
--- a/vendor/knative.dev/eventing/pkg/adapter/v2/configurator_environment.go
+++ b/vendor/knative.dev/eventing/pkg/adapter/v2/configurator_environment.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/adapter/v2/context.go b/vendor/knative.dev/eventing/pkg/adapter/v2/context.go
index 7de6bd81c..f36f95daa 100644
--- a/vendor/knative.dev/eventing/pkg/adapter/v2/context.go
+++ b/vendor/knative.dev/eventing/pkg/adapter/v2/context.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/adapter/v2/main.go b/vendor/knative.dev/eventing/pkg/adapter/v2/main.go
index 6387b8b3d..1eb274364 100644
--- a/vendor/knative.dev/eventing/pkg/adapter/v2/main.go
+++ b/vendor/knative.dev/eventing/pkg/adapter/v2/main.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/adapter/v2/main_message.go b/vendor/knative.dev/eventing/pkg/adapter/v2/main_message.go
index ba8f9ab5c..8d38117f3 100644
--- a/vendor/knative.dev/eventing/pkg/adapter/v2/main_message.go
+++ b/vendor/knative.dev/eventing/pkg/adapter/v2/main_message.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/adapter/v2/util/crstatusevent/eventsstatus.go b/vendor/knative.dev/eventing/pkg/adapter/v2/util/crstatusevent/eventsstatus.go
index 45c976dcd..9c90166c3 100644
--- a/vendor/knative.dev/eventing/pkg/adapter/v2/util/crstatusevent/eventsstatus.go
+++ b/vendor/knative.dev/eventing/pkg/adapter/v2/util/crstatusevent/eventsstatus.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/config/doc.go b/vendor/knative.dev/eventing/pkg/apis/config/doc.go
index a0d4319ef..61305f830 100644
--- a/vendor/knative.dev/eventing/pkg/apis/config/doc.go
+++ b/vendor/knative.dev/eventing/pkg/apis/config/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/config/store.go b/vendor/knative.dev/eventing/pkg/apis/config/store.go
index a0dfc8645..30f38932c 100644
--- a/vendor/knative.dev/eventing/pkg/apis/config/store.go
+++ b/vendor/knative.dev/eventing/pkg/apis/config/store.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/lifecycle_helper.go b/vendor/knative.dev/eventing/pkg/apis/duck/lifecycle_helper.go
new file mode 100644
index 000000000..1b4badcb7
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/duck/lifecycle_helper.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duck
+
+import (
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+)
+
+// DeploymentIsAvailable determines if the provided deployment is available. Note that if it cannot
+// determine the Deployment's availability, it returns `def` (short for default).
+func DeploymentIsAvailable(d *appsv1.DeploymentStatus, def bool) bool {
+ // Check if the Deployment is available.
+ for _, cond := range d.Conditions {
+ if cond.Type == appsv1.DeploymentAvailable {
+ return cond.Status == "True"
+ }
+ }
+ return def
+}
+
+// EndpointsAreAvailable determines if the provided Endpoints are available.
+func EndpointsAreAvailable(ep *corev1.Endpoints) bool {
+ for _, subset := range ep.Subsets {
+ if len(subset.Addresses) > 0 {
+ return true
+ }
+ }
+ return false
+}
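These helpers reduce availability checks to "any Available condition" for Deployments and "any subset with addresses" for Endpoints. A minimal sketch of how a reconciler might call them; the objects below are hand-built fixtures, not real cluster state:

```go
package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"

	"knative.dev/eventing/pkg/apis/duck"
)

func main() {
	dep := &appsv1.DeploymentStatus{
		Conditions: []appsv1.DeploymentCondition{
			{Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue},
		},
	}
	// The second argument is the default used when no Available condition exists.
	fmt.Println(duck.DeploymentIsAvailable(dep, false)) // true

	ep := &corev1.Endpoints{
		Subsets: []corev1.EndpointSubset{
			{Addresses: []corev1.EndpointAddress{{IP: "10.0.0.1"}}},
		},
	}
	fmt.Println(duck.EndpointsAreAvailable(ep)) // true
}
```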
diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1/channelable_types.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1/channelable_types.go
index 82d75bd98..9dc7580bc 100644
--- a/vendor/knative.dev/eventing/pkg/apis/duck/v1/channelable_types.go
+++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1/channelable_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1/delivery_conversion.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1/delivery_conversion.go
index bae7a804e..b0c3cd5ea 100644
--- a/vendor/knative.dev/eventing/pkg/apis/duck/v1/delivery_conversion.go
+++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1/delivery_conversion.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1/delivery_defaults.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1/delivery_defaults.go
index b06c792ef..574df94f2 100644
--- a/vendor/knative.dev/eventing/pkg/apis/duck/v1/delivery_defaults.go
+++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1/delivery_defaults.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1/delivery_types.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1/delivery_types.go
index 48048a09d..d9b2b6783 100644
--- a/vendor/knative.dev/eventing/pkg/apis/duck/v1/delivery_types.go
+++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1/delivery_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -81,6 +81,14 @@ type DeliverySpec struct {
//
// +optional
RetryAfterMax *string `json:"retryAfterMax,omitempty"`
+
+ // format specifies the desired event format for the cloud event.
+ // It can be one of the following values:
+ // - nil: default value, no specific format required.
+ // - "JSON": indicates the event should be in structured mode.
+ // - "binary": indicates the event should be in binary mode.
+ //+optional
+ Format *FormatType `json:"format,omitempty"`
}
func (ds *DeliverySpec) Validate(ctx context.Context) *apis.FieldError {
@@ -123,6 +131,15 @@ func (ds *DeliverySpec) Validate(ctx context.Context) *apis.FieldError {
}
}
+ if ds.Format != nil {
+ switch *ds.Format {
+ case DeliveryFormatBinary, DeliveryFormatJson:
+ // nothing
+ default:
+ errs = errs.Also(apis.ErrInvalidValue(*ds.Format, "format"))
+ }
+ }
+
if ds.RetryAfterMax != nil {
if feature.FromContext(ctx).IsEnabled(feature.DeliveryRetryAfter) {
p, me := period.Parse(*ds.RetryAfterMax)
@@ -148,6 +165,14 @@ const (
BackoffPolicyExponential BackoffPolicyType = "exponential"
)
+// FormatType is the type for delivery format
+type FormatType string
+
+const (
+ DeliveryFormatJson FormatType = "json"
+ DeliveryFormatBinary FormatType = "binary"
+)
+
// DeliveryStatus contains the Status of an object supporting delivery options. This type is intended to be embedded into a status struct.
type DeliveryStatus struct {
// DeadLetterSink is a KReference that is the reference to the native, platform specific channel
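The new format field is validated against the two FormatType constants, so any value other than "json" or "binary" fails DeliverySpec validation. A hedged sketch of that check; it assumes a bare context is sufficient here because the feature-flag lookup in Validate only guards the RetryAfterMax branch:

```go
package main

import (
	"context"
	"fmt"

	duckv1 "knative.dev/eventing/pkg/apis/duck/v1"
)

func main() {
	valid := duckv1.DeliveryFormatJson // "json"
	ds := &duckv1.DeliverySpec{Format: &valid}
	fmt.Println(ds.Validate(context.Background())) // <nil>

	bogus := duckv1.FormatType("xml")
	ds = &duckv1.DeliverySpec{Format: &bogus}
	fmt.Println(ds.Validate(context.Background())) // reports an invalid "format" value
}
```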
diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1/doc.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1/doc.go
index ff5ac0c20..00367a5cb 100644
--- a/vendor/knative.dev/eventing/pkg/apis/duck/v1/doc.go
+++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1/doc.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1/register.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1/register.go
index fe37475f1..5711480fb 100644
--- a/vendor/knative.dev/eventing/pkg/apis/duck/v1/register.go
+++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1/register.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1/subscribable_types.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1/subscribable_types.go
index b7f719b9f..aa307d74a 100644
--- a/vendor/knative.dev/eventing/pkg/apis/duck/v1/subscribable_types.go
+++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1/subscribable_types.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1/subscribable_types_conversion.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1/subscribable_types_conversion.go
index 52985caf4..f321b6b62 100644
--- a/vendor/knative.dev/eventing/pkg/apis/duck/v1/subscribable_types_conversion.go
+++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1/subscribable_types_conversion.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/duck/v1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/duck/v1/zz_generated.deepcopy.go
index d7965aaf4..52f6b7bb6 100644
--- a/vendor/knative.dev/eventing/pkg/apis/duck/v1/zz_generated.deepcopy.go
+++ b/vendor/knative.dev/eventing/pkg/apis/duck/v1/zz_generated.deepcopy.go
@@ -201,6 +201,11 @@ func (in *DeliverySpec) DeepCopyInto(out *DeliverySpec) {
*out = new(string)
**out = **in
}
+ if in.Format != nil {
+ in, out := &in.Format, &out.Format
+ *out = new(FormatType)
+ **out = **in
+ }
return
}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/defaults.go b/vendor/knative.dev/eventing/pkg/apis/eventing/defaults.go
index d2b52e8f8..ed0aa4ab1 100644
--- a/vendor/knative.dev/eventing/pkg/apis/eventing/defaults.go
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/defaults.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/register.go b/vendor/knative.dev/eventing/pkg/apis/eventing/register.go
index ae699f239..5470025cc 100644
--- a/vendor/knative.dev/eventing/pkg/apis/eventing/register.go
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/register.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_conversion.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_conversion.go
new file mode 100644
index 000000000..6feba078d
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_conversion.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+func (source *Broker) ConvertTo(ctx context.Context, sink apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+func (sink *Broker) ConvertFrom(ctx context.Context, source apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", source)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_defaults.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_defaults.go
new file mode 100644
index 000000000..f4fc1550d
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_defaults.go
@@ -0,0 +1,58 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+
+ "knative.dev/pkg/apis"
+
+ "knative.dev/eventing/pkg/apis/config"
+ "knative.dev/eventing/pkg/apis/eventing"
+)
+
+func (b *Broker) SetDefaults(ctx context.Context) {
+ // Default Spec fields.
+ withNS := apis.WithinParent(ctx, b.ObjectMeta)
+ b.Spec.SetDefaults(withNS)
+ eventing.DefaultBrokerClassIfUnset(withNS, &b.ObjectMeta)
+}
+
+func (bs *BrokerSpec) SetDefaults(ctx context.Context) {
+ cfg := config.FromContextOrDefaults(ctx)
+ c, err := cfg.Defaults.GetBrokerConfig(apis.ParentMeta(ctx).Namespace)
+ if err == nil {
+ if bs.Config == nil {
+ bs.Config = c.KReference
+ }
+ if bs.Delivery == nil && c.Delivery != nil {
+ bs.Delivery = &eventingduckv1.DeliverySpec{
+ DeadLetterSink: c.Delivery.DeadLetterSink,
+ Retry: c.Delivery.Retry,
+ BackoffPolicy: c.Delivery.BackoffPolicy,
+ BackoffDelay: c.Delivery.BackoffDelay,
+ }
+ }
+ }
+ // Default the namespace if not given
+ if bs.Config != nil {
+ bs.Config.SetDefaults(ctx)
+ }
+ bs.Delivery.SetDefaults(ctx)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_lifecycle.go
new file mode 100644
index 000000000..f1836e99b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_lifecycle.go
@@ -0,0 +1,138 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "sync"
+
+ "knative.dev/pkg/apis"
+ v1 "knative.dev/pkg/apis/duck/v1"
+
+ eventingduck "knative.dev/eventing/pkg/apis/duck/v1"
+)
+
+const (
+ BrokerConditionReady = apis.ConditionReady
+ BrokerConditionIngress apis.ConditionType = "IngressReady"
+ BrokerConditionTriggerChannel apis.ConditionType = "TriggerChannelReady"
+ BrokerConditionFilter apis.ConditionType = "FilterReady"
+ BrokerConditionAddressable apis.ConditionType = "Addressable"
+ BrokerConditionDeadLetterSinkResolved apis.ConditionType = "DeadLetterSinkResolved"
+ BrokerConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady"
+)
+
+var brokerCondSet = apis.NewLivingConditionSet(
+ BrokerConditionIngress,
+ BrokerConditionTriggerChannel,
+ BrokerConditionFilter,
+ BrokerConditionAddressable,
+ BrokerConditionDeadLetterSinkResolved,
+ BrokerConditionEventPoliciesReady,
+)
+var brokerCondSetLock = sync.RWMutex{}
+
+// RegisterAlternateBrokerConditionSet register a apis.ConditionSet for the given broker class.
+func RegisterAlternateBrokerConditionSet(conditionSet apis.ConditionSet) {
+ brokerCondSetLock.Lock()
+ defer brokerCondSetLock.Unlock()
+
+ brokerCondSet = conditionSet
+}
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (b *Broker) GetConditionSet() apis.ConditionSet {
+ brokerCondSetLock.RLock()
+ defer brokerCondSetLock.RUnlock()
+
+ return brokerCondSet
+}
+
+// GetConditionSet retrieves the condition set for this resource.
+func (bs *BrokerStatus) GetConditionSet() apis.ConditionSet {
+ brokerCondSetLock.RLock()
+ defer brokerCondSetLock.RUnlock()
+
+ return brokerCondSet
+}
+
+// GetTopLevelCondition returns the top level Condition.
+func (bs *BrokerStatus) GetTopLevelCondition() *apis.Condition {
+ return bs.GetConditionSet().Manage(bs).GetTopLevelCondition()
+}
+
+// SetAddress makes this Broker addressable by setting the URI. It also
+// sets the BrokerConditionAddressable to true.
+func (bs *BrokerStatus) SetAddress(address *v1.Addressable) {
+ bs.AddressStatus = v1.AddressStatus{
+ Address: address,
+ }
+
+ if address != nil && address.URL != nil {
+ bs.GetConditionSet().Manage(bs).MarkTrue(BrokerConditionAddressable)
+ bs.AddressStatus.Address.Name = &address.URL.Scheme
+ } else {
+ bs.GetConditionSet().Manage(bs).MarkFalse(BrokerConditionAddressable, "nil URL", "URL is nil")
+ }
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (bs *BrokerStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return bs.GetConditionSet().Manage(bs).GetCondition(t)
+}
+
+// IsReady returns true if the resource is ready overall and the latest spec has been observed.
+func (b *Broker) IsReady() bool {
+ bs := b.Status
+ return bs.ObservedGeneration == b.Generation &&
+ b.GetConditionSet().Manage(&bs).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (bs *BrokerStatus) InitializeConditions() {
+ bs.GetConditionSet().Manage(bs).InitializeConditions()
+}
+
+func (bs *BrokerStatus) MarkDeadLetterSinkResolvedSucceeded(deadLetterSink eventingduck.DeliveryStatus) {
+ bs.DeliveryStatus = deadLetterSink
+ bs.GetConditionSet().Manage(bs).MarkTrue(BrokerConditionDeadLetterSinkResolved)
+}
+
+func (bs *BrokerStatus) MarkDeadLetterSinkNotConfigured() {
+ bs.DeliveryStatus = eventingduck.DeliveryStatus{}
+ bs.GetConditionSet().Manage(bs).MarkTrueWithReason(BrokerConditionDeadLetterSinkResolved, "DeadLetterSinkNotConfigured", "No dead letter sink is configured.")
+}
+
+func (bs *BrokerStatus) MarkDeadLetterSinkResolvedFailed(reason, messageFormat string, messageA ...interface{}) {
+ bs.DeliveryStatus = eventingduck.DeliveryStatus{}
+ bs.GetConditionSet().Manage(bs).MarkFalse(BrokerConditionDeadLetterSinkResolved, reason, messageFormat, messageA...)
+}
+
+func (bs *BrokerStatus) MarkEventPoliciesTrue() {
+ bs.GetConditionSet().Manage(bs).MarkTrue(BrokerConditionEventPoliciesReady)
+}
+
+func (bs *BrokerStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) {
+ bs.GetConditionSet().Manage(bs).MarkTrueWithReason(BrokerConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (bs *BrokerStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) {
+ bs.GetConditionSet().Manage(bs).MarkFalse(BrokerConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (bs *BrokerStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) {
+ bs.GetConditionSet().Manage(bs).MarkUnknown(BrokerConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
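Broker conditions live in a package-level, lock-guarded condition set so that alternate broker classes can substitute their own readiness conditions. A hedged sketch of how a custom broker controller might register one; the extra condition names below are invented for the example:

```go
package main

import (
	"knative.dev/pkg/apis"

	eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
)

func main() {
	// A hypothetical broker class that only tracks its own two
	// sub-conditions plus addressability.
	custom := apis.NewLivingConditionSet(
		eventingv1.BrokerConditionAddressable,
		apis.ConditionType("KafkaTopicReady"),
		apis.ConditionType("DispatcherReady"),
	)

	// From now on Broker.GetConditionSet and BrokerStatus.GetConditionSet
	// return this set, so status management uses the custom conditions.
	eventingv1.RegisterAlternateBrokerConditionSet(custom)
}
```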
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_lifecycle_mt.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_lifecycle_mt.go
new file mode 100644
index 000000000..a27ceaa24
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_lifecycle_mt.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+
+ "knative.dev/eventing/pkg/apis/duck"
+ duckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+)
+
+func (bs *BrokerStatus) MarkIngressFailed(reason, format string, args ...interface{}) {
+ bs.GetConditionSet().Manage(bs).MarkFalse(BrokerConditionIngress, reason, format, args...)
+}
+
+func (bs *BrokerStatus) PropagateIngressAvailability(ep *corev1.Endpoints) {
+ if duck.EndpointsAreAvailable(ep) {
+ bs.GetConditionSet().Manage(bs).MarkTrue(BrokerConditionIngress)
+ } else {
+ bs.MarkIngressFailed("EndpointsUnavailable", "Endpoints %q are unavailable.", ep.Name)
+ }
+}
+
+func (bs *BrokerStatus) MarkTriggerChannelFailed(reason, format string, args ...interface{}) {
+ bs.GetConditionSet().Manage(bs).MarkFalse(BrokerConditionTriggerChannel, reason, format, args...)
+}
+
+func (bs *BrokerStatus) PropagateTriggerChannelReadiness(cs *duckv1.ChannelableStatus) {
+ // TODO: Once you can get a Ready status from Channelable in a generic way, use it here...
+ address := cs.AddressStatus.Address
+ if address != nil {
+ bs.GetConditionSet().Manage(bs).MarkTrue(BrokerConditionTriggerChannel)
+ } else {
+ bs.MarkTriggerChannelFailed("ChannelNotReady", "trigger Channel is not ready: not addressable")
+ }
+}
+
+func (bs *BrokerStatus) MarkBrokerAddressableUnknown(reason, format string, args ...interface{}) {
+ bs.GetConditionSet().Manage(bs).MarkUnknown(BrokerConditionAddressable, reason, format, args...)
+}
+
+func (bs *BrokerStatus) MarkFilterFailed(reason, format string, args ...interface{}) {
+ bs.GetConditionSet().Manage(bs).MarkFalse(BrokerConditionFilter, reason, format, args...)
+}
+
+func (bs *BrokerStatus) PropagateFilterAvailability(ep *corev1.Endpoints) {
+ if duck.EndpointsAreAvailable(ep) {
+ bs.GetConditionSet().Manage(bs).MarkTrue(BrokerConditionFilter)
+ } else {
+ bs.MarkFilterFailed("EndpointsUnavailable", "Endpoints %q are unavailable.", ep.Name)
+ }
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_types.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_types.go
new file mode 100644
index 000000000..6eb6f5ede
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_types.go
@@ -0,0 +1,129 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler:class=eventing.knative.dev/broker.class
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Broker collects a pool of events that are consumable using Triggers. Brokers
+// provide a well-known endpoint for event delivery that senders can use with
+// minimal knowledge of the event routing strategy. Subscribers use Triggers to
+// request delivery of events from a Broker's pool to a specific URL or
+// Addressable endpoint.
+type Broker struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired state of the Broker.
+ Spec BrokerSpec `json:"spec,omitempty"`
+
+ // Status represents the current state of the Broker. This data may be out of
+ // date.
+ // +optional
+ Status BrokerStatus `json:"status,omitempty"`
+}
+
+var (
+ // Check that Broker can be validated, can be defaulted, and has immutable fields.
+ _ apis.Validatable = (*Broker)(nil)
+ _ apis.Defaultable = (*Broker)(nil)
+
+ // Check that Broker can return its spec untyped.
+ _ apis.HasSpec = (*Broker)(nil)
+
+ _ runtime.Object = (*Broker)(nil)
+
+ // Check that we can create OwnerReferences to a Broker.
+ _ kmeta.OwnerRefable = (*Broker)(nil)
+
+ // Check that the type conforms to the duck Knative Resource shape.
+ _ duckv1.KRShaped = (*Broker)(nil)
+)
+
+type BrokerSpec struct {
+ // Config is a KReference to the configuration that specifies
+ // configuration options for this Broker. For example, this could be
+ // a pointer to a ConfigMap.
+ // +optional
+ Config *duckv1.KReference `json:"config,omitempty"`
+
+ // Delivery contains the delivery spec for each trigger
+ // to this Broker. Each trigger delivery spec, if any, overrides this
+ // global delivery spec.
+ // +optional
+ Delivery *eventingduckv1.DeliverySpec `json:"delivery,omitempty"`
+}
+
+// BrokerStatus represents the current state of a Broker.
+type BrokerStatus struct {
+ // inherits duck/v1 Status, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Broker that was last processed by the controller.
+ // * Conditions - the latest available observations of a resource's current state.
+ duckv1.Status `json:",inline"`
+
+ // AddressStatus is the part where the Broker fulfills the Addressable contract.
+ // It exposes the endpoint as a URI to get events delivered into the Broker mesh.
+ // +optional
+ duckv1.AddressStatus `json:",inline"`
+
+ // DeliveryStatus contains a resolved URL to the dead letter sink address, and any other
+ // resolved delivery options.
+ eventingduckv1.DeliveryStatus `json:",inline"`
+
+ // AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this Broker
+ // +optional
+ eventingduckv1.AppliedEventPoliciesStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// BrokerList is a collection of Brokers.
+type BrokerList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+
+ Items []Broker `json:"items"`
+}
+
+// GetGroupVersionKind returns GroupVersionKind for Brokers
+func (t *Broker) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("Broker")
+}
+
+// GetUntypedSpec returns the spec of the Broker.
+func (b *Broker) GetUntypedSpec() interface{} {
+ return b.Spec
+}
+
+// GetStatus retrieves the status of the Broker. Implements the KRShaped interface.
+func (t *Broker) GetStatus() *duckv1.Status {
+ return &t.Status.Status
+}
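// --- editorial sketch, not part of the vendored patch ---
// A hedged example of constructing a Broker with the types above: a ConfigMap
// backed spec.config plus the broker.class annotation that the validation below
// requires. Namespace, names and the class value are placeholders.
func exampleBroker() *Broker {
	return &Broker{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "default",
			Namespace: "demo",
			Annotations: map[string]string{
				"eventing.knative.dev/broker.class": "MTChannelBasedBroker",
			},
		},
		Spec: BrokerSpec{
			Config: &duckv1.KReference{
				APIVersion: "v1",
				Kind:       "ConfigMap",
				Namespace:  "demo",
				Name:       "config-br-default-channel",
			},
		},
	}
}
// --- end editorial sketch ---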
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_validation.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_validation.go
new file mode 100644
index 000000000..51e30e10c
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/broker_validation.go
@@ -0,0 +1,115 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "github.com/google/go-cmp/cmp/cmpopts"
+
+ "knative.dev/pkg/apis"
+ "knative.dev/pkg/kmp"
+
+ "knative.dev/eventing/pkg/apis/config"
+)
+
+const (
+ BrokerClassAnnotationKey = "eventing.knative.dev/broker.class"
+)
+
+func (b *Broker) Validate(ctx context.Context) *apis.FieldError {
+ ctx = apis.WithinParent(ctx, b.ObjectMeta)
+
+ cfg := config.FromContextOrDefaults(ctx)
+ var brConfig *config.ClassAndBrokerConfig
+ if cfg.Defaults != nil {
+ if c, ok := cfg.Defaults.NamespaceDefaultsConfig[b.GetNamespace()]; ok {
+ brConfig = c
+ } else {
+ brConfig = cfg.Defaults.ClusterDefault
+ }
+ }
+
+ withNS := ctx
+ if brConfig == nil || brConfig.DisallowDifferentNamespaceConfig == nil || !*brConfig.DisallowDifferentNamespaceConfig {
+ withNS = apis.AllowDifferentNamespace(ctx)
+ }
+
+ // Make sure a BrokerClassAnnotation exists
+ var errs *apis.FieldError
+ if bc, ok := b.GetAnnotations()[BrokerClassAnnotationKey]; !ok || bc == "" {
+ errs = errs.Also(apis.ErrMissingField(BrokerClassAnnotationKey))
+ }
+
+ errs = errs.Also(b.Spec.Validate(withNS).ViaField("spec"))
+ if apis.IsInUpdate(ctx) {
+ original := apis.GetBaseline(ctx).(*Broker)
+ errs = errs.Also(b.CheckImmutableFields(ctx, original))
+ }
+ return errs
+}
+
+func (bs *BrokerSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+
+ // Validate the Config
+ if bs.Config != nil {
+ if ce := bs.Config.Validate(ctx); ce != nil {
+ errs = errs.Also(ce.ViaField("config"))
+ }
+ }
+
+ if bs.Delivery != nil {
+ if de := bs.Delivery.Validate(ctx); de != nil {
+ errs = errs.Also(de.ViaField("delivery"))
+ }
+ }
+ return errs
+}
+
+func (b *Broker) CheckImmutableFields(ctx context.Context, original *Broker) *apis.FieldError {
+ if original == nil {
+ return nil
+ }
+
+ // Only Delivery options are mutable.
+ ignoreArguments := cmpopts.IgnoreFields(BrokerSpec{}, "Delivery")
+ if diff, err := kmp.ShortDiff(original.Spec, b.Spec, ignoreArguments); err != nil {
+ return &apis.FieldError{
+ Message: "Failed to diff Broker",
+ Paths: []string{"spec"},
+ Details: err.Error(),
+ }
+ } else if diff != "" {
+ return &apis.FieldError{
+ Message: "Immutable fields changed (-old +new)",
+ Paths: []string{"spec"},
+ Details: diff,
+ }
+ }
+
+ // Make sure you can't change the class annotation.
+ if diff, _ := kmp.ShortDiff(original.GetAnnotations()[BrokerClassAnnotationKey], b.GetAnnotations()[BrokerClassAnnotationKey]); diff != "" {
+ return &apis.FieldError{
+ Message: "Immutable annotations changed (-old +new)",
+ Paths: []string{"annotations"},
+ Details: diff,
+ }
+ }
+
+ return nil
+}
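// --- editorial sketch, not part of the vendored patch ---
// Illustrates the update-time behaviour of CheckImmutableFields above: the
// broker.class annotation may not change after creation, so the returned
// FieldError is non-nil. The class value is a placeholder.
func exampleImmutableClass(ctx context.Context, original *Broker) *apis.FieldError {
	updated := original.DeepCopy()
	if updated.Annotations == nil {
		updated.Annotations = map[string]string{}
	}
	updated.Annotations[BrokerClassAnnotationKey] = "SomeOtherBrokerClass"
	// Yields "Immutable annotations changed (-old +new)" on the "annotations" path.
	return updated.CheckImmutableFields(ctx, original)
}
// --- end editorial sketch ---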
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/doc.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/doc.go
new file mode 100644
index 000000000..97ed2d4ae
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1 is the v1 version of the API.
+// +k8s:deepcopy-gen=package
+// +groupName=eventing.knative.dev
+package v1
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/register.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/register.go
new file mode 100644
index 000000000..3b3d5af8a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "knative.dev/eventing/pkg/apis/eventing"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: eventing.GroupName, Version: "v1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Broker{},
+ &BrokerList{},
+ &Trigger{},
+ &TriggerList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
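// --- editorial sketch, not part of the vendored patch ---
// Typical consumption of the scheme builder above: register the eventing v1
// types into a runtime.Scheme, e.g. before building a codec or a fake client.
func exampleScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := AddToScheme(scheme); err != nil {
		return nil, err
	}
	// scheme now knows Broker, BrokerList, Trigger and TriggerList.
	return scheme, nil
}
// --- end editorial sketch ---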
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/test_helper.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/test_helper.go
new file mode 100644
index 000000000..b52ce75be
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/test_helper.go
@@ -0,0 +1,141 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+
+ eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+)
+
+type testHelper struct{}
+
+// TestHelper contains helpers for unit tests.
+var TestHelper = testHelper{}
+
+func (testHelper) ReadySubscriptionCondition() *apis.Condition {
+ return &apis.Condition{
+ Type: apis.ConditionReady,
+ Status: corev1.ConditionTrue,
+ Severity: apis.ConditionSeverityError,
+ }
+}
+
+func (testHelper) FalseSubscriptionCondition() *apis.Condition {
+ return &apis.Condition{
+ Type: apis.ConditionReady,
+ Status: corev1.ConditionFalse,
+ Severity: apis.ConditionSeverityError,
+ Message: "test induced failure condition",
+ }
+}
+
+func (testHelper) ReadySubscriptionStatus() *messagingv1.SubscriptionStatus {
+ ss := &messagingv1.SubscriptionStatus{}
+ ss.MarkChannelReady()
+ ss.MarkReferencesResolved()
+ ss.MarkAddedToChannel()
+ ss.MarkOIDCIdentityCreatedSucceeded()
+ return ss
+}
+
+func (t testHelper) ReadyBrokerStatus() *BrokerStatus {
+ bs := &BrokerStatus{}
+ bs.PropagateIngressAvailability(t.AvailableEndpoints())
+ bs.PropagateTriggerChannelReadiness(t.ReadyChannelStatus())
+ bs.PropagateFilterAvailability(t.AvailableEndpoints())
+ bs.SetAddress(&duckv1.Addressable{
+ URL: apis.HTTP("example.com"),
+ })
+ bs.MarkDeadLetterSinkResolvedSucceeded(eventingduckv1.DeliveryStatus{})
+ bs.MarkEventPoliciesTrue()
+ return bs
+}
+
+func (t testHelper) ReadyBrokerStatusWithoutDLS() *BrokerStatus {
+ bs := &BrokerStatus{}
+ bs.PropagateIngressAvailability(t.AvailableEndpoints())
+ bs.PropagateTriggerChannelReadiness(t.ReadyChannelStatus())
+ bs.PropagateFilterAvailability(t.AvailableEndpoints())
+ bs.SetAddress(&duckv1.Addressable{
+ URL: apis.HTTP("example.com"),
+ })
+ bs.MarkEventPoliciesTrue()
+ bs.MarkDeadLetterSinkNotConfigured()
+ return bs
+}
+
+func (testHelper) ReadyBrokerCondition() *apis.Condition {
+ return &apis.Condition{
+ Type: apis.ConditionReady,
+ Status: corev1.ConditionTrue,
+ Severity: apis.ConditionSeverityError,
+ }
+}
+
+func (testHelper) UnknownBrokerStatus() *BrokerStatus {
+ bs := &BrokerStatus{}
+ return bs
+}
+
+func (testHelper) FalseBrokerStatus() *BrokerStatus {
+ bs := &BrokerStatus{}
+ bs.SetAddress(nil)
+ return bs
+}
+
+func (testHelper) UnavailableEndpoints() *corev1.Endpoints {
+ ep := &corev1.Endpoints{}
+ ep.Name = "unavailable"
+ ep.Subsets = []corev1.EndpointSubset{{
+ NotReadyAddresses: []corev1.EndpointAddress{{
+ IP: "127.0.0.1",
+ }},
+ }}
+ return ep
+}
+
+func (testHelper) AvailableEndpoints() *corev1.Endpoints {
+ ep := &corev1.Endpoints{}
+ ep.Name = "available"
+ ep.Subsets = []corev1.EndpointSubset{{
+ Addresses: []corev1.EndpointAddress{{
+ IP: "127.0.0.1",
+ }},
+ }}
+ return ep
+}
+
+func (testHelper) ReadyChannelStatus() *eventingduckv1.ChannelableStatus {
+ cs := &eventingduckv1.ChannelableStatus{
+ Status: duckv1.Status{},
+ AddressStatus: duckv1.AddressStatus{
+ Address: &duckv1.Addressable{
+ URL: &apis.URL{Scheme: "http", Host: "foo"},
+ },
+ },
+ SubscribableStatus: eventingduckv1.SubscribableStatus{}}
+ return cs
+}
+
+func (t testHelper) NotReadyChannelStatus() *eventingduckv1.ChannelableStatus {
+ return &eventingduckv1.ChannelableStatus{}
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_conversion.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_conversion.go
new file mode 100644
index 000000000..8db47875d
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_conversion.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+func (source *Trigger) ConvertTo(ctx context.Context, sink apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+func (sink *Trigger) ConvertFrom(ctx context.Context, source apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", source)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_defaults.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_defaults.go
new file mode 100644
index 000000000..b8d86b598
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_defaults.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "knative.dev/eventing/pkg/apis/feature"
+
+ "knative.dev/pkg/apis"
+)
+
+const (
+ brokerLabel = "eventing.knative.dev/broker"
+)
+
+func (t *Trigger) SetDefaults(ctx context.Context) {
+ withNS := apis.WithinParent(ctx, t.ObjectMeta)
+ t.Spec.SetDefaults(withNS)
+ setLabels(ctx, t)
+}
+
+func (ts *TriggerSpec) SetDefaults(ctx context.Context) {
+ // Make a default filter that allows anything.
+ if ts.Filter == nil {
+ ts.Filter = &TriggerFilter{}
+ }
+ // Default the Subscriber namespace
+ ts.Subscriber.SetDefaults(ctx)
+ ts.Delivery.SetDefaults(ctx)
+}
+
+func setLabels(ctx context.Context, t *Trigger) {
+ if feature.FromContext(ctx).IsEnabled(feature.CrossNamespaceEventLinks) && t.Spec.BrokerRef != nil {
+ if len(t.Labels) == 0 {
+ t.Labels = map[string]string{}
+ }
+ t.Labels[brokerLabel] = t.Spec.BrokerRef.Name
+ } else if t.Spec.Broker != "" {
+ if len(t.Labels) == 0 {
+ t.Labels = map[string]string{}
+ }
+ t.Labels[brokerLabel] = t.Spec.Broker
+ }
+}
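// --- editorial sketch, not part of the vendored patch ---
// Shows the observable effect of SetDefaults above: with a Broker name set and
// the cross-namespace feature disabled, the eventing.knative.dev/broker label is
// filled in from spec.broker and an allow-everything Filter is added. It assumes
// the nil-safe SetDefaults behaviour of the referenced Destination and
// DeliverySpec types.
func exampleTriggerDefaults(ctx context.Context) *Trigger {
	t := &Trigger{Spec: TriggerSpec{Broker: "default"}}
	t.SetDefaults(ctx)
	// t.Labels == map[string]string{"eventing.knative.dev/broker": "default"}
	// t.Spec.Filter == &TriggerFilter{}
	return t
}
// --- end editorial sketch ---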
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_lifecycle.go
new file mode 100644
index 000000000..e961d3979
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_lifecycle.go
@@ -0,0 +1,228 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/eventing/pkg/apis/feature"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+)
+
+var triggerCondSet = apis.NewLivingConditionSet(TriggerConditionBroker, TriggerConditionSubscribed, TriggerConditionDependency, TriggerConditionSubscriberResolved, TriggerConditionDeadLetterSinkResolved, TriggerConditionOIDCIdentityCreated)
+
+const (
+ // TriggerConditionReady has status True when all subconditions below have been set to True.
+ TriggerConditionReady = apis.ConditionReady
+
+ TriggerConditionBroker apis.ConditionType = "BrokerReady"
+
+ TriggerConditionSubscribed apis.ConditionType = "SubscriptionReady"
+
+ TriggerConditionDependency apis.ConditionType = "DependencyReady"
+
+ TriggerConditionSubscriberResolved apis.ConditionType = "SubscriberResolved"
+
+ TriggerConditionDeadLetterSinkResolved apis.ConditionType = "DeadLetterSinkResolved"
+
+ TriggerConditionOIDCIdentityCreated apis.ConditionType = "OIDCIdentityCreated"
+
+ // TriggerAnyFilter is a constant representing that any attribute value is allowed.
+ TriggerAnyFilter = ""
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*Trigger) GetConditionSet() apis.ConditionSet {
+ return triggerCondSet
+}
+
+// GetGroupVersionKind returns GroupVersionKind for Triggers
+func (t *Trigger) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("Trigger")
+}
+
+// GetUntypedSpec returns the spec of the Trigger.
+func (t *Trigger) GetUntypedSpec() interface{} {
+ return t.Spec
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (ts *TriggerStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return triggerCondSet.Manage(ts).GetCondition(t)
+}
+
+// GetTopLevelCondition returns the top level Condition.
+func (ts *TriggerStatus) GetTopLevelCondition() *apis.Condition {
+ return triggerCondSet.Manage(ts).GetTopLevelCondition()
+}
+
+// IsReady returns true if the resource is ready overall.
+func (ts *TriggerStatus) IsReady() bool {
+ return triggerCondSet.Manage(ts).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (ts *TriggerStatus) InitializeConditions() {
+ triggerCondSet.Manage(ts).InitializeConditions()
+ ts.MarkOIDCIdentityCreatedNotSupported()
+}
+
+func (ts *TriggerStatus) PropagateBrokerCondition(bc *apis.Condition) {
+ if bc == nil {
+ ts.MarkBrokerNotConfigured()
+ return
+ }
+
+ switch {
+ case bc.Status == corev1.ConditionUnknown:
+ ts.MarkBrokerUnknown(bc.Reason, bc.Message)
+ case bc.Status == corev1.ConditionTrue:
+ triggerCondSet.Manage(ts).MarkTrue(TriggerConditionBroker)
+ case bc.Status == corev1.ConditionFalse:
+ ts.MarkBrokerFailed(bc.Reason, bc.Message)
+ default:
+ ts.MarkBrokerUnknown("BrokerUnknown", "The status of Broker is invalid: %v", bc.Status)
+ }
+}
+
+func (ts *TriggerStatus) MarkBrokerFailed(reason, messageFormat string, messageA ...interface{}) {
+ triggerCondSet.Manage(ts).MarkFalse(TriggerConditionBroker, reason, messageFormat, messageA...)
+}
+
+func (ts *TriggerStatus) MarkBrokerUnknown(reason, messageFormat string, messageA ...interface{}) {
+ triggerCondSet.Manage(ts).MarkUnknown(TriggerConditionBroker, reason, messageFormat, messageA...)
+}
+
+func (ts *TriggerStatus) MarkBrokerNotConfigured() {
+ triggerCondSet.Manage(ts).MarkUnknown(TriggerConditionBroker,
+ "BrokerNotConfigured", "Broker has not yet been reconciled.")
+}
+
+func (ts *TriggerStatus) PropagateSubscriptionCondition(sc *apis.Condition) {
+ if sc == nil {
+ ts.MarkSubscriptionNotConfigured()
+ return
+ }
+
+ switch {
+ case sc.Status == corev1.ConditionUnknown:
+ ts.MarkSubscribedUnknown(sc.Reason, sc.Message)
+ case sc.Status == corev1.ConditionTrue:
+ triggerCondSet.Manage(ts).MarkTrue(TriggerConditionSubscribed)
+ case sc.Status == corev1.ConditionFalse:
+ ts.MarkNotSubscribed(sc.Reason, sc.Message)
+ default:
+ ts.MarkSubscribedUnknown("SubscriptionUnknown", "The status of Subscription is invalid: %v", sc.Status)
+ }
+}
+
+func (ts *TriggerStatus) MarkNotSubscribed(reason, messageFormat string, messageA ...interface{}) {
+ triggerCondSet.Manage(ts).MarkFalse(TriggerConditionSubscribed, reason, messageFormat, messageA...)
+}
+
+func (ts *TriggerStatus) MarkSubscribedUnknown(reason, messageFormat string, messageA ...interface{}) {
+ triggerCondSet.Manage(ts).MarkUnknown(TriggerConditionSubscribed, reason, messageFormat, messageA...)
+}
+
+func (ts *TriggerStatus) MarkSubscriptionNotConfigured() {
+ triggerCondSet.Manage(ts).MarkUnknown(TriggerConditionSubscribed,
+ "SubscriptionNotConfigured", "Subscription has not yet been reconciled.")
+}
+
+func (ts *TriggerStatus) MarkSubscriberResolvedSucceeded() {
+ triggerCondSet.Manage(ts).MarkTrue(TriggerConditionSubscriberResolved)
+}
+
+func (ts *TriggerStatus) MarkSubscriberResolvedFailed(reason, messageFormat string, messageA ...interface{}) {
+ triggerCondSet.Manage(ts).MarkFalse(TriggerConditionSubscriberResolved, reason, messageFormat, messageA...)
+}
+
+func (ts *TriggerStatus) MarkSubscriberResolvedUnknown(reason, messageFormat string, messageA ...interface{}) {
+ triggerCondSet.Manage(ts).MarkUnknown(TriggerConditionSubscriberResolved, reason, messageFormat, messageA...)
+}
+
+func (ts *TriggerStatus) MarkDeadLetterSinkResolvedSucceeded() {
+ triggerCondSet.Manage(ts).MarkTrue(TriggerConditionDeadLetterSinkResolved)
+}
+
+func (ts *TriggerStatus) MarkDeadLetterSinkNotConfigured() {
+ triggerCondSet.Manage(ts).MarkTrueWithReason(TriggerConditionDeadLetterSinkResolved, "DeadLetterSinkNotConfigured", "No dead letter sink is configured.")
+}
+
+func (ts *TriggerStatus) MarkDeadLetterSinkResolvedFailed(reason, messageFormat string, messageA ...interface{}) {
+ triggerCondSet.Manage(ts).MarkFalse(TriggerConditionDeadLetterSinkResolved, reason, messageFormat, messageA...)
+}
+
+func (ts *TriggerStatus) MarkDependencySucceeded() {
+ triggerCondSet.Manage(ts).MarkTrue(TriggerConditionDependency)
+}
+
+func (ts *TriggerStatus) MarkDependencyFailed(reason, messageFormat string, messageA ...interface{}) {
+ triggerCondSet.Manage(ts).MarkFalse(TriggerConditionDependency, reason, messageFormat, messageA...)
+}
+
+func (ts *TriggerStatus) MarkDependencyUnknown(reason, messageFormat string, messageA ...interface{}) {
+ triggerCondSet.Manage(ts).MarkUnknown(TriggerConditionDependency, reason, messageFormat, messageA...)
+}
+
+func (ts *TriggerStatus) MarkDependencyNotConfigured() {
+ triggerCondSet.Manage(ts).MarkUnknown(TriggerConditionDependency,
+ "DependencyNotConfigured", "Dependency has not yet been reconciled.")
+}
+
+func (ts *TriggerStatus) PropagateDependencyStatus(ks *duckv1.Source) {
+ kc := ks.Status.GetCondition(apis.ConditionReady)
+ if kc == nil {
+ ts.MarkDependencyNotConfigured()
+ return
+ }
+
+ switch {
+ case kc.Status == corev1.ConditionUnknown:
+ ts.MarkDependencyUnknown(kc.Reason, kc.Message)
+ case kc.Status == corev1.ConditionTrue:
+ ts.MarkDependencySucceeded()
+ case kc.Status == corev1.ConditionFalse:
+ ts.MarkDependencyFailed(kc.Reason, kc.Message)
+ default:
+ ts.MarkDependencyUnknown("DependencyUnknown", "The status of Dependency is invalid: %v", kc.Status)
+ }
+}
+
+func (ts *TriggerStatus) MarkOIDCIdentityCreatedSucceeded() {
+ triggerCondSet.Manage(ts).MarkTrue(TriggerConditionOIDCIdentityCreated)
+}
+
+func (ts *TriggerStatus) MarkOIDCIdentityCreatedSucceededWithReason(reason, messageFormat string, messageA ...interface{}) {
+ triggerCondSet.Manage(ts).MarkTrueWithReason(TriggerConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
+
+func (ts *TriggerStatus) MarkOIDCIdentityCreatedFailed(reason, messageFormat string, messageA ...interface{}) {
+ triggerCondSet.Manage(ts).MarkFalse(TriggerConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
+
+func (ts *TriggerStatus) MarkOIDCIdentityCreatedUnknown(reason, messageFormat string, messageA ...interface{}) {
+ triggerCondSet.Manage(ts).MarkUnknown(TriggerConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
+
+func (ts *TriggerStatus) MarkOIDCIdentityCreatedNotSupported() {
+ // In case the OIDC feature is not supported, we mark the condition as true so that the Trigger is not marked unready.
+ triggerCondSet.Manage(ts).MarkTrueWithReason(TriggerConditionOIDCIdentityCreated, fmt.Sprintf("%s feature not yet supported for this Broker class", feature.OIDCAuthentication), "")
+}
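// --- editorial sketch, not part of the vendored patch ---
// A minimal walk through the Trigger condition set defined above: once every
// subcondition is true, the top-level Ready condition (and IsReady) follows.
// InitializeConditions already marks OIDCIdentityCreated true for Broker classes
// that do not support the feature.
func exampleReadyTriggerStatus() bool {
	ts := &TriggerStatus{}
	ts.InitializeConditions()
	ts.PropagateBrokerCondition(&apis.Condition{Type: apis.ConditionReady, Status: corev1.ConditionTrue})
	ts.PropagateSubscriptionCondition(&apis.Condition{Type: apis.ConditionReady, Status: corev1.ConditionTrue})
	ts.MarkDependencySucceeded()
	ts.MarkSubscriberResolvedSucceeded()
	ts.MarkDeadLetterSinkResolvedSucceeded()
	return ts.IsReady() // true
}
// --- end editorial sketch ---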
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_types.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_types.go
new file mode 100644
index 000000000..2e217c73a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_types.go
@@ -0,0 +1,236 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+
+ eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+)
+
+const (
+ // DependencyAnnotation is the annotation key used to mark the sources that the Trigger depends on.
+ // This will be used when the kn client creates a source and trigger pair for the user such that the trigger only receives events produced by the paired source.
+ DependencyAnnotation = "knative.dev/dependency"
+
+ // InjectionAnnotation is the annotation key used to enable knative eventing
+ // injection for a namespace to automatically create a broker.
+ InjectionAnnotation = "eventing.knative.dev/injection"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Trigger represents a request to have events delivered to a subscriber from a
+// Broker's event pool.
+type Trigger struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired state of the Trigger.
+ Spec TriggerSpec `json:"spec,omitempty"`
+
+ // Status represents the current state of the Trigger. This data may be out of
+ // date.
+ // +optional
+ Status TriggerStatus `json:"status,omitempty"`
+}
+
+var (
+ // Check that Trigger can be validated, can be defaulted, and has immutable fields.
+ _ apis.Validatable = (*Trigger)(nil)
+ _ apis.Defaultable = (*Trigger)(nil)
+
+ // Check that Trigger can return its spec untyped.
+ _ apis.HasSpec = (*Trigger)(nil)
+
+ _ runtime.Object = (*Trigger)(nil)
+
+ // Check that we can create OwnerReferences to a Trigger.
+ _ kmeta.OwnerRefable = (*Trigger)(nil)
+
+ // Check that the type conforms to the duck Knative Resource shape.
+ _ duckv1.KRShaped = (*Trigger)(nil)
+)
+
+type TriggerSpec struct {
+ // Broker is the broker that this trigger receives events from.
+ Broker string `json:"broker,omitempty"`
+
+ // BrokerRef is the broker that is used for cross-namespace referencing.
+ BrokerRef *duckv1.KReference `json:"brokerRef,omitempty"`
+
+ // Filter is the filter to apply against all events from the Broker. Only events that pass this
+ // filter will be sent to the Subscriber. If not specified, it defaults to allowing all events.
+ //
+ // +optional
+ Filter *TriggerFilter `json:"filter,omitempty"`
+
+ // Filters is an experimental field that conforms to the CNCF CloudEvents Subscriptions
+ // API. It's an array of filter expressions that evaluate to true or false.
+ // If any filter expression in the array evaluates to false, the event MUST
+ // NOT be sent to the Subscriber. If all the filter expressions in the array
+ // evaluate to true, the event MUST be attempted to be delivered. Absence of
+ // a filter or empty array implies a value of true. If users specify both
+ // Filter and Filters, the latter overrides the former. This allows users to
+ // try out the effect of the new Filters field on existing Trigger objects
+ // without compromising the existing attribute-based Filter.
+ //
+ // +optional
+ Filters []SubscriptionsAPIFilter `json:"filters,omitempty"`
+
+ // Subscriber is the addressable that receives events from the Broker that pass
+ // the Filter. It is required.
+ Subscriber duckv1.Destination `json:"subscriber"`
+
+ // Delivery contains the delivery spec for this specific trigger.
+ // +optional
+ Delivery *eventingduckv1.DeliverySpec `json:"delivery,omitempty"`
+}
+
+type TriggerFilter struct {
+ // Attributes filters events by exact match on event context attributes.
+ // Each key in the map is compared with the equivalent key in the event
+ // context. An event passes the filter if all values are equal to the
+ // specified values. Nested context attributes are not supported as keys. Only
+ // string values are supported.
+ //
+ // +optional
+ Attributes TriggerFilterAttributes `json:"attributes,omitempty"`
+}
+
+// SubscriptionsAPIFilter allows defining a filter expression using CloudEvents
+// Subscriptions API. If multiple filters are specified, the same semantics as
+// SubscriptionsAPIFilter.All are applied. If no filter dialect or an empty
+// object is specified, then the filter always accepts the events.
+type SubscriptionsAPIFilter struct {
+ // All evaluates to true if all the nested expressions evaluate to true.
+ // It must contain at least one filter expression.
+ //
+ // +optional
+ All []SubscriptionsAPIFilter `json:"all,omitempty"`
+
+ // Any evaluates to true if at least one of the nested expressions evaluates
+ // to true. It must contain at least one filter expression.
+ //
+ // +optional
+ Any []SubscriptionsAPIFilter `json:"any,omitempty"`
+
+ // Not evaluates to true if the nested expression evaluates to false.
+ //
+ // +optional
+ Not *SubscriptionsAPIFilter `json:"not,omitempty"`
+
+ // Exact evaluates to true if the values of the matching CloudEvents attributes MUST
+ // all exactly match with the associated value String specified (case-sensitive).
+ // The keys are the names of the CloudEvents attributes to be matched,
+ // and their values are the String values to use in the comparison.
+ // The attribute name and value specified in the filter expression MUST NOT be
+ // empty strings.
+ //
+ // +optional
+ Exact map[string]string `json:"exact,omitempty"`
+
+ // Prefix evaluates to true if the values of the matching CloudEvents attributes MUST
+ // all start with the associated value String specified (case sensitive).
+ // The keys are the names of the CloudEvents attributes to be matched,
+ // and their values are the String values to use in the comparison.
+ // The attribute name and value specified in the filter expression MUST NOT be
+ // empty strings.
+ //
+ // +optional
+ Prefix map[string]string `json:"prefix,omitempty"`
+
+ // Suffix evaluates to true if the values of the matching CloudEvents attributes MUST
+ // all end with the associated value String specified (case sensitive).
+ // The keys are the names of the CloudEvents attributes to be matched,
+ // and their values are the String values to use in the comparison.
+ // The attribute name and value specified in the filter expression MUST NOT be
+ // empty strings.
+ //
+ // +optional
+ Suffix map[string]string `json:"suffix,omitempty"`
+
+ // CESQL is a CloudEvents SQL expression that will be evaluated to true or false against each CloudEvent.
+ //
+ // +optional
+ CESQL string `json:"cesql,omitempty"`
+}
+
+// TriggerFilterAttributes is a map of context attribute names to values for
+// filtering by equality. Only exact matches will pass the filter. You can use
+// the value '' (the empty string) to indicate all strings match.
+type TriggerFilterAttributes map[string]string
+
+// TriggerStatus represents the current state of a Trigger.
+type TriggerStatus struct {
+ // inherits duck/v1 Status, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Trigger that was last processed by the controller.
+ // * Conditions - the latest available observations of a resource's current state.
+ duckv1.Status `json:",inline"`
+
+ // SubscriberURI is the resolved URI of the receiver for this Trigger.
+ // +optional
+ SubscriberURI *apis.URL `json:"subscriberUri,omitempty"`
+
+ // SubscriberCACerts is the Certification Authority (CA) certificates in PEM format
+ // according to https://www.rfc-editor.org/rfc/rfc7468 of the receiver for this Trigger.
+ // +optional
+ SubscriberCACerts *string `json:"subscriberCACerts,omitempty"`
+
+ // SubscriberAudience is the OIDC audience of the subscriber.
+ // +optional
+ SubscriberAudience *string `json:"subscriberAudience,omitempty"`
+
+ // DeliveryStatus contains a resolved URL to the dead letter sink address, and any other
+ // resolved delivery options.
+ eventingduckv1.DeliveryStatus `json:",inline"`
+
+ // Auth provides the relevant information for OIDC authentication.
+ // +optional
+ Auth *duckv1.AuthStatus `json:"auth,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// TriggerList is a collection of Triggers.
+type TriggerList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Trigger `json:"items"`
+}
+
+// GetStatus retrieves the status of the Trigger. Implements the KRShaped interface.
+func (t *Trigger) GetStatus() *duckv1.Status {
+ return &t.Status.Status
+}
+
+// GetCrossNamespaceRef returns the Broker reference for the Trigger. Implements the ResourceInfo interface.
+func (t *Trigger) GetCrossNamespaceRef() duckv1.KReference {
+ if t.Spec.BrokerRef != nil {
+ return *t.Spec.BrokerRef
+ }
+ return duckv1.KReference{}
+}
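// --- editorial sketch, not part of the vendored patch ---
// Example of the Subscriptions-API style filters above: deliver only events whose
// type exactly matches "com.example.order.created" or whose source starts with
// "/orders/". The attribute values and subscriber URI are placeholders.
func exampleFilteredTriggerSpec() TriggerSpec {
	return TriggerSpec{
		Broker: "default",
		Filters: []SubscriptionsAPIFilter{{
			Any: []SubscriptionsAPIFilter{
				{Exact: map[string]string{"type": "com.example.order.created"}},
				{Prefix: map[string]string{"source": "/orders/"}},
			},
		}},
		Subscriber: duckv1.Destination{
			URI: apis.HTTP("order-processor.demo.svc.cluster.local"),
		},
	}
}
// --- end editorial sketch ---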
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_validation.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_validation.go
new file mode 100644
index 000000000..b0d8b2b24
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/trigger_validation.go
@@ -0,0 +1,293 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "regexp"
+
+ cesqlparser "github.com/cloudevents/sdk-go/sql/v2/parser"
+ "go.uber.org/zap"
+ corev1 "k8s.io/api/core/v1"
+ cn "knative.dev/eventing/pkg/crossnamespace"
+ "knative.dev/pkg/apis"
+ "knative.dev/pkg/kmp"
+ "knative.dev/pkg/logging"
+
+ "knative.dev/eventing/pkg/apis/feature"
+)
+
+var (
+ // Only allow lowercase alphanumeric, starting with letters.
+ validAttributeName = regexp.MustCompile(`^[a-z][a-z0-9]*$`)
+)
+
+// Validate the Trigger.
+func (t *Trigger) Validate(ctx context.Context) *apis.FieldError {
+ errs := t.Spec.Validate(apis.WithinSpec(ctx)).ViaField("spec")
+ errs = t.validateAnnotation(errs, DependencyAnnotation, t.validateDependencyAnnotation)
+ errs = t.validateAnnotation(errs, InjectionAnnotation, t.validateInjectionAnnotation)
+ if apis.IsInUpdate(ctx) {
+ original := apis.GetBaseline(ctx).(*Trigger)
+ errs = errs.Also(t.CheckImmutableFields(ctx, original))
+ }
+ if feature.FromContext(ctx).IsEnabled(feature.CrossNamespaceEventLinks) && t.Spec.BrokerRef != nil {
+ crossNamespaceError := cn.CheckNamespace(ctx, t)
+ if crossNamespaceError != nil {
+ errs = errs.Also(crossNamespaceError)
+ }
+ }
+ return errs
+}
+
+// Validate the TriggerSpec.
+func (ts *TriggerSpec) Validate(ctx context.Context) (errs *apis.FieldError) {
+ if ts.BrokerRef == nil && ts.Broker == "" {
+ errs = errs.Also(apis.ErrMissingField("broker"))
+ } else if ts.BrokerRef != nil && ts.Broker != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("broker", "brokerRef"))
+ }
+
+ if !feature.FromContext(ctx).IsEnabled(feature.CrossNamespaceEventLinks) && ts.BrokerRef != nil {
+ if ts.BrokerRef.Namespace != "" {
+ fe := apis.ErrDisallowedFields("namespace")
+ fe.Details = "only name, apiVersion and kind are supported fields when feature.CrossNamespaceEventLinks is disabled"
+ errs = errs.Also(fe)
+ }
+ }
+
+ return errs.Also(
+ ValidateAttributeFilters(ts.Filter).ViaField("filter"),
+ ).Also(
+ ValidateSubscriptionAPIFiltersList(ctx, ts.Filters).ViaField("filters"),
+ ).Also(
+ ts.Subscriber.Validate(ctx).ViaField("subscriber"),
+ ).Also(
+ ts.Delivery.Validate(ctx).ViaField("delivery"),
+ )
+}
+
+// CheckImmutableFields checks that any immutable fields were not changed.
+func (t *Trigger) CheckImmutableFields(ctx context.Context, original *Trigger) *apis.FieldError {
+ if original == nil {
+ return nil
+ }
+
+ if diff, err := kmp.ShortDiff(original.Spec.Broker, t.Spec.Broker); err != nil {
+ return &apis.FieldError{
+ Message: "Failed to diff Trigger",
+ Paths: []string{"spec"},
+ Details: err.Error(),
+ }
+ } else if diff != "" {
+ return &apis.FieldError{
+ Message: "Immutable fields changed (-old +new)",
+ Paths: []string{"spec", "broker"},
+ Details: diff,
+ }
+ }
+ return nil
+}
+
+func GetObjRefFromDependencyAnnotation(dependencyAnnotation string) (corev1.ObjectReference, error) {
+ var objectRef corev1.ObjectReference
+ if err := json.Unmarshal([]byte(dependencyAnnotation), &objectRef); err != nil {
+ return objectRef, err
+ }
+ return objectRef, nil
+}
+
+func (t *Trigger) validateAnnotation(errs *apis.FieldError, annotation string, function func(string) *apis.FieldError) *apis.FieldError {
+ if annotationValue, ok := t.GetAnnotations()[annotation]; ok {
+ annotationPrefix := fmt.Sprintf("metadata.annotations[%s]", annotation)
+ errs = errs.Also(function(annotationValue).ViaField(annotationPrefix))
+ }
+ return errs
+}
+
+func (t *Trigger) validateDependencyAnnotation(dependencyAnnotation string) *apis.FieldError {
+ depObjRef, err := GetObjRefFromDependencyAnnotation(dependencyAnnotation)
+ if err != nil {
+ return &apis.FieldError{
+ Message: fmt.Sprintf("The provided annotation was not a corev1.ObjectReference: %q", dependencyAnnotation),
+ Details: err.Error(),
+ Paths: []string{""},
+ }
+ }
+ var errs *apis.FieldError
+ if depObjRef.Namespace != "" && depObjRef.Namespace != t.GetNamespace() {
+ fe := &apis.FieldError{
+ Message: fmt.Sprintf("Namespace must be empty or equal to the trigger namespace %q", t.GetNamespace()),
+ Paths: []string{"namespace"},
+ }
+ errs = errs.Also(fe)
+ }
+ if depObjRef.Kind == "" {
+ fe := apis.ErrMissingField("kind")
+ errs = errs.Also(fe)
+ }
+ if depObjRef.Name == "" {
+ fe := apis.ErrMissingField("name")
+ errs = errs.Also(fe)
+ }
+ if depObjRef.APIVersion == "" {
+ fe := apis.ErrMissingField("apiVersion")
+ errs = errs.Also(fe)
+ }
+ return errs
+}
+
+func (t *Trigger) validateInjectionAnnotation(injectionAnnotation string) *apis.FieldError {
+ if injectionAnnotation != "enabled" && injectionAnnotation != "disabled" {
+ return &apis.FieldError{
+ Message: fmt.Sprintf(`The provided injection annotation value can only be "enabled" or "disabled", not %q`, injectionAnnotation),
+ Paths: []string{""},
+ }
+ }
+ if t.Spec.Broker != "default" {
+ return &apis.FieldError{
+ Message: fmt.Sprintf("The provided injection annotation is only used for default broker, but non-default broker specified here: %q", t.Spec.Broker),
+ Paths: []string{""},
+ }
+ }
+ return nil
+}
+
+func ValidateAttributeFilters(filter *TriggerFilter) (errs *apis.FieldError) {
+ if filter == nil {
+ return nil
+ }
+ return errs.Also(ValidateAttributesNames(filter.Attributes).ViaField("attributes"))
+}
+
+func ValidateAttributesNames(attrs map[string]string) (errs *apis.FieldError) {
+ for attr := range attrs {
+ if !validAttributeName.MatchString(attr) {
+ errs = errs.Also(apis.ErrInvalidKeyName(attr, apis.CurrentField, "Attribute name must start with a letter and can only contain lowercase alphanumeric").ViaKey(attr))
+ }
+ }
+ return errs
+}
+
+func ValidateSubscriptionAPIFiltersList(ctx context.Context, filters []SubscriptionsAPIFilter) (errs *apis.FieldError) {
+ if filters == nil {
+ return nil
+ }
+
+ for i, f := range filters {
+ f := f
+ errs = errs.Also(ValidateSubscriptionAPIFilter(ctx, &f)).ViaIndex(i)
+ }
+ return errs
+}
+
+func ValidateCESQLExpression(ctx context.Context, expression string) (errs *apis.FieldError) {
+ if expression == "" {
+ return nil
+ }
+ // Need to recover in case Parse panics
+ defer func() {
+ if r := recover(); r != nil {
+ logging.FromContext(ctx).Debug("Warning! Calling CESQL Parser panicked. Treating expression as invalid.", zap.Any("recovered value", r), zap.String("CESQL", expression))
+ errs = apis.ErrInvalidValue(expression, apis.CurrentField)
+ }
+ }()
+
+ if _, err := cesqlparser.Parse(expression); err != nil {
+ return apis.ErrInvalidValue(expression, apis.CurrentField, err.Error())
+ }
+ return nil
+}
+
+func ValidateSubscriptionAPIFilter(ctx context.Context, filter *SubscriptionsAPIFilter) (errs *apis.FieldError) {
+ if filter == nil {
+ return nil
+ }
+ errs = errs.Also(
+ ValidateOneOf(filter),
+ ).Also(
+ ValidateAttributesNames(filter.Exact).ViaField("exact"),
+ ).Also(
+ ValidateAttributesNames(filter.Prefix).ViaField("prefix"),
+ ).Also(
+ ValidateAttributesNames(filter.Suffix).ViaField("suffix"),
+ ).Also(
+ ValidateSubscriptionAPIFiltersList(ctx, filter.All).ViaField("all"),
+ ).Also(
+ ValidateSubscriptionAPIFiltersList(ctx, filter.Any).ViaField("any"),
+ ).Also(
+ ValidateSubscriptionAPIFilter(ctx, filter.Not).ViaField("not"),
+ ).Also(
+ ValidateCESQLExpression(ctx, filter.CESQL).ViaField("cesql"),
+ )
+ return errs
+}
+
+func ValidateOneOf(filter *SubscriptionsAPIFilter) (err *apis.FieldError) {
+ if filter != nil && hasMultipleDialects(filter) {
+ return apis.ErrGeneric("multiple dialects found, filters can have only one dialect set")
+ }
+ return nil
+}
+
+func hasMultipleDialects(filter *SubscriptionsAPIFilter) bool {
+ dialectFound := false
+ if len(filter.Exact) > 0 {
+ dialectFound = true
+ }
+ if len(filter.Prefix) > 0 {
+ if dialectFound {
+ return true
+ } else {
+ dialectFound = true
+ }
+ }
+ if len(filter.Suffix) > 0 {
+ if dialectFound {
+ return true
+ } else {
+ dialectFound = true
+ }
+ }
+ if len(filter.All) > 0 {
+ if dialectFound {
+ return true
+ } else {
+ dialectFound = true
+ }
+ }
+ if len(filter.Any) > 0 {
+ if dialectFound {
+ return true
+ } else {
+ dialectFound = true
+ }
+ }
+ if filter.Not != nil {
+ if dialectFound {
+ return true
+ } else {
+ dialectFound = true
+ }
+ }
+ if filter.CESQL != "" && dialectFound {
+ return true
+ }
+ return false
+}
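// --- editorial sketch, not part of the vendored patch ---
// Demonstrates the one-dialect-per-filter rule enforced by ValidateOneOf above:
// a filter that sets both "exact" and "prefix" is rejected, while nesting them
// under "all" is the supported way to combine dialects. Attribute values are
// placeholders.
func exampleDialectValidation(ctx context.Context) (bad, good *apis.FieldError) {
	bad = ValidateSubscriptionAPIFilter(ctx, &SubscriptionsAPIFilter{
		Exact:  map[string]string{"type": "com.example.event"},
		Prefix: map[string]string{"source": "/sensors/"},
	}) // non-nil: "multiple dialects found, filters can have only one dialect set"
	good = ValidateSubscriptionAPIFilter(ctx, &SubscriptionsAPIFilter{
		All: []SubscriptionsAPIFilter{
			{Exact: map[string]string{"type": "com.example.event"}},
			{Prefix: map[string]string{"source": "/sensors/"}},
		},
	}) // nil: each nested filter uses a single dialect
	return bad, good
}
// --- end editorial sketch ---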
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..b873f4032
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1/zz_generated.deepcopy.go
@@ -0,0 +1,375 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ apisduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ apis "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Broker) DeepCopyInto(out *Broker) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Broker.
+func (in *Broker) DeepCopy() *Broker {
+ if in == nil {
+ return nil
+ }
+ out := new(Broker)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Broker) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BrokerList) DeepCopyInto(out *BrokerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Broker, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerList.
+func (in *BrokerList) DeepCopy() *BrokerList {
+ if in == nil {
+ return nil
+ }
+ out := new(BrokerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *BrokerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BrokerSpec) DeepCopyInto(out *BrokerSpec) {
+ *out = *in
+ if in.Config != nil {
+ in, out := &in.Config, &out.Config
+ *out = new(duckv1.KReference)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Delivery != nil {
+ in, out := &in.Delivery, &out.Delivery
+ *out = new(apisduckv1.DeliverySpec)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerSpec.
+func (in *BrokerSpec) DeepCopy() *BrokerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(BrokerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *BrokerStatus) DeepCopyInto(out *BrokerStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ in.AddressStatus.DeepCopyInto(&out.AddressStatus)
+ in.DeliveryStatus.DeepCopyInto(&out.DeliveryStatus)
+ in.AppliedEventPoliciesStatus.DeepCopyInto(&out.AppliedEventPoliciesStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new BrokerStatus.
+func (in *BrokerStatus) DeepCopy() *BrokerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(BrokerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionsAPIFilter) DeepCopyInto(out *SubscriptionsAPIFilter) {
+ *out = *in
+ if in.All != nil {
+ in, out := &in.All, &out.All
+ *out = make([]SubscriptionsAPIFilter, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Any != nil {
+ in, out := &in.Any, &out.Any
+ *out = make([]SubscriptionsAPIFilter, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.Not != nil {
+ in, out := &in.Not, &out.Not
+ *out = new(SubscriptionsAPIFilter)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Exact != nil {
+ in, out := &in.Exact, &out.Exact
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Prefix != nil {
+ in, out := &in.Prefix, &out.Prefix
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ if in.Suffix != nil {
+ in, out := &in.Suffix, &out.Suffix
+ *out = make(map[string]string, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionsAPIFilter.
+func (in *SubscriptionsAPIFilter) DeepCopy() *SubscriptionsAPIFilter {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionsAPIFilter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Trigger) DeepCopyInto(out *Trigger) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Trigger.
+func (in *Trigger) DeepCopy() *Trigger {
+ if in == nil {
+ return nil
+ }
+ out := new(Trigger)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Trigger) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TriggerFilter) DeepCopyInto(out *TriggerFilter) {
+ *out = *in
+ if in.Attributes != nil {
+ in, out := &in.Attributes, &out.Attributes
+ *out = make(TriggerFilterAttributes, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerFilter.
+func (in *TriggerFilter) DeepCopy() *TriggerFilter {
+ if in == nil {
+ return nil
+ }
+ out := new(TriggerFilter)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in TriggerFilterAttributes) DeepCopyInto(out *TriggerFilterAttributes) {
+ {
+ in := &in
+ *out = make(TriggerFilterAttributes, len(*in))
+ for key, val := range *in {
+ (*out)[key] = val
+ }
+ return
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerFilterAttributes.
+func (in TriggerFilterAttributes) DeepCopy() TriggerFilterAttributes {
+ if in == nil {
+ return nil
+ }
+ out := new(TriggerFilterAttributes)
+ in.DeepCopyInto(out)
+ return *out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TriggerList) DeepCopyInto(out *TriggerList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Trigger, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerList.
+func (in *TriggerList) DeepCopy() *TriggerList {
+ if in == nil {
+ return nil
+ }
+ out := new(TriggerList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *TriggerList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TriggerSpec) DeepCopyInto(out *TriggerSpec) {
+ *out = *in
+ if in.BrokerRef != nil {
+ in, out := &in.BrokerRef, &out.BrokerRef
+ *out = new(duckv1.KReference)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Filter != nil {
+ in, out := &in.Filter, &out.Filter
+ *out = new(TriggerFilter)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Filters != nil {
+ in, out := &in.Filters, &out.Filters
+ *out = make([]SubscriptionsAPIFilter, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.Subscriber.DeepCopyInto(&out.Subscriber)
+ if in.Delivery != nil {
+ in, out := &in.Delivery, &out.Delivery
+ *out = new(apisduckv1.DeliverySpec)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerSpec.
+func (in *TriggerSpec) DeepCopy() *TriggerSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(TriggerSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TriggerStatus) DeepCopyInto(out *TriggerStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ if in.SubscriberURI != nil {
+ in, out := &in.SubscriberURI, &out.SubscriberURI
+ *out = new(apis.URL)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SubscriberCACerts != nil {
+ in, out := &in.SubscriberCACerts, &out.SubscriberCACerts
+ *out = new(string)
+ **out = **in
+ }
+ if in.SubscriberAudience != nil {
+ in, out := &in.SubscriberAudience, &out.SubscriberAudience
+ *out = new(string)
+ **out = **in
+ }
+ in.DeliveryStatus.DeepCopyInto(&out.DeliveryStatus)
+ if in.Auth != nil {
+ in, out := &in.Auth, &out.Auth
+ *out = new(duckv1.AuthStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TriggerStatus.
+func (in *TriggerStatus) DeepCopy() *TriggerStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(TriggerStatus)
+ in.DeepCopyInto(out)
+ return out
+}
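
For orientation, a minimal usage sketch of the generated deepcopy helpers above. It assumes this hunk belongs to the vendored eventing v1 package (the import alias below is an assumption): the clone owns its own Filter map, so mutating the copy leaves the original untouched.

package main

import (
	"fmt"

	eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
)

func main() {
	orig := &eventingv1.Trigger{
		Spec: eventingv1.TriggerSpec{
			Filter: &eventingv1.TriggerFilter{
				Attributes: eventingv1.TriggerFilterAttributes{"type": "dev.example.event"},
			},
		},
	}

	// DeepCopy clones the nested Filter map as well, so the copy is fully independent.
	clone := orig.DeepCopy()
	clone.Spec.Filter.Attributes["type"] = "dev.example.other"

	fmt.Println(orig.Spec.Filter.Attributes["type"])  // dev.example.event
	fmt.Println(clone.Spec.Filter.Attributes["type"]) // dev.example.other
}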
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/eventpolicy_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/eventpolicy_lifecycle.go
index 30c8575ea..a74431f0d 100644
--- a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/eventpolicy_lifecycle.go
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/eventpolicy_lifecycle.go
@@ -1,5 +1,5 @@
/*
-Copyright 2020 The Knative Authors
+Copyright 2024 The Knative Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -20,10 +20,12 @@ import (
"knative.dev/pkg/apis"
)
-var eventPolicyCondSet = apis.NewLivingConditionSet()
+var eventPolicyCondSet = apis.NewLivingConditionSet(EventPolicyConditionAuthenticationEnabled, EventPolicyConditionSubjectsResolved)
const (
- EventPolicyConditionReady = apis.ConditionReady
+ EventPolicyConditionReady = apis.ConditionReady
+ EventPolicyConditionAuthenticationEnabled apis.ConditionType = "AuthenticationEnabled"
+ EventPolicyConditionSubjectsResolved apis.ConditionType = "SubjectsResolved"
)
// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
@@ -32,21 +34,41 @@ func (*EventPolicy) GetConditionSet() apis.ConditionSet {
}
// GetCondition returns the condition currently associated with the given type, or nil.
-func (et *EventPolicyStatus) GetCondition(t apis.ConditionType) *apis.Condition {
- return eventPolicyCondSet.Manage(et).GetCondition(t)
+func (ep *EventPolicyStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return eventPolicyCondSet.Manage(ep).GetCondition(t)
}
// IsReady returns true if the resource is ready overall.
-func (et *EventPolicyStatus) IsReady() bool {
- return et.GetTopLevelCondition().IsTrue()
+func (ep *EventPolicyStatus) IsReady() bool {
+ return ep.GetTopLevelCondition().IsTrue()
}
// GetTopLevelCondition returns the top level Condition.
-func (et *EventPolicyStatus) GetTopLevelCondition() *apis.Condition {
- return eventPolicyCondSet.Manage(et).GetTopLevelCondition()
+func (ep *EventPolicyStatus) GetTopLevelCondition() *apis.Condition {
+ return eventPolicyCondSet.Manage(ep).GetTopLevelCondition()
}
// InitializeConditions sets relevant unset conditions to Unknown state.
-func (et *EventPolicyStatus) InitializeConditions() {
- eventPolicyCondSet.Manage(et).InitializeConditions()
+func (ep *EventPolicyStatus) InitializeConditions() {
+ eventPolicyCondSet.Manage(ep).InitializeConditions()
+}
+
+// MarkOIDCAuthenticationEnabled sets EventPolicyConditionAuthenticationEnabled condition to true.
+func (ep *EventPolicyStatus) MarkOIDCAuthenticationEnabled() {
+ eventPolicyCondSet.Manage(ep).MarkTrue(EventPolicyConditionAuthenticationEnabled)
+}
+
+// MarkOIDCAuthenticationDisabled sets EventPolicyConditionAuthenticationEnabled condition to false.
+func (ep *EventPolicyStatus) MarkOIDCAuthenticationDisabled(reason, messageFormat string, messageA ...interface{}) {
+ eventPolicyCondSet.Manage(ep).MarkFalse(EventPolicyConditionAuthenticationEnabled, reason, messageFormat, messageA...)
+}
+
+// MarkSubjectsResolvedSucceeded sets EventPolicyConditionSubjectsResolved condition to true.
+func (ep *EventPolicyStatus) MarkSubjectsResolvedSucceeded() {
+ eventPolicyCondSet.Manage(ep).MarkTrue(EventPolicyConditionSubjectsResolved)
+}
+
+// MarkSubjectsResolvedFailed sets EventPolicyConditionSubjectsResolved condition to false.
+func (ep *EventPolicyStatus) MarkSubjectsResolvedFailed(reason, messageFormat string, messageA ...interface{}) {
+ eventPolicyCondSet.Manage(ep).MarkFalse(EventPolicyConditionSubjectsResolved, reason, messageFormat, messageA...)
}
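
A short sketch of how a reconciler might drive the two new conditions above; with the expanded living condition set, Ready only turns true once both are marked. It assumes the vendored v1alpha1 EventPolicy type with this EventPolicyStatus.

package main

import (
	"fmt"

	"knative.dev/eventing/pkg/apis/eventing/v1alpha1"
)

func main() {
	ep := &v1alpha1.EventPolicy{}
	ep.Status.InitializeConditions()

	// Only one of the two dependent conditions is true: Ready is not yet true.
	ep.Status.MarkOIDCAuthenticationEnabled()
	fmt.Println(ep.Status.IsReady()) // false

	// Once the subjects are resolved as well, the condition set reports Ready.
	ep.Status.MarkSubjectsResolvedSucceeded()
	fmt.Println(ep.Status.IsReady()) // true
}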
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/eventpolicy_validation.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/eventpolicy_validation.go
index 0c267b319..5f05c240d 100644
--- a/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/eventpolicy_validation.go
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1alpha1/eventpolicy_validation.go
@@ -20,10 +20,18 @@ import (
"context"
"strings"
+ "knative.dev/eventing/pkg/apis/feature"
"knative.dev/pkg/apis"
)
func (ep *EventPolicy) Validate(ctx context.Context) *apis.FieldError {
+ // Disallow creation and spec updates of EventPolicy CRs
+ // when the oidc-authentication feature is not enabled.
+ if apis.IsInCreate(ctx) || (apis.IsInUpdate(ctx) && apis.IsInSpec(ctx)) {
+ if !feature.FromContext(ctx).IsOIDCAuthentication() {
+ return apis.ErrGeneric("oidc-authentication feature not enabled")
+ }
+ }
return ep.Spec.Validate(ctx).ViaField("spec")
}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/doc.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/doc.go
new file mode 100644
index 000000000..32587778d
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1beta1 is the v1beta1 version of the API.
+// +k8s:deepcopy-gen=package
+// +groupName=eventing.knative.dev
+package v1beta1
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_conversion.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_conversion.go
new file mode 100644
index 000000000..28ef3055d
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_conversion.go
@@ -0,0 +1,88 @@
+/*
+Copyright 2020 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "context"
+
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+
+ "knative.dev/eventing/pkg/apis/eventing/v1beta2"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+func (source *EventType) ConvertTo(ctx context.Context, obj apis.Convertible) error {
+ switch sink := obj.(type) {
+ case *v1beta2.EventType:
+ sink.ObjectMeta = source.ObjectMeta
+ sink.Status = v1beta2.EventTypeStatus{
+ Status: source.Status.Status,
+ }
+ sink.Spec = v1beta2.EventTypeSpec{
+ Type: source.Spec.Type,
+ Source: source.Spec.Source,
+ Schema: source.Spec.Schema,
+ SchemaData: source.Spec.SchemaData,
+ Description: source.Spec.Description,
+ }
+
+ // Legacy objects may only set spec.broker;
+ // in that case, default the reference to that Broker.
+ if source.Spec.Reference == nil && source.Spec.Broker != "" {
+ sink.Spec.Reference = &duckv1.KReference{
+ APIVersion: "eventing.knative.dev/v1",
+ Kind: "Broker",
+ Name: source.Spec.Broker,
+ }
+ }
+
+ // if we have a reference, use it
+ if source.Spec.Reference != nil {
+ sink.Spec.Reference = source.Spec.Reference
+ }
+
+ return nil
+ default:
+ return apis.ConvertToViaProxy(ctx, source, &v1beta2.EventType{}, sink)
+ }
+}
+
+// ConvertFrom implements apis.Convertible
+func (sink *EventType) ConvertFrom(ctx context.Context, obj apis.Convertible) error {
+ switch source := obj.(type) {
+ case *v1beta2.EventType:
+ sink.ObjectMeta = source.ObjectMeta
+ sink.Status = EventTypeStatus{
+ Status: source.Status.Status,
+ }
+
+ sink.Spec = EventTypeSpec{
+ Type: source.Spec.Type,
+ Source: source.Spec.Source,
+ Schema: source.Spec.Schema,
+ SchemaData: source.Spec.SchemaData,
+ Reference: source.Spec.Reference,
+ Description: source.Spec.Description,
+ }
+
+ return nil
+ default:
+ return apis.ConvertFromViaProxy(ctx, source, &v1beta2.EventType{}, sink)
+ }
+}
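
A sketch of the legacy-Broker handling in the conversion above: when only spec.broker is set on the v1beta1 object, the v1beta2 object receives an equivalent KReference. Import aliases are assumptions.

package main

import (
	"context"
	"fmt"

	eventingv1beta1 "knative.dev/eventing/pkg/apis/eventing/v1beta1"
	eventingv1beta2 "knative.dev/eventing/pkg/apis/eventing/v1beta2"
)

func main() {
	src := &eventingv1beta1.EventType{
		Spec: eventingv1beta1.EventTypeSpec{
			Type:   "dev.example.event",
			Broker: "default", // legacy field, no Reference set
		},
	}

	dst := &eventingv1beta2.EventType{}
	if err := src.ConvertTo(context.Background(), dst); err != nil {
		panic(err)
	}

	// The Broker name is carried over as a KReference to a Broker.
	fmt.Println(dst.Spec.Reference.Kind, dst.Spec.Reference.Name) // Broker default
}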
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_defaults.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_defaults.go
new file mode 100644
index 000000000..06c0d794b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_defaults.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+)
+
+func (et *EventType) SetDefaults(ctx context.Context) {
+ ctx = apis.WithinParent(ctx, et.ObjectMeta)
+ et.Spec.SetDefaults(ctx)
+}
+
+func (ets *EventTypeSpec) SetDefaults(ctx context.Context) {
+ if ets.Reference != nil {
+ ets.Reference.SetDefaults(ctx)
+ }
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_lifecycle.go
new file mode 100644
index 000000000..139487e19
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_lifecycle.go
@@ -0,0 +1,104 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+
+ eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ "knative.dev/pkg/apis"
+)
+
+var eventTypeCondSet = apis.NewLivingConditionSet(EventTypeConditionBrokerExists, EventTypeConditionBrokerReady)
+
+const (
+ EventTypeConditionReady = apis.ConditionReady
+ EventTypeConditionBrokerExists apis.ConditionType = "BrokerExists"
+ EventTypeConditionBrokerReady apis.ConditionType = "BrokerReady"
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*EventType) GetConditionSet() apis.ConditionSet {
+ return eventTypeCondSet
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (et *EventTypeStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return eventTypeCondSet.Manage(et).GetCondition(t)
+}
+
+// IsReady returns true if the resource is ready overall.
+func (et *EventTypeStatus) IsReady() bool {
+ return eventTypeCondSet.Manage(et).IsHappy()
+}
+
+// GetTopLevelCondition returns the top level Condition.
+func (et *EventTypeStatus) GetTopLevelCondition() *apis.Condition {
+ return eventTypeCondSet.Manage(et).GetTopLevelCondition()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (et *EventTypeStatus) InitializeConditions() {
+ eventTypeCondSet.Manage(et).InitializeConditions()
+}
+
+func (et *EventTypeStatus) MarkBrokerExists() {
+ eventTypeCondSet.Manage(et).MarkTrue(EventTypeConditionBrokerExists)
+}
+
+func (et *EventTypeStatus) MarkBrokerDoesNotExist() {
+ eventTypeCondSet.Manage(et).MarkFalse(EventTypeConditionBrokerExists, "BrokerDoesNotExist", "Broker does not exist")
+}
+
+func (et *EventTypeStatus) MarkBrokerExistsUnknown(reason, messageFormat string, messageA ...interface{}) {
+ eventTypeCondSet.Manage(et).MarkUnknown(EventTypeConditionBrokerExists, reason, messageFormat, messageA...)
+}
+
+func (et *EventTypeStatus) MarkBrokerReady() {
+ eventTypeCondSet.Manage(et).MarkTrue(EventTypeConditionBrokerReady)
+}
+
+func (et *EventTypeStatus) MarkBrokerFailed(reason, messageFormat string, messageA ...interface{}) {
+ eventTypeCondSet.Manage(et).MarkFalse(EventTypeConditionBrokerReady, reason, messageFormat, messageA...)
+}
+
+func (et *EventTypeStatus) MarkBrokerUnknown(reason, messageFormat string, messageA ...interface{}) {
+ eventTypeCondSet.Manage(et).MarkUnknown(EventTypeConditionBrokerReady, reason, messageFormat, messageA...)
+}
+
+func (et *EventTypeStatus) MarkBrokerNotConfigured() {
+ eventTypeCondSet.Manage(et).MarkUnknown(EventTypeConditionBrokerReady,
+ "BrokerNotConfigured", "Broker has not yet been reconciled.")
+}
+
+func (et *EventTypeStatus) PropagateBrokerStatus(bs *eventingv1.BrokerStatus) {
+ bc := bs.GetConditionSet().Manage(bs).GetTopLevelCondition()
+ if bc == nil {
+ et.MarkBrokerNotConfigured()
+ return
+ }
+ switch {
+ case bc.Status == corev1.ConditionUnknown:
+ et.MarkBrokerUnknown(bc.Reason, bc.Message)
+ case bc.Status == corev1.ConditionTrue:
+ eventTypeCondSet.Manage(et).MarkTrue(EventTypeConditionBrokerReady)
+ case bc.Status == corev1.ConditionFalse:
+ et.MarkBrokerFailed(bc.Reason, bc.Message)
+ default:
+ et.MarkBrokerUnknown("BrokerUnknown", "The status of Broker is invalid: %v", bc.Status)
+ }
+}
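
The v1beta1 EventType only becomes Ready once both broker-related conditions are true; a minimal sketch using the status helpers above.

package main

import (
	"fmt"

	"knative.dev/eventing/pkg/apis/eventing/v1beta1"
)

func main() {
	et := &v1beta1.EventType{}
	et.Status.InitializeConditions()

	et.Status.MarkBrokerExists()
	fmt.Println(et.Status.IsReady()) // false: BrokerReady is still unknown

	et.Status.MarkBrokerReady()
	fmt.Println(et.Status.IsReady()) // true: both dependent conditions are true
}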
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_types.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_types.go
new file mode 100644
index 000000000..95c4439e0
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_types.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventType represents a type of event that can be consumed from a Broker.
+// Deprecated: use v1beta2.EventType instead.
+type EventType struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired state of the EventType.
+ Spec EventTypeSpec `json:"spec,omitempty"`
+
+ // Status represents the current state of the EventType.
+ // This data may be out of date.
+ // +optional
+ Status EventTypeStatus `json:"status,omitempty"`
+}
+
+var (
+ // Check that EventType can be validated, can be defaulted, and has immutable fields.
+ _ apis.Validatable = (*EventType)(nil)
+ _ apis.Defaultable = (*EventType)(nil)
+
+ // Check that EventType can return its spec untyped.
+ _ apis.HasSpec = (*EventType)(nil)
+
+ _ runtime.Object = (*EventType)(nil)
+
+ // Check that we can create OwnerReferences to an EventType.
+ _ kmeta.OwnerRefable = (*EventType)(nil)
+
+ // Check that the type conforms to the duck Knative Resource shape.
+ _ duckv1.KRShaped = (*EventType)(nil)
+)
+
+type EventTypeSpec struct {
+ // Type represents the CloudEvents type. It is authoritative.
+ Type string `json:"type"`
+ // Source is a URI, it represents the CloudEvents source.
+ // +optional
+ Source *apis.URL `json:"source,omitempty"`
+ // Schema is a URI, it represents the CloudEvents schemaurl extension attribute.
+ // It may be a JSON schema, a protobuf schema, etc. It is optional.
+ // +optional
+ Schema *apis.URL `json:"schema,omitempty"`
+ // SchemaData allows the CloudEvents schema to be stored directly in the
+ // EventType. Content is dependent on the encoding. Optional attribute.
+ // The contents are not validated or manipulated by the system.
+ // +optional
+ SchemaData string `json:"schemaData,omitempty"`
+ // Broker refers to the Broker that can provide the EventType.
+ // +optional
+ Broker string `json:"broker,omitempty"`
+ // Reference is a KReference to the belonging addressable.
+ // For example, this could be a pointer to a Broker.
+ // +optional
+ Reference *duckv1.KReference `json:"reference,omitempty"`
+ // Description is an optional field used to describe the EventType, in any meaningful way.
+ // +optional
+ Description string `json:"description,omitempty"`
+}
+
+// EventTypeStatus represents the current state of an EventType.
+type EventTypeStatus struct {
+ // inherits duck/v1 Status, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller.
+ // * Conditions - the latest available observations of a resource's current state.
+ duckv1.Status `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventTypeList is a collection of EventTypes.
+type EventTypeList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []EventType `json:"items"`
+}
+
+// GetGroupVersionKind returns GroupVersionKind for EventType
+func (p *EventType) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("EventType")
+}
+
+// GetUntypedSpec returns the spec of the EventType.
+func (e *EventType) GetUntypedSpec() interface{} {
+ return e.Spec
+}
+
+// GetStatus retrieves the status of the EventType. Implements the KRShaped interface.
+func (t *EventType) GetStatus() *duckv1.Status {
+ return &t.Status.Status
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_validation.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_validation.go
new file mode 100644
index 000000000..3688bc899
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/eventtype_validation.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+ "knative.dev/pkg/kmp"
+)
+
+func (et *EventType) Validate(ctx context.Context) *apis.FieldError {
+ return et.Spec.Validate(ctx).ViaField("spec")
+}
+
+func (ets *EventTypeSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+ if ets.Type == "" {
+ fe := apis.ErrMissingField("type")
+ errs = errs.Also(fe)
+ }
+ // TODO validate Source is a valid URI.
+ // TODO validate Schema is a valid URI.
+ // There is no validation of the SchemaData, it is application specific data.
+ return errs
+}
+
+func (et *EventType) CheckImmutableFields(ctx context.Context, original *EventType) *apis.FieldError {
+ if original == nil {
+ return nil
+ }
+
+ // All fields are immutable.
+ if diff, err := kmp.ShortDiff(original.Spec, et.Spec); err != nil {
+ return &apis.FieldError{
+ Message: "Failed to diff EventType",
+ Paths: []string{"spec"},
+ Details: err.Error(),
+ }
+ } else if diff != "" {
+ return &apis.FieldError{
+ Message: "Immutable fields changed (-old +new)",
+ Paths: []string{"spec"},
+ Details: diff,
+ }
+ }
+ return nil
+}
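
CheckImmutableFields above rejects any change to the spec; a sketch of that behaviour, using only the types and helpers vendored in this diff.

package main

import (
	"context"
	"fmt"

	"knative.dev/eventing/pkg/apis/eventing/v1beta1"
)

func main() {
	original := &v1beta1.EventType{
		Spec: v1beta1.EventTypeSpec{Type: "dev.example.event"},
	}

	updated := original.DeepCopy()
	updated.Spec.Type = "dev.example.changed"

	// Any diff in spec yields an "Immutable fields changed" FieldError on "spec".
	if fe := updated.CheckImmutableFields(context.Background(), original); fe != nil {
		fmt.Println(fe.Message) // Immutable fields changed (-old +new)
	}
}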
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/register.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/register.go
new file mode 100644
index 000000000..7e45875a8
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta1
+
+import (
+ "knative.dev/eventing/pkg/apis/eventing"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: eventing.GroupName, Version: "v1beta1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &EventType{},
+ &EventTypeList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
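
The new package registers its types with a runtime scheme in the usual way; a usage sketch (the printed GVK string is what the standard GroupVersionKind formatting is expected to produce).

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"

	"knative.dev/eventing/pkg/apis/eventing/v1beta1"
)

func main() {
	scheme := runtime.NewScheme()
	if err := v1beta1.AddToScheme(scheme); err != nil {
		panic(err)
	}

	// EventType and EventTypeList are now known under eventing.knative.dev/v1beta1.
	gvks, _, err := scheme.ObjectKinds(&v1beta1.EventType{})
	if err != nil {
		panic(err)
	}
	fmt.Println(gvks[0]) // eventing.knative.dev/v1beta1, Kind=EventType
}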
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..381fbe4f5
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta1/zz_generated.deepcopy.go
@@ -0,0 +1,137 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ apis "knative.dev/pkg/apis"
+ v1 "knative.dev/pkg/apis/duck/v1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventType) DeepCopyInto(out *EventType) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventType.
+func (in *EventType) DeepCopy() *EventType {
+ if in == nil {
+ return nil
+ }
+ out := new(EventType)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventType) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventTypeList) DeepCopyInto(out *EventTypeList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EventType, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeList.
+func (in *EventTypeList) DeepCopy() *EventTypeList {
+ if in == nil {
+ return nil
+ }
+ out := new(EventTypeList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventTypeList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventTypeSpec) DeepCopyInto(out *EventTypeSpec) {
+ *out = *in
+ if in.Source != nil {
+ in, out := &in.Source, &out.Source
+ *out = new(apis.URL)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Schema != nil {
+ in, out := &in.Schema, &out.Schema
+ *out = new(apis.URL)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Reference != nil {
+ in, out := &in.Reference, &out.Reference
+ *out = new(v1.KReference)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeSpec.
+func (in *EventTypeSpec) DeepCopy() *EventTypeSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EventTypeSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventTypeStatus) DeepCopyInto(out *EventTypeStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeStatus.
+func (in *EventTypeStatus) DeepCopy() *EventTypeStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(EventTypeStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/doc.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/doc.go
new file mode 100644
index 000000000..115faf55b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1beta2 is the v1beta2 version of the API.
+// +k8s:deepcopy-gen=package
+// +groupName=eventing.knative.dev
+package v1beta2
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_conversion.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_conversion.go
new file mode 100644
index 000000000..c7569fa82
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_conversion.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+
+ eventing "knative.dev/eventing/pkg/apis/eventing/v1"
+ "knative.dev/eventing/pkg/apis/eventing/v1beta3"
+)
+
+// ConvertTo converts the receiver into `to`.
+func (source *EventType) ConvertTo(ctx context.Context, to apis.Convertible) error {
+ switch sink := to.(type) {
+ case *v1beta3.EventType:
+
+ source.ObjectMeta.DeepCopyInto(&sink.ObjectMeta)
+ source.Status.Status.DeepCopyInto(&sink.Status.Status)
+
+ sink.Spec.Reference = source.Spec.Reference.DeepCopy()
+ sink.Spec.Description = source.Spec.Description
+
+ if source.Spec.Reference == nil && source.Spec.Broker != "" {
+ source.Spec.Reference = &duckv1.KReference{
+ Kind: "Broker",
+ Name: source.Spec.Broker,
+ APIVersion: eventing.SchemeGroupVersion.String(),
+ }
+ }
+
+ sink.Spec.Attributes = []v1beta3.EventAttributeDefinition{
+ {
+ Name: "specversion",
+ Required: true,
+ },
+ {
+ Name: "id",
+ Required: true,
+ },
+ }
+ // Set all required attributes for the v1beta3 resource. If there is no sensible value, leave it empty.
+ if source.Spec.Type != "" {
+ sink.Spec.Attributes = append(sink.Spec.Attributes, v1beta3.EventAttributeDefinition{
+ Name: "type",
+ Required: true,
+ Value: source.Spec.Type,
+ })
+ } else {
+ sink.Spec.Attributes = append(sink.Spec.Attributes, v1beta3.EventAttributeDefinition{
+ Name: "type",
+ Required: true,
+ })
+ }
+ if source.Spec.Source != nil {
+ sink.Spec.Attributes = append(sink.Spec.Attributes, v1beta3.EventAttributeDefinition{
+ Name: "source",
+ Required: true,
+ Value: source.Spec.Source.String(),
+ })
+ } else {
+ sink.Spec.Attributes = append(sink.Spec.Attributes, v1beta3.EventAttributeDefinition{
+ Name: "source",
+ Required: true,
+ })
+ }
+
+ // convert the schema so that we don't lose it in the conversion.
+ if source.Spec.Schema != nil {
+ sink.Spec.Attributes = append(sink.Spec.Attributes, v1beta3.EventAttributeDefinition{
+ Name: "schemadata",
+ Required: false,
+ Value: source.Spec.Schema.String(),
+ })
+ }
+ return nil
+ default:
+ return apis.ConvertToViaProxy(ctx, source, &v1beta3.EventType{}, to)
+ }
+
+}
+
+// ConvertFrom implements apis.Convertible
+func (sink *EventType) ConvertFrom(ctx context.Context, from apis.Convertible) error {
+ switch source := from.(type) {
+ case *v1beta3.EventType:
+
+ source.ObjectMeta.DeepCopyInto(&sink.ObjectMeta)
+ source.Status.Status.DeepCopyInto(&sink.Status.Status)
+
+ sink.Spec.Reference = source.Spec.Reference.DeepCopy()
+ sink.Spec.Description = source.Spec.Description
+
+ for _, at := range source.Spec.Attributes {
+ switch at.Name {
+ case "source":
+ sink.Spec.Source, _ = apis.ParseURL(at.Value)
+ case "type":
+ sink.Spec.Type = at.Value
+ case "schemadata":
+ sink.Spec.Schema, _ = apis.ParseURL(at.Value)
+ }
+ }
+
+ return nil
+ default:
+ return apis.ConvertFromViaProxy(ctx, from, &v1beta3.EventType{}, sink)
+ }
+}
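
In the v1beta2 to v1beta3 direction, the flat type/source/schema fields become entries in spec.attributes; a sketch of that mapping, assuming the vendored packages above.

package main

import (
	"context"
	"fmt"

	"knative.dev/pkg/apis"

	"knative.dev/eventing/pkg/apis/eventing/v1beta2"
	"knative.dev/eventing/pkg/apis/eventing/v1beta3"
)

func main() {
	src := &v1beta2.EventType{
		Spec: v1beta2.EventTypeSpec{
			Type:   "dev.example.event",
			Source: apis.HTTP("example.com"),
		},
	}

	dst := &v1beta3.EventType{}
	if err := src.ConvertTo(context.Background(), dst); err != nil {
		panic(err)
	}

	// specversion and id are always required; type and source carry the old values.
	for _, attr := range dst.Spec.Attributes {
		fmt.Println(attr.Name, attr.Required, attr.Value)
	}
}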
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_defaults.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_defaults.go
new file mode 100644
index 000000000..217cc5158
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_defaults.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+)
+
+func (et *EventType) SetDefaults(ctx context.Context) {
+ ctx = apis.WithinParent(ctx, et.ObjectMeta)
+ et.Spec.SetDefaults(ctx)
+}
+
+func (ets *EventTypeSpec) SetDefaults(ctx context.Context) {
+ if ets.Reference != nil {
+ ets.Reference.SetDefaults(ctx)
+ }
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_lifecycle.go
new file mode 100644
index 000000000..300114a8c
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_lifecycle.go
@@ -0,0 +1,65 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "knative.dev/pkg/apis"
+)
+
+var eventTypeCondSet = apis.NewLivingConditionSet(EventTypeConditionReferenceExists)
+
+const (
+ EventTypeConditionReady = apis.ConditionReady
+ EventTypeConditionReferenceExists apis.ConditionType = "ReferenceExists"
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*EventType) GetConditionSet() apis.ConditionSet {
+ return eventTypeCondSet
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (et *EventTypeStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return eventTypeCondSet.Manage(et).GetCondition(t)
+}
+
+// IsReady returns true if the resource is ready overall.
+func (et *EventTypeStatus) IsReady() bool {
+ return eventTypeCondSet.Manage(et).IsHappy()
+}
+
+// GetTopLevelCondition returns the top level Condition.
+func (et *EventTypeStatus) GetTopLevelCondition() *apis.Condition {
+ return eventTypeCondSet.Manage(et).GetTopLevelCondition()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (et *EventTypeStatus) InitializeConditions() {
+ eventTypeCondSet.Manage(et).InitializeConditions()
+}
+
+func (et *EventTypeStatus) MarkReferenceExists() {
+ eventTypeCondSet.Manage(et).MarkTrue(EventTypeConditionReferenceExists)
+}
+
+func (et *EventTypeStatus) MarkReferenceDoesNotExist() {
+ eventTypeCondSet.Manage(et).MarkFalse(EventTypeConditionReferenceExists, "ResourceDoesNotExist", "Resource in spec.reference does not exist")
+}
+
+func (et *EventTypeStatus) MarkReferenceExistsUnknown(reason, messageFormat string, messageA ...interface{}) {
+ eventTypeCondSet.Manage(et).MarkUnknown(EventTypeConditionReferenceExists, reason, messageFormat, messageA...)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_types.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_types.go
new file mode 100644
index 000000000..8356aa358
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_types.go
@@ -0,0 +1,123 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventType represents a type of event that can be consumed from a Broker.
+type EventType struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired state of the EventType.
+ Spec EventTypeSpec `json:"spec,omitempty"`
+
+ // Status represents the current state of the EventType.
+ // This data may be out of date.
+ // +optional
+ Status EventTypeStatus `json:"status,omitempty"`
+}
+
+var (
+ // Check that EventType can be validated, can be defaulted, and has immutable fields.
+ _ apis.Validatable = (*EventType)(nil)
+ _ apis.Defaultable = (*EventType)(nil)
+
+ // Check that EventType can return its spec untyped.
+ _ apis.HasSpec = (*EventType)(nil)
+
+ _ runtime.Object = (*EventType)(nil)
+
+ // Check that we can create OwnerReferences to an EventType.
+ _ kmeta.OwnerRefable = (*EventType)(nil)
+
+ // Check that the type conforms to the duck Knative Resource shape.
+ _ duckv1.KRShaped = (*EventType)(nil)
+)
+
+type EventTypeSpec struct {
+ // Type represents the CloudEvents type. It is authoritative.
+ Type string `json:"type"`
+ // Source is a URI, it represents the CloudEvents source.
+ // +optional
+ Source *apis.URL `json:"source,omitempty"`
+ // Schema is a URI, it represents the CloudEvents schemaurl extension attribute.
+ // It may be a JSON schema, a protobuf schema, etc. It is optional.
+ // +optional
+ Schema *apis.URL `json:"schema,omitempty"`
+ // SchemaData allows the CloudEvents schema to be stored directly in the
+ // EventType. Content is dependent on the encoding. Optional attribute.
+ // The contents are not validated or manipulated by the system.
+ // +optional
+ SchemaData string `json:"schemaData,omitempty"`
+ // Broker refers to the Broker that can provide the EventType.
+ // Deprecated: This field is deprecated and will be removed in a future release.
+ // +optional
+ Broker string `json:"broker,omitempty"`
+ // Reference is a KReference to the belonging addressable.
+ // For example, this could be a pointer to a Broker.
+ // +optional
+ Reference *duckv1.KReference `json:"reference,omitempty"`
+ // Description is an optional field used to describe the EventType, in any meaningful way.
+ // +optional
+ Description string `json:"description,omitempty"`
+}
+
+// EventTypeStatus represents the current state of an EventType.
+type EventTypeStatus struct {
+ // inherits duck/v1 Status, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller.
+ // * Conditions - the latest available observations of a resource's current state.
+ duckv1.Status `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventTypeList is a collection of EventTypes.
+type EventTypeList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []EventType `json:"items"`
+}
+
+// GetGroupVersionKind returns GroupVersionKind for EventType
+func (p *EventType) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("EventType")
+}
+
+// GetUntypedSpec returns the spec of the EventType.
+func (e *EventType) GetUntypedSpec() interface{} {
+ return e.Spec
+}
+
+// GetStatus retrieves the status of the EventType. Implements the KRShaped interface.
+func (t *EventType) GetStatus() *duckv1.Status {
+ return &t.Status.Status
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_validation.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_validation.go
new file mode 100644
index 000000000..5e610aad9
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/eventtype_validation.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+ "knative.dev/pkg/kmp"
+)
+
+func (et *EventType) Validate(ctx context.Context) *apis.FieldError {
+ return et.Spec.Validate(ctx).ViaField("spec")
+}
+
+func (ets *EventTypeSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+ if ets.Type == "" {
+ fe := apis.ErrMissingField("type")
+ errs = errs.Also(fe)
+ }
+ // TODO validate Source is a valid URI.
+ // TODO validate Schema is a valid URI.
+ // There is no validation of the SchemaData, it is application specific data.
+ return errs
+}
+
+func (et *EventType) CheckImmutableFields(ctx context.Context, original *EventType) *apis.FieldError {
+ if original == nil {
+ return nil
+ }
+
+ // All fields are immutable.
+ if diff, err := kmp.ShortDiff(original.Spec, et.Spec); err != nil {
+ return &apis.FieldError{
+ Message: "Failed to diff EventType",
+ Paths: []string{"spec"},
+ Details: err.Error(),
+ }
+ } else if diff != "" {
+ return &apis.FieldError{
+ Message: "Immutable fields changed (-old +new)",
+ Paths: []string{"spec"},
+ Details: diff,
+ }
+ }
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/register.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/register.go
new file mode 100644
index 000000000..ef8f01caa
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "knative.dev/eventing/pkg/apis/eventing"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: eventing.GroupName, Version: "v1beta2"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &EventType{},
+ &EventTypeList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/zz_generated.deepcopy.go
new file mode 100644
index 000000000..c3e2eb977
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta2/zz_generated.deepcopy.go
@@ -0,0 +1,137 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ apis "knative.dev/pkg/apis"
+ v1 "knative.dev/pkg/apis/duck/v1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventType) DeepCopyInto(out *EventType) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventType.
+func (in *EventType) DeepCopy() *EventType {
+ if in == nil {
+ return nil
+ }
+ out := new(EventType)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventType) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventTypeList) DeepCopyInto(out *EventTypeList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EventType, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeList.
+func (in *EventTypeList) DeepCopy() *EventTypeList {
+ if in == nil {
+ return nil
+ }
+ out := new(EventTypeList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventTypeList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventTypeSpec) DeepCopyInto(out *EventTypeSpec) {
+ *out = *in
+ if in.Source != nil {
+ in, out := &in.Source, &out.Source
+ *out = new(apis.URL)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Schema != nil {
+ in, out := &in.Schema, &out.Schema
+ *out = new(apis.URL)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Reference != nil {
+ in, out := &in.Reference, &out.Reference
+ *out = new(v1.KReference)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeSpec.
+func (in *EventTypeSpec) DeepCopy() *EventTypeSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EventTypeSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventTypeStatus) DeepCopyInto(out *EventTypeStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeStatus.
+func (in *EventTypeStatus) DeepCopy() *EventTypeStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(EventTypeStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/doc.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/doc.go
new file mode 100644
index 000000000..6636cd709
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2023 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1beta3 is the v1beta3 version of the API.
+// +k8s:deepcopy-gen=package
+// +groupName=eventing.knative.dev
+package v1beta3
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_conversion.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_conversion.go
new file mode 100644
index 000000000..195cc165a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_conversion.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2023 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta3
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+func (source *EventType) ConvertTo(ctx context.Context, to apis.Convertible) error {
+ return fmt.Errorf("v1beta3 is the highest known version, got: %T", to)
+}
+
+// ConvertFrom implements apis.Convertible
+func (sink *EventType) ConvertFrom(ctx context.Context, from apis.Convertible) error {
+ return fmt.Errorf("v1beta3 is the highest known version, got: %T", from)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_defaults.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_defaults.go
new file mode 100644
index 000000000..4c894901f
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_defaults.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2023 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta3
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+)
+
+func (et *EventType) SetDefaults(ctx context.Context) {
+ ctx = apis.WithinParent(ctx, et.ObjectMeta)
+ et.Spec.SetDefaults(ctx)
+}
+
+func (ets *EventTypeSpec) SetDefaults(ctx context.Context) {
+ if ets.Reference != nil {
+ ets.Reference.SetDefaults(ctx)
+ }
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_lifecycle.go
new file mode 100644
index 000000000..35d994b44
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_lifecycle.go
@@ -0,0 +1,69 @@
+/*
+Copyright 2023 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta3
+
+import (
+ "knative.dev/pkg/apis"
+)
+
+var eventTypeCondSet = apis.NewLivingConditionSet(EventTypeConditionReferenceExists)
+
+const (
+ EventTypeConditionReady = apis.ConditionReady
+ EventTypeConditionReferenceExists apis.ConditionType = "ReferenceExists"
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*EventType) GetConditionSet() apis.ConditionSet {
+ return eventTypeCondSet
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (et *EventTypeStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return eventTypeCondSet.Manage(et).GetCondition(t)
+}
+
+// IsReady returns true if the resource is ready overall.
+func (et *EventTypeStatus) IsReady() bool {
+ return eventTypeCondSet.Manage(et).IsHappy()
+}
+
+// GetTopLevelCondition returns the top level Condition.
+func (et *EventTypeStatus) GetTopLevelCondition() *apis.Condition {
+ return eventTypeCondSet.Manage(et).GetTopLevelCondition()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (et *EventTypeStatus) InitializeConditions() {
+ eventTypeCondSet.Manage(et).InitializeConditions()
+}
+
+func (et *EventTypeStatus) MarkReferenceExists() {
+ eventTypeCondSet.Manage(et).MarkTrue(EventTypeConditionReferenceExists)
+}
+
+func (et *EventTypeStatus) MarkReferenceDoesNotExist() {
+ eventTypeCondSet.Manage(et).MarkFalse(EventTypeConditionReferenceExists, "ResourceDoesNotExist", "Resource in spec.reference does not exist")
+}
+
+func (et *EventTypeStatus) MarkReferenceNotSet() {
+ eventTypeCondSet.Manage(et).MarkTrueWithReason(EventTypeConditionReferenceExists, "ReferenceNotSet", "spec.reference is not set")
+}
+
+func (et *EventTypeStatus) MarkReferenceExistsUnknown(reason, messageFormat string, messageA ...interface{}) {
+ eventTypeCondSet.Manage(et).MarkUnknown(EventTypeConditionReferenceExists, reason, messageFormat, messageA...)
+}
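+
+// Note (illustrative usage, not upstream documentation): a reconciler would typically call
+// InitializeConditions first, then MarkReferenceExists, MarkReferenceDoesNotExist or
+// MarkReferenceNotSet depending on how spec.reference resolves, and finally read IsReady
+// to report the overall Ready condition computed from eventTypeCondSet.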
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_types.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_types.go
new file mode 100644
index 000000000..2ecc74586
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_types.go
@@ -0,0 +1,120 @@
+/*
+Copyright 2023 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta3
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventType represents a type of event that can be consumed from a Broker.
+type EventType struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired state of the EventType.
+ Spec EventTypeSpec `json:"spec,omitempty"`
+
+ // Status represents the current state of the EventType.
+ // This data may be out of date.
+ // +optional
+ Status EventTypeStatus `json:"status,omitempty"`
+}
+
+var (
+ // Check that EventType can be validated, can be defaulted, and has immutable fields.
+ _ apis.Validatable = (*EventType)(nil)
+ _ apis.Defaultable = (*EventType)(nil)
+
+ // Check that EventType can return its spec untyped.
+ _ apis.HasSpec = (*EventType)(nil)
+
+ _ runtime.Object = (*EventType)(nil)
+
+ // Check that we can create OwnerReferences to an EventType.
+ _ kmeta.OwnerRefable = (*EventType)(nil)
+
+ // Check that the type conforms to the duck Knative Resource shape.
+ _ duckv1.KRShaped = (*EventType)(nil)
+)
+
+type EventTypeSpec struct {
+ // Reference is a KReference to the addressable this EventType belongs to.
+ // For example, this could be a pointer to a Broker.
+ // +optional
+ Reference *duckv1.KReference `json:"reference,omitempty"`
+ // Description is an optional field used to describe the EventType, in any meaningful way.
+ // +optional
+ Description string `json:"description,omitempty"`
+ // Attributes is an array of CloudEvent attributes and extension attributes.
+ Attributes []EventAttributeDefinition `json:"attributes"`
+}
+
+type EventAttributeDefinition struct {
+ // Name is the name of the CloudEvents attribute.
+ Name string `json:"name"`
+ // Required determines whether this attribute must be set on corresponding CloudEvents.
+ Required bool `json:"required"`
+ // Value is a string representing the allowable values for the EventType attribute.
+ // It may be a single value such as "/apis/v1/namespaces/default/pingsource/ps", or it could be a template
+ // for the allowed values, such as "/apis/v1/namespaces/{namespace}/pingsource/{sourceName}".
+ // To specify a section of the string value which may change between different CloudEvents
+ // you can use curly brackets {} and optionally a variable name between them.
+ Value string `json:"value,omitempty"`
+}
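+
+// For illustration only (not part of the generated API): a fixed required attribute could be
+// declared as {Name: "type", Required: true, Value: "dev.knative.sources.ping"}, while an
+// attribute whose value varies per event can use a template such as
+// {Name: "source", Required: true, Value: "/apis/v1/namespaces/{namespace}/pingsources/{name}"}.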
+
+// EventTypeStatus represents the current state of an EventType.
+type EventTypeStatus struct {
+ // inherits duck/v1 Status, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller.
+ // * Conditions - the latest available observations of a resource's current state.
+ duckv1.Status `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// EventTypeList is a collection of EventTypes.
+type EventTypeList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []EventType `json:"items"`
+}
+
+// GetGroupVersionKind returns GroupVersionKind for EventType
+func (p *EventType) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("EventType")
+}
+
+// GetUntypedSpec returns the spec of the EventType.
+func (e *EventType) GetUntypedSpec() interface{} {
+ return e.Spec
+}
+
+// GetStatus retrieves the status of the EventType. Implements the KRShaped interface.
+func (t *EventType) GetStatus() *duckv1.Status {
+ return &t.Status.Status
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_validation.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_validation.go
new file mode 100644
index 000000000..75590bcae
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/eventtype_validation.go
@@ -0,0 +1,85 @@
+/*
+Copyright 2023 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta3
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+ "knative.dev/pkg/kmp"
+)
+
+func (et *EventType) Validate(ctx context.Context) *apis.FieldError {
+ return et.Spec.Validate(ctx).ViaField("spec")
+}
+
+func (ets *EventTypeSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+ // TODO: validate attribute with name=source is a valid URI
+ // TODO: validate attribute with name=schema is a valid URI
+ errs = errs.Also(ets.ValidateAttributes().ViaField("attributes"))
+ return errs
+}
+
+func (et *EventType) CheckImmutableFields(ctx context.Context, original *EventType) *apis.FieldError {
+ if original == nil {
+ return nil
+ }
+
+ // All fields are immutable.
+ if diff, err := kmp.ShortDiff(original.Spec, et.Spec); err != nil {
+ return &apis.FieldError{
+ Message: "Failed to diff EventType",
+ Paths: []string{"spec"},
+ Details: err.Error(),
+ }
+ } else if diff != "" {
+ return &apis.FieldError{
+ Message: "Immutable fields changed (-old +new)",
+ Paths: []string{"spec"},
+ Details: diff,
+ }
+ }
+ return nil
+}
+
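+// ValidateAttributes checks that the attribute list declares the four context attributes
+// that the CloudEvents v1.0 specification marks as REQUIRED: id, source, specversion and type.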
+func (ets *EventTypeSpec) ValidateAttributes() *apis.FieldError {
+ attributes := make(map[string]EventAttributeDefinition, len(ets.Attributes))
+ for _, attr := range ets.Attributes {
+ attributes[attr.Name] = attr
+ }
+
+ missingFields := []string{}
+ if _, ok := attributes["type"]; !ok {
+ missingFields = append(missingFields, "type")
+ }
+ if _, ok := attributes["source"]; !ok {
+ missingFields = append(missingFields, "source")
+ }
+ if _, ok := attributes["specversion"]; !ok {
+ missingFields = append(missingFields, "specversion")
+ }
+ if _, ok := attributes["id"]; !ok {
+ missingFields = append(missingFields, "id")
+ }
+
+ if len(missingFields) > 0 {
+ return apis.ErrMissingField(missingFields...)
+ }
+
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/register.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/register.go
new file mode 100644
index 000000000..3b9e98fbf
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2023 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta3
+
+import (
+ "knative.dev/eventing/pkg/apis/eventing"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: eventing.GroupName, Version: "v1beta3"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &EventType{},
+ &EventTypeList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/zz_generated.deepcopy.go
new file mode 100644
index 000000000..e25635dc5
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/eventing/v1beta3/zz_generated.deepcopy.go
@@ -0,0 +1,147 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta3
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ v1 "knative.dev/pkg/apis/duck/v1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventAttributeDefinition) DeepCopyInto(out *EventAttributeDefinition) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventAttributeDefinition.
+func (in *EventAttributeDefinition) DeepCopy() *EventAttributeDefinition {
+ if in == nil {
+ return nil
+ }
+ out := new(EventAttributeDefinition)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventType) DeepCopyInto(out *EventType) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventType.
+func (in *EventType) DeepCopy() *EventType {
+ if in == nil {
+ return nil
+ }
+ out := new(EventType)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventType) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventTypeList) DeepCopyInto(out *EventTypeList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]EventType, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeList.
+func (in *EventTypeList) DeepCopy() *EventTypeList {
+ if in == nil {
+ return nil
+ }
+ out := new(EventTypeList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *EventTypeList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventTypeSpec) DeepCopyInto(out *EventTypeSpec) {
+ *out = *in
+ if in.Reference != nil {
+ in, out := &in.Reference, &out.Reference
+ *out = new(v1.KReference)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Attributes != nil {
+ in, out := &in.Attributes, &out.Attributes
+ *out = make([]EventAttributeDefinition, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeSpec.
+func (in *EventTypeSpec) DeepCopy() *EventTypeSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(EventTypeSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *EventTypeStatus) DeepCopyInto(out *EventTypeStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new EventTypeStatus.
+func (in *EventTypeStatus) DeepCopy() *EventTypeStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(EventTypeStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/feature/api_validation.go b/vendor/knative.dev/eventing/pkg/apis/feature/api_validation.go
index 2a5f9226a..a8144e245 100644
--- a/vendor/knative.dev/eventing/pkg/apis/feature/api_validation.go
+++ b/vendor/knative.dev/eventing/pkg/apis/feature/api_validation.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/feature/features.go b/vendor/knative.dev/eventing/pkg/apis/feature/features.go
index 982ca8c67..b2ca380fc 100644
--- a/vendor/knative.dev/eventing/pkg/apis/feature/features.go
+++ b/vendor/knative.dev/eventing/pkg/apis/feature/features.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,6 +18,7 @@ package feature
import (
"fmt"
+ "log"
"strings"
corev1 "k8s.io/api/core/v1"
@@ -75,7 +76,6 @@ func newDefaults() Flags {
DeliveryRetryAfter: Disabled,
DeliveryTimeout: Enabled,
KReferenceMapping: Disabled,
- NewTriggerFilters: Enabled,
TransportEncryption: Disabled,
OIDCAuthentication: Disabled,
EvenTypeAutoCreate: Disabled,
@@ -186,7 +186,8 @@ func NewFlagsConfigFromMap(data map[string]string) (Flags, error) {
} else if strings.Contains(k, NodeSelectorLabel) {
flags[sanitizedKey] = Flag(v)
} else {
- return flags, fmt.Errorf("cannot parse the feature flag '%s' = '%s'", k, v)
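+ // Unknown keys are kept as raw flags and only logged, so that newer or
+ // extension feature flags do not fail config parsing.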
+ flags[k] = Flag(v)
+ log.Printf("Warning: unknown feature flag value %q=%q\n", k, v)
}
}
diff --git a/vendor/knative.dev/eventing/pkg/apis/feature/flag_names.go b/vendor/knative.dev/eventing/pkg/apis/feature/flag_names.go
index 99abc2076..6a7579fb7 100644
--- a/vendor/knative.dev/eventing/pkg/apis/feature/flag_names.go
+++ b/vendor/knative.dev/eventing/pkg/apis/feature/flag_names.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,7 +21,6 @@ const (
DeliveryRetryAfter = "delivery-retryafter"
DeliveryTimeout = "delivery-timeout"
KReferenceMapping = "kreference-mapping"
- NewTriggerFilters = "new-trigger-filters"
TransportEncryption = "transport-encryption"
EvenTypeAutoCreate = "eventtype-auto-create"
OIDCAuthentication = "authentication-oidc"
diff --git a/vendor/knative.dev/eventing/pkg/apis/feature/store.go b/vendor/knative.dev/eventing/pkg/apis/feature/store.go
index 8285f7862..06297c96d 100644
--- a/vendor/knative.dev/eventing/pkg/apis/feature/store.go
+++ b/vendor/knative.dev/eventing/pkg/apis/feature/store.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/register.go b/vendor/knative.dev/eventing/pkg/apis/flows/register.go
new file mode 100644
index 000000000..81b0c8f65
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/register.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package flows
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+const (
+ GroupName = "flows.knative.dev"
+)
+
+var (
+ // SequenceResource represents a Knative Sequence
+ SequenceResource = schema.GroupResource{
+ Group: GroupName,
+ Resource: "sequences",
+ }
+ // ParallelResource represents a Knative Parallel
+ ParallelResource = schema.GroupResource{
+ Group: GroupName,
+ Resource: "parallels",
+ }
+)
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/doc.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/doc.go
new file mode 100644
index 000000000..90007ffd3
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1 is the v1 version of the API.
+// +k8s:deepcopy-gen=package
+// +groupName=flows.knative.dev
+package v1
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_conversion.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_conversion.go
new file mode 100644
index 000000000..b63af3e65
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_conversion.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+func (source *Parallel) ConvertTo(ctx context.Context, sink apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+func (sink *Parallel) ConvertFrom(ctx context.Context, source apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", source)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_defaults.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_defaults.go
new file mode 100644
index 000000000..f739ed317
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_defaults.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "knative.dev/eventing/pkg/apis/messaging/config"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ "knative.dev/pkg/apis"
+)
+
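+// SetDefaults fills in the ChannelTemplate from the namespace (or cluster) default channel
+// configuration when none is specified, then defaults the individual branches and the reply.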
+func (p *Parallel) SetDefaults(ctx context.Context) {
+ if p == nil {
+ return
+ }
+
+ withNS := apis.WithinParent(ctx, p.ObjectMeta)
+ if p.Spec.ChannelTemplate == nil {
+ cfg := config.FromContextOrDefaults(ctx)
+ c, err := cfg.ChannelDefaults.GetChannelConfig(apis.ParentMeta(ctx).Namespace)
+
+ if err == nil {
+ p.Spec.ChannelTemplate = &messagingv1.ChannelTemplateSpec{
+ TypeMeta: c.TypeMeta,
+ Spec: c.Spec,
+ }
+ }
+ }
+ p.Spec.SetDefaults(withNS)
+}
+
+func (ps *ParallelSpec) SetDefaults(ctx context.Context) {
+ for _, branch := range ps.Branches {
+ if branch.Filter != nil {
+ branch.Filter.SetDefaults(ctx)
+ }
+ branch.Subscriber.SetDefaults(ctx)
+ if branch.Reply != nil {
+ branch.Reply.SetDefaults(ctx)
+ }
+ }
+ if ps.Reply != nil {
+ ps.Reply.SetDefaults(ctx)
+ }
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_lifecycle.go
new file mode 100644
index 000000000..e8cbb3da1
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_lifecycle.go
@@ -0,0 +1,243 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ duckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ "knative.dev/pkg/apis"
+ pkgduckv1 "knative.dev/pkg/apis/duck/v1"
+)
+
+var pCondSet = apis.NewLivingConditionSet(ParallelConditionReady, ParallelConditionChannelsReady, ParallelConditionSubscriptionsReady, ParallelConditionAddressable, ParallelConditionEventPoliciesReady)
+
+const (
+ // ParallelConditionReady has status True when all subconditions below have been set to True.
+ ParallelConditionReady = apis.ConditionReady
+
+ // ParallelConditionChannelsReady has status True when all the channels created as part of
+ // this parallel are ready.
+ ParallelConditionChannelsReady apis.ConditionType = "ChannelsReady"
+
+ // ParallelConditionSubscriptionsReady has status True when all the subscriptions created as part of
+ // this parallel are ready.
+ ParallelConditionSubscriptionsReady apis.ConditionType = "SubscriptionsReady"
+
+ // ParallelConditionAddressable has status true when this Parallel meets
+ // the Addressable contract and has a non-empty hostname.
+ ParallelConditionAddressable apis.ConditionType = "Addressable"
+
+ // ParallelConditionEventPoliciesReady has status True when all the EventPolicies
+ // applying to this Parallel are ready, or when there are no EventPolicies.
+ ParallelConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady"
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*Parallel) GetConditionSet() apis.ConditionSet {
+ return pCondSet
+}
+
+// GetGroupVersionKind returns GroupVersionKind for Parallel
+func (*Parallel) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("Parallel")
+}
+
+// GetUntypedSpec returns the spec of the Parallel.
+func (p *Parallel) GetUntypedSpec() interface{} {
+ return p.Spec
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (ps *ParallelStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return pCondSet.Manage(ps).GetCondition(t)
+}
+
+// IsReady returns true if the resource is ready overall.
+func (ps *ParallelStatus) IsReady() bool {
+ return pCondSet.Manage(ps).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (ps *ParallelStatus) InitializeConditions() {
+ pCondSet.Manage(ps).InitializeConditions()
+}
+
+// PropagateSubscriptionStatuses sets the ParallelConditionSubscriptionsReady based on
+// the status of the incoming subscriptions.
+func (ps *ParallelStatus) PropagateSubscriptionStatuses(filterSubscriptions []*messagingv1.Subscription, subscriptions []*messagingv1.Subscription) {
+ if ps.BranchStatuses == nil || len(subscriptions) != len(ps.BranchStatuses) {
+ ps.BranchStatuses = make([]ParallelBranchStatus, len(subscriptions))
+ }
+ ps.Auth = nil
+ allReady := true
+ // If there are no subscriptions, treat that as a False branch. Could go either way, but this seems right.
+ if len(subscriptions) == 0 {
+ allReady = false
+ }
+
+ for i, s := range subscriptions {
+ ps.BranchStatuses[i].SubscriptionStatus = ParallelSubscriptionStatus{
+ Subscription: corev1.ObjectReference{
+ APIVersion: s.APIVersion,
+ Kind: s.Kind,
+ Name: s.Name,
+ Namespace: s.Namespace,
+ },
+ }
+
+ readyCondition := s.Status.GetTopLevelCondition()
+ if readyCondition != nil {
+ ps.BranchStatuses[i].SubscriptionStatus.ReadyCondition = *readyCondition
+ if readyCondition.Status != corev1.ConditionTrue {
+ allReady = false
+ }
+ } else {
+ allReady = false
+ }
+
+ fs := filterSubscriptions[i]
+ ps.BranchStatuses[i].FilterSubscriptionStatus = ParallelSubscriptionStatus{
+ Subscription: corev1.ObjectReference{
+ APIVersion: fs.APIVersion,
+ Kind: fs.Kind,
+ Name: fs.Name,
+ Namespace: fs.Namespace,
+ },
+ }
+ readyCondition = fs.Status.GetCondition(messagingv1.SubscriptionConditionReady)
+ if readyCondition != nil {
+ ps.BranchStatuses[i].FilterSubscriptionStatus.ReadyCondition = *readyCondition
+ if readyCondition.Status != corev1.ConditionTrue {
+ allReady = false
+ }
+ } else {
+ allReady = false
+ }
+
+ if fs.Status.Auth != nil && fs.Status.Auth.ServiceAccountName != nil {
+ if ps.Auth == nil {
+ ps.Auth = &pkgduckv1.AuthStatus{}
+ }
+ ps.Auth.ServiceAccountNames = append(ps.Auth.ServiceAccountNames, *fs.Status.Auth.ServiceAccountName)
+ }
+
+ if s.Status.Auth != nil && s.Status.Auth.ServiceAccountName != nil {
+ if ps.Auth == nil {
+ ps.Auth = &pkgduckv1.AuthStatus{}
+ }
+ ps.Auth.ServiceAccountNames = append(ps.Auth.ServiceAccountNames, *s.Status.Auth.ServiceAccountName)
+ }
+ }
+ if allReady {
+ pCondSet.Manage(ps).MarkTrue(ParallelConditionSubscriptionsReady)
+ } else {
+ ps.MarkSubscriptionsNotReady("SubscriptionsNotReady", "Subscriptions are not ready yet, or there are none")
+ }
+}
+
+// PropagateChannelStatuses sets the ChannelStatuses and ParallelConditionChannelsReady based on the
+// status of the incoming channels.
+func (ps *ParallelStatus) PropagateChannelStatuses(ingressChannel *duckv1.Channelable, channels []*duckv1.Channelable) {
+ if ps.BranchStatuses == nil || len(channels) != len(ps.BranchStatuses) {
+ ps.BranchStatuses = make([]ParallelBranchStatus, len(channels))
+ }
+ allReady := true
+
+ ps.IngressChannelStatus.Channel = corev1.ObjectReference{
+ APIVersion: ingressChannel.APIVersion,
+ Kind: ingressChannel.Kind,
+ Name: ingressChannel.Name,
+ Namespace: ingressChannel.Namespace,
+ }
+
+ address := ingressChannel.Status.AddressStatus.Address
+ if address != nil {
+ ps.IngressChannelStatus.ReadyCondition = apis.Condition{Type: apis.ConditionReady, Status: corev1.ConditionTrue}
+ } else {
+ ps.IngressChannelStatus.ReadyCondition = apis.Condition{Type: apis.ConditionReady, Status: corev1.ConditionFalse, Reason: "NotAddressable", Message: "Channel is not addressable"}
+ allReady = false
+ }
+ ps.setAddress(address)
+
+ for i, c := range channels {
+ ps.BranchStatuses[i].FilterChannelStatus = ParallelChannelStatus{
+ Channel: corev1.ObjectReference{
+ APIVersion: c.APIVersion,
+ Kind: c.Kind,
+ Name: c.Name,
+ Namespace: c.Namespace,
+ },
+ }
+ // TODO: Once the addressable has a real status to dig through, use that here instead of
+ // addressable, because it might be addressable but not ready.
+ address := c.Status.AddressStatus.Address
+ if address != nil {
+ ps.BranchStatuses[i].FilterChannelStatus.ReadyCondition = apis.Condition{Type: apis.ConditionReady, Status: corev1.ConditionTrue}
+ } else {
+ ps.BranchStatuses[i].FilterChannelStatus.ReadyCondition = apis.Condition{Type: apis.ConditionReady, Status: corev1.ConditionFalse, Reason: "NotAddressable", Message: "Channel is not addressable"}
+ allReady = false
+ }
+ }
+ if allReady {
+ pCondSet.Manage(ps).MarkTrue(ParallelConditionChannelsReady)
+ } else {
+ ps.MarkChannelsNotReady("ChannelsNotReady", "Channels are not ready yet, or there are none")
+ }
+}
+
+func (ps *ParallelStatus) MarkChannelsNotReady(reason, messageFormat string, messageA ...interface{}) {
+ pCondSet.Manage(ps).MarkFalse(ParallelConditionChannelsReady, reason, messageFormat, messageA...)
+}
+
+func (ps *ParallelStatus) MarkSubscriptionsNotReady(reason, messageFormat string, messageA ...interface{}) {
+ pCondSet.Manage(ps).MarkFalse(ParallelConditionSubscriptionsReady, reason, messageFormat, messageA...)
+}
+
+func (ps *ParallelStatus) MarkAddressableNotReady(reason, messageFormat string, messageA ...interface{}) {
+ pCondSet.Manage(ps).MarkFalse(ParallelConditionAddressable, reason, messageFormat, messageA...)
+}
+
+func (ps *ParallelStatus) setAddress(address *pkgduckv1.Addressable) {
+ ps.Address = address
+ if address == nil {
+ pCondSet.Manage(ps).MarkFalse(ParallelConditionAddressable, "emptyAddress", "addressable is nil")
+ } else {
+ pCondSet.Manage(ps).MarkTrue(ParallelConditionAddressable)
+ }
+}
+
+// MarkEventPoliciesFailed marks the ParallelConditionEventPoliciesReady as False with the given reason and message.
+func (ps *ParallelStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) {
+ pCondSet.Manage(ps).MarkFalse(ParallelConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+// MarkEventPoliciesUnknown marks the ParallelConditionEventPoliciesReady as Unknown with the given reason and message.
+func (ps *ParallelStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) {
+ pCondSet.Manage(ps).MarkUnknown(ParallelConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+// MarkEventPoliciesTrue marks the ParallelConditionEventPoliciesReady as True.
+func (ps *ParallelStatus) MarkEventPoliciesTrue() {
+ pCondSet.Manage(ps).MarkTrue(ParallelConditionEventPoliciesReady)
+}
+
+// MarkEventPoliciesTrueWithReason marks the ParallelConditionEventPoliciesReady as True with the given reason and message.
+func (ps *ParallelStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) {
+ pCondSet.Manage(ps).MarkTrueWithReason(ParallelConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_types.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_types.go
new file mode 100644
index 000000000..cccf1cea3
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_types.go
@@ -0,0 +1,172 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// Parallel defines conditional branches that will be wired in
+// series through Channels and Subscriptions.
+type Parallel struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired state of the Parallel.
+ Spec ParallelSpec `json:"spec,omitempty"`
+
+ // Status represents the current state of the Parallel. This data may be out of
+ // date.
+ // +optional
+ Status ParallelStatus `json:"status,omitempty"`
+}
+
+var (
+ // Check that Parallel can be validated and defaulted.
+ _ apis.Validatable = (*Parallel)(nil)
+ _ apis.Defaultable = (*Parallel)(nil)
+
+ // Check that Parallel can return its spec untyped.
+ _ apis.HasSpec = (*Parallel)(nil)
+
+ // TODO: make appropriate fields immutable.
+ //_ apis.Immutable = (*Parallel)(nil)
+
+ _ runtime.Object = (*Parallel)(nil)
+
+ // Check that we can create OwnerReferences to a Parallel.
+ _ kmeta.OwnerRefable = (*Parallel)(nil)
+
+ // Check that the type conforms to the duck Knative Resource shape.
+ _ duckv1.KRShaped = (*Parallel)(nil)
+)
+
+type ParallelSpec struct {
+ // Branches is the list of Filter/Subscribers pairs.
+ Branches []ParallelBranch `json:"branches"`
+
+ // ChannelTemplate specifies which Channel CRD to use. If left unspecified, it is set to the default Channel CRD
+ // for the namespace (or cluster, in case there are no defaults for the namespace).
+ // +optional
+ ChannelTemplate *messagingv1.ChannelTemplateSpec `json:"channelTemplate"`
+
+ // Reply is a Reference to where the result of a case's Subscriber gets sent
+ // when that case does not have its own Reply.
+ // +optional
+ Reply *duckv1.Destination `json:"reply,omitempty"`
+}
+
+type ParallelBranch struct {
+ // Filter is the expression guarding the branch
+ // +optional
+ Filter *duckv1.Destination `json:"filter,omitempty"`
+
+ // Subscriber receiving the event when the filter passes
+ Subscriber duckv1.Destination `json:"subscriber"`
+
+ // Reply is a Reference to where the result of this case's Subscriber gets sent.
+ // If not specified, the result is sent to the Parallel Reply.
+ // +optional
+ Reply *duckv1.Destination `json:"reply,omitempty"`
+
+ // Delivery is the delivery specification for events to the subscriber
+ // This includes things like retries, DLS, etc.
+ // +optional
+ Delivery *eventingduckv1.DeliverySpec `json:"delivery,omitempty"`
+}
+
+// ParallelStatus represents the current state of a Parallel.
+type ParallelStatus struct {
+ // inherits duck/v1 Status, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last processed by the controller.
+ // * Conditions - the latest available observations of a resource's current state.
+ duckv1.Status `json:",inline"`
+
+ // IngressChannelStatus corresponds to the ingress channel status.
+ IngressChannelStatus ParallelChannelStatus `json:"ingressChannelStatus"`
+
+ // BranchStatuses is an array of branch statuses.
+ // It matches the Spec.Branches array in order.
+ BranchStatuses []ParallelBranchStatus `json:"branchStatuses"`
+
+ // AddressStatus is the starting point to this Parallel. Sending to this
+ // will target the first subscriber.
+ // It generally has the form {channel}.{namespace}.svc.{cluster domain name}
+ duckv1.AddressStatus `json:",inline"`
+
+ // Auth provides the relevant information for OIDC authentication.
+ // +optional
+ Auth *duckv1.AuthStatus `json:"auth,omitempty"`
+
+ // AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this Parallel
+ // +optional
+ eventingduckv1.AppliedEventPoliciesStatus `json:",inline"`
+}
+
+// ParallelBranchStatus represents the current state of a Parallel branch
+type ParallelBranchStatus struct {
+ // FilterSubscriptionStatus corresponds to the filter subscription status.
+ FilterSubscriptionStatus ParallelSubscriptionStatus `json:"filterSubscriptionStatus"`
+
+ // FilterChannelStatus corresponds to the filter channel status.
+ FilterChannelStatus ParallelChannelStatus `json:"filterChannelStatus"`
+
+ // SubscriptionStatus corresponds to the subscriber subscription status.
+ SubscriptionStatus ParallelSubscriptionStatus `json:"subscriberSubscriptionStatus"`
+}
+
+type ParallelChannelStatus struct {
+ // Channel is the reference to the underlying channel.
+ Channel corev1.ObjectReference `json:"channel"`
+
+ // ReadyCondition indicates whether the Channel is ready or not.
+ ReadyCondition apis.Condition `json:"ready"`
+}
+
+type ParallelSubscriptionStatus struct {
+ // Subscription is the reference to the underlying Subscription.
+ Subscription corev1.ObjectReference `json:"subscription"`
+
+ // ReadyCondition indicates whether the Subscription is ready or not.
+ ReadyCondition apis.Condition `json:"ready"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ParallelList is a collection of Parallels.
+type ParallelList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Parallel `json:"items"`
+}
+
+// GetStatus retrieves the status of the Parallel. Implements the KRShaped interface.
+func (p *Parallel) GetStatus() *duckv1.Status {
+ return &p.Status.Status
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_validation.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_validation.go
new file mode 100644
index 000000000..78a7703c4
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/parallel_validation.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+)
+
+func (p *Parallel) Validate(ctx context.Context) *apis.FieldError {
+ return p.Spec.Validate(ctx).ViaField("spec")
+}
+
+func (ps *ParallelSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+
+ if len(ps.Branches) == 0 {
+ errs = errs.Also(apis.ErrMissingField("branches"))
+ }
+
+ for i, s := range ps.Branches {
+ if err := s.Filter.Validate(ctx); err != nil {
+ errs = errs.Also(apis.ErrInvalidArrayValue(s, "branches.filter", i))
+ }
+
+ if e := s.Subscriber.Validate(ctx); e != nil {
+ errs = errs.Also(apis.ErrInvalidArrayValue(s, "branches.subscriber", i))
+ }
+
+ if e := s.Reply.Validate(ctx); e != nil {
+ errs = errs.Also(apis.ErrInvalidArrayValue(s, "branches.reply", i))
+ }
+ }
+
+ if ps.ChannelTemplate == nil {
+ errs = errs.Also(apis.ErrMissingField("channelTemplate"))
+ return errs
+ }
+
+ if len(ps.ChannelTemplate.APIVersion) == 0 {
+ errs = errs.Also(apis.ErrMissingField("channelTemplate.apiVersion"))
+ }
+
+ if len(ps.ChannelTemplate.Kind) == 0 {
+ errs = errs.Also(apis.ErrMissingField("channelTemplate.kind"))
+ }
+
+ if err := ps.Reply.Validate(ctx); err != nil {
+ errs = errs.Also(err.ViaField("reply"))
+ }
+
+ return errs
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/register.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/register.go
new file mode 100644
index 000000000..e0bd176de
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/register.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "knative.dev/eventing/pkg/apis/flows"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: flows.GroupName, Version: "v1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &Sequence{},
+ &SequenceList{},
+ &Parallel{},
+ &ParallelList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_conversion.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_conversion.go
new file mode 100644
index 000000000..70392cbb4
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_conversion.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+func (source *Sequence) ConvertTo(ctx context.Context, sink apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+func (sink *Sequence) ConvertFrom(ctx context.Context, source apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", source)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_defaults.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_defaults.go
new file mode 100644
index 000000000..d78c63a13
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_defaults.go
@@ -0,0 +1,62 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "knative.dev/eventing/pkg/apis/messaging/config"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ "knative.dev/pkg/apis"
+)
+
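+// SetDefaults fills in the ChannelTemplate from the namespace (or cluster) default channel
+// configuration when none is specified, then defaults the steps and the reply.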
+func (s *Sequence) SetDefaults(ctx context.Context) {
+ if s == nil {
+ return
+ }
+
+ withNS := apis.WithinParent(ctx, s.ObjectMeta)
+ if s.Spec.ChannelTemplate == nil {
+ cfg := config.FromContextOrDefaults(ctx)
+ c, err := cfg.ChannelDefaults.GetChannelConfig(apis.ParentMeta(ctx).Namespace)
+
+ if err == nil {
+ s.Spec.ChannelTemplate = &messagingv1.ChannelTemplateSpec{
+ TypeMeta: c.TypeMeta,
+ Spec: c.Spec,
+ }
+ }
+ }
+ s.Spec.SetDefaults(withNS)
+}
+
+func (ss *SequenceSpec) SetDefaults(ctx context.Context) {
+ // Default the namespace for all the steps.
+ for _, s := range ss.Steps {
+ s.SetDefaults(ctx)
+ }
+ // Default the reply
+ if ss.Reply != nil {
+ ss.Reply.SetDefaults(ctx)
+ }
+}
+
+func (ss *SequenceStep) SetDefaults(ctx context.Context) {
+ ss.Destination.SetDefaults(ctx)
+
+ // No delivery defaults.
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_lifecycle.go
new file mode 100644
index 000000000..7e6817fc6
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_lifecycle.go
@@ -0,0 +1,230 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+)
+
+var sCondSet = apis.NewLivingConditionSet(
+ SequenceConditionReady,
+ SequenceConditionChannelsReady,
+ SequenceConditionSubscriptionsReady,
+ SequenceConditionAddressable,
+ SequenceConditionEventPoliciesReady,
+)
+
+const (
+ // SequenceConditionReady has status True when all subconditions below have been set to True.
+ SequenceConditionReady = apis.ConditionReady
+
+ // SequenceConditionChannelsReady has status True when all the channels created as part of
+ // this sequence are ready.
+ SequenceConditionChannelsReady apis.ConditionType = "ChannelsReady"
+
+ // SequenceConditionSubscriptionsReady has status True when all the subscriptions created as part of
+ // this sequence are ready.
+ SequenceConditionSubscriptionsReady apis.ConditionType = "SubscriptionsReady"
+
+ // SequenceConditionAddressable has status true when this Sequence meets
+ // the Addressable contract and has a non-empty hostname.
+ SequenceConditionAddressable apis.ConditionType = "Addressable"
+
+ // SequenceConditionEventPoliciesReady has status True when all the applying EventPolicies for this
+ // Sequence are ready.
+ SequenceConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady"
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*Sequence) GetConditionSet() apis.ConditionSet {
+ return sCondSet
+}
+
+// GetGroupVersionKind returns GroupVersionKind for Sequence
+func (*Sequence) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("Sequence")
+}
+
+// GetUntypedSpec returns the spec of the Sequence.
+func (s *Sequence) GetUntypedSpec() interface{} {
+ return s.Spec
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (ss *SequenceStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return sCondSet.Manage(ss).GetCondition(t)
+}
+
+// IsReady returns true if the resource is ready overall.
+func (ss *SequenceStatus) IsReady() bool {
+ return sCondSet.Manage(ss).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (ss *SequenceStatus) InitializeConditions() {
+ sCondSet.Manage(ss).InitializeConditions()
+}
+
+// PropagateSubscriptionStatuses sets the SubscriptionStatuses and SequenceConditionSubscriptionsReady based on
+// the status of the incoming subscriptions.
+func (ss *SequenceStatus) PropagateSubscriptionStatuses(subscriptions []*messagingv1.Subscription) {
+ ss.SubscriptionStatuses = make([]SequenceSubscriptionStatus, len(subscriptions))
+ ss.Auth = nil
+ allReady := true
+ // If there are no subscriptions, treat that as a False case. Could go either way, but this seems right.
+ if len(subscriptions) == 0 {
+ allReady = false
+ }
+
+ for i, s := range subscriptions {
+ ss.SubscriptionStatuses[i] = SequenceSubscriptionStatus{
+ Subscription: corev1.ObjectReference{
+ APIVersion: s.APIVersion,
+ Kind: s.Kind,
+ Name: s.Name,
+ Namespace: s.Namespace,
+ },
+ }
+
+ if readyCondition := s.Status.GetCondition(messagingv1.SubscriptionConditionReady); readyCondition != nil {
+ ss.SubscriptionStatuses[i].ReadyCondition = *readyCondition
+ if !readyCondition.IsTrue() {
+ allReady = false
+ }
+ } else {
+ ss.SubscriptionStatuses[i].ReadyCondition = apis.Condition{
+ Type: apis.ConditionReady,
+ Status: corev1.ConditionUnknown,
+ Reason: "NoReady",
+ Message: "Subscription does not have Ready condition",
+ LastTransitionTime: apis.VolatileTime{Inner: metav1.NewTime(time.Now())},
+ }
+ allReady = false
+ }
+
+ if s.Status.Auth != nil && s.Status.Auth.ServiceAccountName != nil {
+ if ss.Auth == nil {
+ ss.Auth = &duckv1.AuthStatus{}
+ }
+
+ ss.Auth.ServiceAccountNames = append(ss.Auth.ServiceAccountNames, *s.Status.Auth.ServiceAccountName)
+ }
+ }
+ if allReady {
+ sCondSet.Manage(ss).MarkTrue(SequenceConditionSubscriptionsReady)
+ } else {
+ ss.MarkSubscriptionsNotReady("SubscriptionsNotReady", "Subscriptions are not ready yet, or there are none")
+ }
+}
+
+// PropagateChannelStatuses sets the ChannelStatuses and SequenceConditionChannelsReady based on the
+// status of the incoming channels.
+func (ss *SequenceStatus) PropagateChannelStatuses(channels []*eventingduckv1.Channelable) {
+ ss.ChannelStatuses = make([]SequenceChannelStatus, len(channels))
+ allReady := true
+ // If there are no channels, treat that as a False case. Could go either way, but this seems right.
+ if len(channels) == 0 {
+ allReady = false
+
+ }
+ for i, c := range channels {
+ // Mark the Sequence address as the Address of the first channel.
+ if i == 0 {
+ ss.setAddress(c.Status.Address)
+ }
+
+ ss.ChannelStatuses[i] = SequenceChannelStatus{
+ Channel: corev1.ObjectReference{
+ APIVersion: c.APIVersion,
+ Kind: c.Kind,
+ Name: c.Name,
+ Namespace: c.Namespace,
+ },
+ }
+
+ if ready := c.Status.GetCondition(apis.ConditionReady); ready != nil {
+ ss.ChannelStatuses[i].ReadyCondition = *ready
+ if !ready.IsTrue() {
+ allReady = false
+ }
+ } else {
+ ss.ChannelStatuses[i].ReadyCondition = apis.Condition{
+ Type: apis.ConditionReady,
+ Status: corev1.ConditionUnknown,
+ Reason: "NoReady",
+ Message: "Channel does not have Ready condition",
+ LastTransitionTime: apis.VolatileTime{Inner: metav1.NewTime(time.Now())},
+ }
+ allReady = false
+ }
+ }
+ if allReady {
+ sCondSet.Manage(ss).MarkTrue(SequenceConditionChannelsReady)
+ } else {
+ ss.MarkChannelsNotReady("ChannelsNotReady", "Channels are not ready yet, or there are none")
+ }
+}
+
+func (ss *SequenceStatus) MarkChannelsNotReady(reason, messageFormat string, messageA ...interface{}) {
+ sCondSet.Manage(ss).MarkUnknown(SequenceConditionChannelsReady, reason, messageFormat, messageA...)
+}
+
+func (ss *SequenceStatus) MarkSubscriptionsNotReady(reason, messageFormat string, messageA ...interface{}) {
+ sCondSet.Manage(ss).MarkUnknown(SequenceConditionSubscriptionsReady, reason, messageFormat, messageA...)
+}
+
+func (ss *SequenceStatus) MarkAddressableNotReady(reason, messageFormat string, messageA ...interface{}) {
+ sCondSet.Manage(ss).MarkUnknown(SequenceConditionAddressable, reason, messageFormat, messageA...)
+}
+
+func (ss *SequenceStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) {
+ sCondSet.Manage(ss).MarkFalse(SequenceConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (ss *SequenceStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) {
+ sCondSet.Manage(ss).MarkUnknown(SequenceConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (ss *SequenceStatus) MarkEventPoliciesTrue() {
+ sCondSet.Manage(ss).MarkTrue(SequenceConditionEventPoliciesReady)
+}
+
+func (ss *SequenceStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) {
+ sCondSet.Manage(ss).MarkTrueWithReason(SequenceConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (ss *SequenceStatus) setAddress(address *duckv1.Addressable) {
+ if address == nil || address.URL == nil {
+ ss.Address = duckv1.Addressable{}
+ sCondSet.Manage(ss).MarkUnknown(SequenceConditionAddressable, "emptyAddress", "addressable is nil")
+ } else {
+ ss.Address = duckv1.Addressable{
+ URL: address.URL,
+ CACerts: address.CACerts,
+ Audience: address.Audience,
+ }
+ sCondSet.Manage(ss).MarkTrue(SequenceConditionAddressable)
+ }
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_types.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_types.go
new file mode 100644
index 000000000..6aea66331
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_types.go
@@ -0,0 +1,156 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+
+ eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// Sequence defines a sequence of Subscribers that will be wired in
+// series through Channels and Subscriptions.
+type Sequence struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired state of the Sequence.
+ Spec SequenceSpec `json:"spec,omitempty"`
+
+ // Status represents the current state of the Sequence. This data may be out of
+ // date.
+ // +optional
+ Status SequenceStatus `json:"status,omitempty"`
+}
+
+var (
+ // Check that Sequence can be validated and defaulted.
+ _ apis.Validatable = (*Sequence)(nil)
+ _ apis.Defaultable = (*Sequence)(nil)
+
+ // Check that Sequence can return its spec untyped.
+ _ apis.HasSpec = (*Sequence)(nil)
+
+ // TODO: make appropriate fields immutable.
+ //_ apis.Immutable = (*Sequence)(nil)
+
+ _ runtime.Object = (*Sequence)(nil)
+
+ // Check that we can create OwnerReferences to a Sequence.
+ _ kmeta.OwnerRefable = (*Sequence)(nil)
+
+ // Check that the type conforms to the duck Knative Resource shape.
+ _ duckv1.KRShaped = (*Sequence)(nil)
+)
+
+type SequenceSpec struct {
+ // Steps is the list of Destinations (processors / functions) that will be called in the order
+	// provided. Each step has its own delivery options.
+ Steps []SequenceStep `json:"steps"`
+
+ // ChannelTemplate specifies which Channel CRD to use. If left unspecified, it is set to the default Channel CRD
+ // for the namespace (or cluster, in case there are no defaults for the namespace).
+ // +optional
+ ChannelTemplate *messagingv1.ChannelTemplateSpec `json:"channelTemplate,omitempty"`
+
+ // Reply is a Reference to where the result of the last Subscriber gets sent to.
+ // +optional
+ Reply *duckv1.Destination `json:"reply,omitempty"`
+}
+
+type SequenceStep struct {
+ // Subscriber receiving the step event
+ duckv1.Destination `json:",inline"`
+
+	// Delivery is the delivery specification for events to the subscriber.
+	// This includes things like retries, DLS, etc.
+ // +optional
+ Delivery *eventingduckv1.DeliverySpec `json:"delivery,omitempty"`
+}
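+
+// For illustration only (a hedged sketch with hypothetical names, not upstream
+// documentation): a minimal Sequence built from the types above could look like
+// this, wiring two HTTP steps through InMemoryChannels:
+//
+//	seq := &Sequence{
+//		ObjectMeta: metav1.ObjectMeta{Name: "example-sequence", Namespace: "default"},
+//		Spec: SequenceSpec{
+//			ChannelTemplate: &messagingv1.ChannelTemplateSpec{
+//				TypeMeta: metav1.TypeMeta{APIVersion: "messaging.knative.dev/v1", Kind: "InMemoryChannel"},
+//			},
+//			Steps: []SequenceStep{
+//				{Destination: duckv1.Destination{URI: apis.HTTP("step-one.default.svc.cluster.local")}},
+//				{Destination: duckv1.Destination{URI: apis.HTTP("step-two.default.svc.cluster.local")}},
+//			},
+//		},
+//	}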
+
+type SequenceChannelStatus struct {
+ // Channel is the reference to the underlying channel.
+ Channel corev1.ObjectReference `json:"channel"`
+
+ // ReadyCondition indicates whether the Channel is ready or not.
+ ReadyCondition apis.Condition `json:"ready"`
+}
+
+type SequenceSubscriptionStatus struct {
+ // Subscription is the reference to the underlying Subscription.
+ Subscription corev1.ObjectReference `json:"subscription"`
+
+ // ReadyCondition indicates whether the Subscription is ready or not.
+ ReadyCondition apis.Condition `json:"ready"`
+}
+
+// SequenceStatus represents the current state of a Sequence.
+type SequenceStatus struct {
+ // inherits duck/v1 Status, which currently provides:
+	// * ObservedGeneration - the 'Generation' of the Sequence that was last processed by the controller.
+ // * Conditions - the latest available observations of a resource's current state.
+ duckv1.Status `json:",inline"`
+
+ // SubscriptionStatuses is an array of corresponding Subscription statuses.
+	// Matches the Spec.Steps array in order.
+ // +optional
+ SubscriptionStatuses []SequenceSubscriptionStatus `json:"subscriptionStatuses,omitempty"`
+
+ // ChannelStatuses is an array of corresponding Channel statuses.
+	// Matches the Spec.Steps array in order.
+ // +optional
+ ChannelStatuses []SequenceChannelStatus `json:"channelStatuses,omitempty"`
+
+ // Address is the starting point to this Sequence. Sending to this
+ // will target the first subscriber.
+ // It generally has the form {channel}.{namespace}.svc.{cluster domain name}
+ // +optional
+ Address duckv1.Addressable `json:"address,omitempty"`
+
+ // Auth provides the relevant information for OIDC authentication.
+ // +optional
+ Auth *duckv1.AuthStatus `json:"auth,omitempty"`
+
+	// AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this Sequence.
+ // +optional
+ eventingduckv1.AppliedEventPoliciesStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SequenceList is a collection of Sequences.
+type SequenceList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Sequence `json:"items"`
+}
+
+// GetStatus retrieves the status of the Sequence. Implements the KRShaped interface.
+func (p *Sequence) GetStatus() *duckv1.Status {
+ return &p.Status.Status
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_validation.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_validation.go
new file mode 100644
index 000000000..a059c52cc
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/sequence_validation.go
@@ -0,0 +1,68 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ "knative.dev/pkg/apis"
+)
+
+func (p *Sequence) Validate(ctx context.Context) *apis.FieldError {
+ return p.Spec.Validate(ctx).ViaField("spec")
+}
+
+func (ps *SequenceSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+
+ if len(ps.Steps) == 0 {
+ errs = errs.Also(apis.ErrMissingField("steps"))
+ }
+
+ for i, s := range ps.Steps {
+ if e := s.Validate(ctx); e != nil {
+ errs = errs.Also(apis.ErrInvalidArrayValue(s, "steps", i))
+ }
+ }
+
+ if ps.ChannelTemplate == nil {
+ errs = errs.Also(apis.ErrMissingField("channelTemplate"))
+ } else {
+ if ce := messagingv1.IsValidChannelTemplate(ps.ChannelTemplate); ce != nil {
+ errs = errs.Also(ce.ViaField("channelTemplate"))
+ }
+ }
+
+ if err := ps.Reply.Validate(ctx); err != nil {
+ errs = errs.Also(err.ViaField("reply"))
+ }
+
+ return errs
+}
+
+func (ss *SequenceStep) Validate(ctx context.Context) *apis.FieldError {
+ errs := ss.Destination.Validate(ctx)
+
+ if ss.Delivery != nil {
+ if de := ss.Delivery.Validate(ctx); de != nil {
+ errs = errs.Also(de.ViaField("delivery"))
+ }
+ }
+
+ return errs
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/test_helpers.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/test_helpers.go
new file mode 100644
index 000000000..8c9e432cf
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/test_helpers.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "github.com/google/go-cmp/cmp/cmpopts"
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ "knative.dev/pkg/apis"
+)
+
+var (
+ defaultChannelTemplate = &messagingv1.ChannelTemplateSpec{
+ TypeMeta: v1.TypeMeta{
+ APIVersion: SchemeGroupVersion.String(),
+ Kind: "InMemoryChannel",
+ },
+ }
+ ignoreAllButTypeAndStatus = cmpopts.IgnoreFields(
+ apis.Condition{},
+ "LastTransitionTime", "Message", "Reason", "Severity")
+)
diff --git a/vendor/knative.dev/eventing/pkg/apis/flows/v1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/flows/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..03008035d
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/flows/v1/zz_generated.deepcopy.go
@@ -0,0 +1,432 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ apisduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Parallel) DeepCopyInto(out *Parallel) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Parallel.
+func (in *Parallel) DeepCopy() *Parallel {
+ if in == nil {
+ return nil
+ }
+ out := new(Parallel)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Parallel) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParallelBranch) DeepCopyInto(out *ParallelBranch) {
+ *out = *in
+ if in.Filter != nil {
+ in, out := &in.Filter, &out.Filter
+ *out = new(duckv1.Destination)
+ (*in).DeepCopyInto(*out)
+ }
+ in.Subscriber.DeepCopyInto(&out.Subscriber)
+ if in.Reply != nil {
+ in, out := &in.Reply, &out.Reply
+ *out = new(duckv1.Destination)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Delivery != nil {
+ in, out := &in.Delivery, &out.Delivery
+ *out = new(apisduckv1.DeliverySpec)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelBranch.
+func (in *ParallelBranch) DeepCopy() *ParallelBranch {
+ if in == nil {
+ return nil
+ }
+ out := new(ParallelBranch)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParallelBranchStatus) DeepCopyInto(out *ParallelBranchStatus) {
+ *out = *in
+ in.FilterSubscriptionStatus.DeepCopyInto(&out.FilterSubscriptionStatus)
+ in.FilterChannelStatus.DeepCopyInto(&out.FilterChannelStatus)
+ in.SubscriptionStatus.DeepCopyInto(&out.SubscriptionStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelBranchStatus.
+func (in *ParallelBranchStatus) DeepCopy() *ParallelBranchStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ParallelBranchStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParallelChannelStatus) DeepCopyInto(out *ParallelChannelStatus) {
+ *out = *in
+ out.Channel = in.Channel
+ in.ReadyCondition.DeepCopyInto(&out.ReadyCondition)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelChannelStatus.
+func (in *ParallelChannelStatus) DeepCopy() *ParallelChannelStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ParallelChannelStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParallelList) DeepCopyInto(out *ParallelList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Parallel, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelList.
+func (in *ParallelList) DeepCopy() *ParallelList {
+ if in == nil {
+ return nil
+ }
+ out := new(ParallelList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ParallelList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParallelSpec) DeepCopyInto(out *ParallelSpec) {
+ *out = *in
+ if in.Branches != nil {
+ in, out := &in.Branches, &out.Branches
+ *out = make([]ParallelBranch, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ChannelTemplate != nil {
+ in, out := &in.ChannelTemplate, &out.ChannelTemplate
+ *out = new(messagingv1.ChannelTemplateSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Reply != nil {
+ in, out := &in.Reply, &out.Reply
+ *out = new(duckv1.Destination)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelSpec.
+func (in *ParallelSpec) DeepCopy() *ParallelSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ParallelSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParallelStatus) DeepCopyInto(out *ParallelStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ in.IngressChannelStatus.DeepCopyInto(&out.IngressChannelStatus)
+ if in.BranchStatuses != nil {
+ in, out := &in.BranchStatuses, &out.BranchStatuses
+ *out = make([]ParallelBranchStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.AddressStatus.DeepCopyInto(&out.AddressStatus)
+ if in.Auth != nil {
+ in, out := &in.Auth, &out.Auth
+ *out = new(duckv1.AuthStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ in.AppliedEventPoliciesStatus.DeepCopyInto(&out.AppliedEventPoliciesStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelStatus.
+func (in *ParallelStatus) DeepCopy() *ParallelStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ParallelStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ParallelSubscriptionStatus) DeepCopyInto(out *ParallelSubscriptionStatus) {
+ *out = *in
+ out.Subscription = in.Subscription
+ in.ReadyCondition.DeepCopyInto(&out.ReadyCondition)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ParallelSubscriptionStatus.
+func (in *ParallelSubscriptionStatus) DeepCopy() *ParallelSubscriptionStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ParallelSubscriptionStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Sequence) DeepCopyInto(out *Sequence) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Sequence.
+func (in *Sequence) DeepCopy() *Sequence {
+ if in == nil {
+ return nil
+ }
+ out := new(Sequence)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Sequence) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SequenceChannelStatus) DeepCopyInto(out *SequenceChannelStatus) {
+ *out = *in
+ out.Channel = in.Channel
+ in.ReadyCondition.DeepCopyInto(&out.ReadyCondition)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SequenceChannelStatus.
+func (in *SequenceChannelStatus) DeepCopy() *SequenceChannelStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SequenceChannelStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SequenceList) DeepCopyInto(out *SequenceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Sequence, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SequenceList.
+func (in *SequenceList) DeepCopy() *SequenceList {
+ if in == nil {
+ return nil
+ }
+ out := new(SequenceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SequenceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SequenceSpec) DeepCopyInto(out *SequenceSpec) {
+ *out = *in
+ if in.Steps != nil {
+ in, out := &in.Steps, &out.Steps
+ *out = make([]SequenceStep, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ChannelTemplate != nil {
+ in, out := &in.ChannelTemplate, &out.ChannelTemplate
+ *out = new(messagingv1.ChannelTemplateSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Reply != nil {
+ in, out := &in.Reply, &out.Reply
+ *out = new(duckv1.Destination)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SequenceSpec.
+func (in *SequenceSpec) DeepCopy() *SequenceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SequenceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SequenceStatus) DeepCopyInto(out *SequenceStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ if in.SubscriptionStatuses != nil {
+ in, out := &in.SubscriptionStatuses, &out.SubscriptionStatuses
+ *out = make([]SequenceSubscriptionStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ChannelStatuses != nil {
+ in, out := &in.ChannelStatuses, &out.ChannelStatuses
+ *out = make([]SequenceChannelStatus, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ in.Address.DeepCopyInto(&out.Address)
+ if in.Auth != nil {
+ in, out := &in.Auth, &out.Auth
+ *out = new(duckv1.AuthStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ in.AppliedEventPoliciesStatus.DeepCopyInto(&out.AppliedEventPoliciesStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SequenceStatus.
+func (in *SequenceStatus) DeepCopy() *SequenceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SequenceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SequenceStep) DeepCopyInto(out *SequenceStep) {
+ *out = *in
+ in.Destination.DeepCopyInto(&out.Destination)
+ if in.Delivery != nil {
+ in, out := &in.Delivery, &out.Delivery
+ *out = new(apisduckv1.DeliverySpec)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SequenceStep.
+func (in *SequenceStep) DeepCopy() *SequenceStep {
+ if in == nil {
+ return nil
+ }
+ out := new(SequenceStep)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SequenceSubscriptionStatus) DeepCopyInto(out *SequenceSubscriptionStatus) {
+ *out = *in
+ out.Subscription = in.Subscription
+ in.ReadyCondition.DeepCopyInto(&out.ReadyCondition)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SequenceSubscriptionStatus.
+func (in *SequenceSubscriptionStatus) DeepCopy() *SequenceSubscriptionStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SequenceSubscriptionStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/config/channel_defaults.go b/vendor/knative.dev/eventing/pkg/apis/messaging/config/channel_defaults.go
new file mode 100644
index 000000000..2a99ad207
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/config/channel_defaults.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ "sigs.k8s.io/yaml"
+)
+
+const (
+	// ChannelDefaultsConfigName is the name of the ConfigMap for the default
+	// configs that channels should use.
+ ChannelDefaultsConfigName = "default-ch-webhook"
+
+ // ChannelDefaulterKey is the key in the ConfigMap to get the name of the default
+ // Channel CRD.
+ ChannelDefaulterKey = "default-ch-config"
+)
+
+// NewChannelDefaultsConfigFromMap creates a Defaults from the supplied Map
+func NewChannelDefaultsConfigFromMap(data map[string]string) (*ChannelDefaults, error) {
+ nc := &ChannelDefaults{}
+
+	// Parse out the default Channel configuration section.
+ value, present := data[ChannelDefaulterKey]
+ if !present || value == "" {
+ return nil, fmt.Errorf("ConfigMap is missing (or empty) key: %q : %v", ChannelDefaulterKey, data)
+ }
+ if err := parseEntry(value, nc); err != nil {
+ return nil, fmt.Errorf("Failed to parse the entry: %s", err)
+ }
+ return nc, nil
+}
+
+func parseEntry(entry string, out interface{}) error {
+ j, err := yaml.YAMLToJSON([]byte(entry))
+ if err != nil {
+ return fmt.Errorf("ConfigMap's value could not be converted to JSON: %s : %v", err, entry)
+ }
+ return json.Unmarshal(j, &out)
+}
+
+// NewChannelDefaultsConfigFromConfigMap creates a ChannelDefaults from the supplied configMap
+func NewChannelDefaultsConfigFromConfigMap(config *corev1.ConfigMap) (*ChannelDefaults, error) {
+ return NewChannelDefaultsConfigFromMap(config.Data)
+}
+
+// ChannelDefaults includes the default values to be populated by the webhook.
+type ChannelDefaults struct {
+	// NamespaceDefaults are the default Channel CRDs for each namespace. The namespace is the
+	// key and the value is the default ChannelTemplate to use.
+	NamespaceDefaults map[string]*ChannelTemplateSpec `json:"namespaceDefaults,omitempty"`
+	// ClusterDefault is the default Channel CRD for all namespaces that are not in
+	// NamespaceDefaults.
+ ClusterDefault *ChannelTemplateSpec `json:"clusterDefault,omitempty"`
+}
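+
+// For illustration only (a hedged sketch; the namespace name is hypothetical):
+// a "default-ch-config" entry of the following shape parses into ChannelDefaults
+// via NewChannelDefaultsConfigFromMap:
+//
+//	defaults, err := NewChannelDefaultsConfigFromMap(map[string]string{
+//		ChannelDefaulterKey: "clusterDefault:\n  apiVersion: messaging.knative.dev/v1\n  kind: InMemoryChannel\n",
+//	})
+//	if err != nil { /* handle the error */ }
+//	tpl, _ := defaults.GetChannelConfig("some-namespace") // no namespace default, so the cluster default is returned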
+
+// GetChannelConfig returns a namespace-specific Channel configuration if one
+// exists; otherwise it returns the cluster default, and if that does not exist
+// either, it returns an error.
+func (d *ChannelDefaults) GetChannelConfig(ns string) (*ChannelTemplateSpec, error) {
+ if d == nil {
+ return nil, errors.New("Defaults are nil")
+ }
+ value, present := d.NamespaceDefaults[ns]
+ if present {
+ return value, nil
+ }
+ if d.ClusterDefault != nil {
+ return d.ClusterDefault, nil
+ }
+ return nil, errors.New("Defaults for Channel Configurations have not been set up.")
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/config/channel_template_types.go b/vendor/knative.dev/eventing/pkg/apis/messaging/config/channel_template_types.go
new file mode 100644
index 000000000..bb9e7ccfa
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/config/channel_template_types.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// This is a verbatim copy of messaging/v1/channel_template_types.go,
+// but we cannot import messaging/v1 since it uses this package as well,
+// so we replicate the type here.
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type ChannelTemplateSpec struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Spec defines the Spec to use for each channel created. Passed
+ // in verbatim to the Channel CRD as Spec section.
+ // +optional
+ Spec *runtime.RawExtension `json:"spec,omitempty"`
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/config/doc.go b/vendor/knative.dev/eventing/pkg/apis/messaging/config/doc.go
new file mode 100644
index 000000000..61305f830
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/config/doc.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// +k8s:deepcopy-gen=package
+
+// Package config holds the typed objects that define the schemas for
+// ConfigMap objects that pertain to our API objects.
+package config
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/config/store.go b/vendor/knative.dev/eventing/pkg/apis/messaging/config/store.go
new file mode 100644
index 000000000..88a2b4527
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/config/store.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2020 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "context"
+
+ "knative.dev/pkg/configmap"
+)
+
+type channelCfgKey struct{}
+
+// Config holds the collection of configurations that we attach to contexts.
+// +k8s:deepcopy-gen=false
+type Config struct {
+ ChannelDefaults *ChannelDefaults
+}
+
+// FromContext extracts a Config from the provided context.
+func FromContext(ctx context.Context) *Config {
+ x, ok := ctx.Value(channelCfgKey{}).(*Config)
+ if ok {
+ return x
+ }
+ return nil
+}
+
+// FromContextOrDefaults is like FromContext, but when no Config is attached it
+// returns a Config populated with the defaults for each of the Config fields.
+func FromContextOrDefaults(ctx context.Context) *Config {
+ if cfg := FromContext(ctx); cfg != nil {
+ return cfg
+ }
+ channelDefaults, _ := NewChannelDefaultsConfigFromMap(map[string]string{})
+ return &Config{
+ ChannelDefaults: channelDefaults,
+ }
+}
+
+// ToContext attaches the provided Config to the provided context, returning the
+// new context with the Config attached.
+func ToContext(ctx context.Context, c *Config) context.Context {
+ return context.WithValue(ctx, channelCfgKey{}, c)
+}
+
+// Store is a typed wrapper around configmap.Untyped store to handle our configmaps.
+// +k8s:deepcopy-gen=false
+type Store struct {
+ *configmap.UntypedStore
+}
+
+// NewStore creates a new store of Configs and optionally calls functions when ConfigMaps are updated.
+func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value interface{})) *Store {
+ store := &Store{
+ UntypedStore: configmap.NewUntypedStore(
+ "channeldefaults",
+ logger,
+ configmap.Constructors{
+ ChannelDefaultsConfigName: NewChannelDefaultsConfigFromConfigMap,
+ },
+ onAfterStore...,
+ ),
+ }
+
+ return store
+}
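+
+// A minimal usage sketch (assumed wiring, not part of this file's API): the
+// store is typically registered with a ConfigMap watcher and its state is
+// attached to a context before defaulting or validation reads it back out.
+//
+//	store := NewStore(logger)  // logger is a configmap.Logger
+//	store.WatchConfigs(cmw)    // cmw is a configmap.Watcher
+//	ctx = store.ToContext(ctx)
+//	defaults := FromContext(ctx).ChannelDefaults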
+
+// ToContext attaches the current Config state to the provided context.
+func (s *Store) ToContext(ctx context.Context) context.Context {
+ return ToContext(ctx, s.Load())
+}
+
+// Load creates a Config from the current config state of the Store.
+func (s *Store) Load() *Config {
+ return &Config{
+ ChannelDefaults: s.UntypedLoad(ChannelDefaultsConfigName).(*ChannelDefaults).DeepCopy(),
+ }
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/config/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/messaging/config/zz_generated.deepcopy.go
new file mode 100644
index 000000000..0d2c38a6d
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/config/zz_generated.deepcopy.go
@@ -0,0 +1,92 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package config
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChannelDefaults) DeepCopyInto(out *ChannelDefaults) {
+ *out = *in
+ if in.NamespaceDefaults != nil {
+ in, out := &in.NamespaceDefaults, &out.NamespaceDefaults
+ *out = make(map[string]*ChannelTemplateSpec, len(*in))
+ for key, val := range *in {
+ var outVal *ChannelTemplateSpec
+ if val == nil {
+ (*out)[key] = nil
+ } else {
+ in, out := &val, &outVal
+ *out = new(ChannelTemplateSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ (*out)[key] = outVal
+ }
+ }
+ if in.ClusterDefault != nil {
+ in, out := &in.ClusterDefault, &out.ClusterDefault
+ *out = new(ChannelTemplateSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelDefaults.
+func (in *ChannelDefaults) DeepCopy() *ChannelDefaults {
+ if in == nil {
+ return nil
+ }
+ out := new(ChannelDefaults)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChannelTemplateSpec) DeepCopyInto(out *ChannelTemplateSpec) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.Spec != nil {
+ in, out := &in.Spec, &out.Spec
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelTemplateSpec.
+func (in *ChannelTemplateSpec) DeepCopy() *ChannelTemplateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ChannelTemplateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ChannelTemplateSpec) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/register.go b/vendor/knative.dev/eventing/pkg/apis/messaging/register.go
new file mode 100644
index 000000000..96e6f8b44
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/register.go
@@ -0,0 +1,44 @@
+/*
+Copyright 2019 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package messaging
+
+import "k8s.io/apimachinery/pkg/runtime/schema"
+
+const (
+ GroupName = "messaging.knative.dev"
+ // SubscribableDuckVersionAnnotation is the annotation we use to declare
+ // which Subscribable duck version type we conform to.
+ SubscribableDuckVersionAnnotation = "messaging.knative.dev/subscribable"
+)
+
+var (
+ // SubscriptionsResource represents a Knative Subscription
+ SubscriptionsResource = schema.GroupResource{
+ Group: GroupName,
+ Resource: "subscriptions",
+ }
+ // ChannelsResource represents a Knative Channel
+ ChannelsResource = schema.GroupResource{
+ Group: GroupName,
+ Resource: "channels",
+ }
+	// InMemoryChannelsResource represents a Knative InMemoryChannel
+ InMemoryChannelsResource = schema.GroupResource{
+ Group: GroupName,
+ Resource: "inmemorychannels",
+ }
+)
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_conversion.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_conversion.go
new file mode 100644
index 000000000..d5ce8d46c
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_conversion.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+func (source *Channel) ConvertTo(ctx context.Context, sink apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+func (sink *Channel) ConvertFrom(ctx context.Context, source apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", source)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_defaults.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_defaults.go
new file mode 100644
index 000000000..f9d55953b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_defaults.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+
+ "knative.dev/eventing/pkg/apis/messaging"
+ "knative.dev/eventing/pkg/apis/messaging/config"
+)
+
+func (c *Channel) SetDefaults(ctx context.Context) {
+ if c.Annotations == nil {
+ c.Annotations = make(map[string]string)
+ }
+ if _, ok := c.Annotations[messaging.SubscribableDuckVersionAnnotation]; !ok {
+ c.Annotations[messaging.SubscribableDuckVersionAnnotation] = "v1"
+ }
+
+ c.Spec.SetDefaults(apis.WithinParent(ctx, c.ObjectMeta))
+}
+
+func (cs *ChannelSpec) SetDefaults(ctx context.Context) {
+ if cs.ChannelTemplate != nil {
+ return
+ }
+
+ cfg := config.FromContextOrDefaults(ctx)
+ c, err := cfg.ChannelDefaults.GetChannelConfig(apis.ParentMeta(ctx).Namespace)
+ if err == nil {
+ cs.ChannelTemplate = &ChannelTemplateSpec{
+ c.TypeMeta,
+ c.Spec,
+ }
+ }
+ cs.Delivery.SetDefaults(ctx)
+}
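+
+// A rough sketch of the flow above (hypothetical values; metav1 is
+// k8s.io/apimachinery/pkg/apis/meta/v1): with a config.Config attached to the
+// context and a cluster default of InMemoryChannel, an otherwise empty Channel
+// gets its Spec.ChannelTemplate filled in during defaulting.
+//
+//	ctx := config.ToContext(context.Background(), &config.Config{
+//		ChannelDefaults: &config.ChannelDefaults{
+//			ClusterDefault: &config.ChannelTemplateSpec{
+//				TypeMeta: metav1.TypeMeta{APIVersion: "messaging.knative.dev/v1", Kind: "InMemoryChannel"},
+//			},
+//		},
+//	})
+//	ch := &Channel{}
+//	ch.SetDefaults(ctx) // ch.Spec.ChannelTemplate now holds the cluster default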
+
+// ChannelDefaulter sets the default Channel CRD and Arguments on Channels that do not
+// specify any implementation.
+type ChannelDefaulter interface {
+ // GetDefault determines the default Channel CRD for the given namespace.
+ GetDefault(namespace string) *ChannelTemplateSpec
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_lifecycle.go
new file mode 100644
index 000000000..c8cf84a19
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_lifecycle.go
@@ -0,0 +1,169 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+
+ eventingduck "knative.dev/eventing/pkg/apis/duck/v1"
+)
+
+var chCondSet = apis.NewLivingConditionSet(
+ ChannelConditionBackingChannelReady,
+ ChannelConditionAddressable,
+ ChannelConditionDeadLetterSinkResolved,
+ ChannelConditionEventPoliciesReady,
+)
+
+const (
+ // ChannelConditionReady has status True when all subconditions below have been set to True.
+ ChannelConditionReady = apis.ConditionReady
+
+ // ChannelConditionBackingChannelReady has status True when the backing Channel CRD is ready.
+ ChannelConditionBackingChannelReady apis.ConditionType = "BackingChannelReady"
+
+ // ChannelConditionAddressable has status true when this Channel meets
+ // the Addressable contract and has a non-empty hostname.
+ ChannelConditionAddressable apis.ConditionType = "Addressable"
+
+	// ChannelConditionDeadLetterSinkResolved has status True when the Dead Letter Sink ref or URI
+	// defined in Spec.Delivery is a valid destination and is correctly resolved into a valid URI.
+ ChannelConditionDeadLetterSinkResolved apis.ConditionType = "DeadLetterSinkResolved"
+
+ // ChannelConditionEventPoliciesReady has status True when all the EventPolicies which reference this
+ // Channel are Ready too.
+ ChannelConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady"
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*Channel) GetConditionSet() apis.ConditionSet {
+ return chCondSet
+}
+
+// GetGroupVersionKind returns GroupVersionKind for Channels.
+func (*Channel) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("Channel")
+}
+
+// GetUntypedSpec returns the spec of the Channel.
+func (c *Channel) GetUntypedSpec() interface{} {
+ return c.Spec
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (cs *ChannelStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return chCondSet.Manage(cs).GetCondition(t)
+}
+
+// GetTopLevelCondition returns the top level Condition.
+func (cs *ChannelStatus) GetTopLevelCondition() *apis.Condition {
+ return chCondSet.Manage(cs).GetTopLevelCondition()
+}
+
+// IsReady returns true if the resource is ready overall.
+func (cs *ChannelStatus) IsReady() bool {
+ return chCondSet.Manage(cs).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (cs *ChannelStatus) InitializeConditions() {
+ chCondSet.Manage(cs).InitializeConditions()
+}
+
+func (cs *ChannelStatus) SetAddress(address *duckv1.Addressable) {
+ cs.Address = address
+ if address == nil || address.URL.IsEmpty() {
+ chCondSet.Manage(cs).MarkFalse(ChannelConditionAddressable, "EmptyHostname", "hostname is the empty string")
+ } else {
+	} else {
+		chCondSet.Manage(cs).MarkTrue(ChannelConditionAddressable)
+	}
+}
+
+func (cs *ChannelStatus) MarkBackingChannelFailed(reason, messageFormat string, messageA ...interface{}) {
+ chCondSet.Manage(cs).MarkFalse(ChannelConditionBackingChannelReady, reason, messageFormat, messageA...)
+}
+
+func (cs *ChannelStatus) MarkBackingChannelUnknown(reason, messageFormat string, messageA ...interface{}) {
+ chCondSet.Manage(cs).MarkUnknown(ChannelConditionBackingChannelReady, reason, messageFormat, messageA...)
+}
+
+func (cs *ChannelStatus) MarkBackingChannelNotConfigured() {
+ chCondSet.Manage(cs).MarkUnknown(ChannelConditionBackingChannelReady,
+ "BackingChannelNotConfigured", "BackingChannel has not yet been reconciled.")
+}
+
+func (cs *ChannelStatus) MarkBackingChannelReady() {
+ chCondSet.Manage(cs).MarkTrue(ChannelConditionBackingChannelReady)
+}
+
+func (cs *ChannelStatus) PropagateStatuses(chs *eventingduck.ChannelableStatus) {
+ // TODO: Once you can get a Ready status from Channelable in a generic way, use it here.
+ readyCondition := chs.Status.GetCondition(apis.ConditionReady)
+ if readyCondition == nil {
+ cs.MarkBackingChannelNotConfigured()
+ } else {
+ switch {
+ case readyCondition.Status == corev1.ConditionUnknown:
+ cs.MarkBackingChannelUnknown(readyCondition.Reason, readyCondition.Message)
+ case readyCondition.Status == corev1.ConditionTrue:
+ cs.MarkBackingChannelReady()
+ case readyCondition.Status == corev1.ConditionFalse:
+ cs.MarkBackingChannelFailed(readyCondition.Reason, readyCondition.Message)
+ default:
+ cs.MarkBackingChannelUnknown("BackingChannelUnknown", "The status of BackingChannel is invalid: %v", readyCondition.Status)
+ }
+ }
+ // Set the address and update the Addressable conditions.
+ cs.SetAddress(chs.AddressStatus.Address)
+ // Set the subscribable status.
+ cs.SubscribableStatus = chs.SubscribableStatus
+}
+
+func (cs *ChannelStatus) MarkDeadLetterSinkResolvedSucceeded(deadLetterSink eventingduck.DeliveryStatus) {
+ cs.DeliveryStatus = deadLetterSink
+ chCondSet.Manage(cs).MarkTrue(ChannelConditionDeadLetterSinkResolved)
+}
+
+func (cs *ChannelStatus) MarkDeadLetterSinkNotConfigured() {
+ cs.DeliveryStatus = eventingduck.DeliveryStatus{}
+ chCondSet.Manage(cs).MarkTrueWithReason(ChannelConditionDeadLetterSinkResolved, "DeadLetterSinkNotConfigured", "No dead letter sink is configured.")
+}
+
+func (cs *ChannelStatus) MarkDeadLetterSinkResolvedFailed(reason, messageFormat string, messageA ...interface{}) {
+ cs.DeliveryStatus = eventingduck.DeliveryStatus{}
+ chCondSet.Manage(cs).MarkFalse(ChannelConditionDeadLetterSinkResolved, reason, messageFormat, messageA...)
+}
+
+func (cs *ChannelStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) {
+ chCondSet.Manage(cs).MarkFalse(ChannelConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (cs *ChannelStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) {
+ chCondSet.Manage(cs).MarkUnknown(ChannelConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (cs *ChannelStatus) MarkEventPoliciesTrue() {
+ chCondSet.Manage(cs).MarkTrue(ChannelConditionEventPoliciesReady)
+}
+
+func (cs *ChannelStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) {
+ chCondSet.Manage(cs).MarkTrueWithReason(ChannelConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_template_types.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_template_types.go
new file mode 100644
index 000000000..c558a33b2
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_template_types.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+)
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+type ChannelTemplateSpec struct {
+ metav1.TypeMeta `json:",inline"`
+
+ // Spec defines the Spec to use for each channel created. Passed
+ // in verbatim to the Channel CRD as Spec section.
+ // +optional
+ Spec *runtime.RawExtension `json:"spec,omitempty"`
+}
+
+// ChannelTemplateSpecOption is an optional function for ChannelTemplateSpec.
+type ChannelTemplateSpecOption func(*ChannelTemplateSpec) error
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_types.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_types.go
new file mode 100644
index 000000000..1196c76bc
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_types.go
@@ -0,0 +1,103 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// Channel represents a generic Channel. It is normally used when we want a
+// Channel, but do not need a specific Channel implementation.
+type Channel struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired state of the Channel.
+ Spec ChannelSpec `json:"spec,omitempty"`
+
+ // Status represents the current state of the Channel. This data may be out
+ // of date.
+ // +optional
+ Status ChannelStatus `json:"status,omitempty"`
+}
+
+var (
+ // Check that Channel can be validated and defaulted.
+ _ apis.Validatable = (*Channel)(nil)
+ _ apis.Defaultable = (*Channel)(nil)
+
+ // Check that Channel can return its spec untyped.
+ _ apis.HasSpec = (*Channel)(nil)
+
+ _ runtime.Object = (*Channel)(nil)
+
+ // Check that we can create OwnerReferences to a Channel.
+ _ kmeta.OwnerRefable = (*Channel)(nil)
+
+ // Check that the type conforms to the duck Knative Resource shape.
+ _ duckv1.KRShaped = (*Channel)(nil)
+)
+
+// ChannelSpec defines which subscribers have expressed interest in receiving
+// events from this Channel. It also defines the ChannelTemplate to use in
+// order to create the CRD Channel backing this Channel.
+type ChannelSpec struct {
+ // ChannelTemplate specifies which Channel CRD to use to create the CRD
+ // Channel backing this Channel. This is immutable after creation.
+ // Normally this is set by the Channel defaulter, not directly by the user.
+ // +optional
+ ChannelTemplate *ChannelTemplateSpec `json:"channelTemplate,omitempty"`
+
+ // Channel conforms to ChannelableSpec
+ eventingduckv1.ChannelableSpec `json:",inline"`
+}
+
+// ChannelStatus represents the current state of a Channel.
+type ChannelStatus struct {
+ // Channel conforms to ChannelableStatus
+ eventingduckv1.ChannelableStatus `json:",inline"`
+
+	// Channel is a KReference to the Channel CRD backing this Channel.
+ // +optional
+ Channel *duckv1.KReference `json:"channel,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ChannelList is a collection of Channels.
+type ChannelList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []Channel `json:"items"`
+}
+
+// GetStatus retrieves the status of the Channel. Implements the KRShaped
+// interface.
+func (t *Channel) GetStatus() *duckv1.Status {
+ return &t.Status.Status
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_validation.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_validation.go
new file mode 100644
index 000000000..9443a4f48
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/channel_validation.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "knative.dev/pkg/apis"
+ "knative.dev/pkg/kmp"
+)
+
+func (c *Channel) Validate(ctx context.Context) *apis.FieldError {
+ withNS := apis.WithinParent(ctx, c.ObjectMeta)
+ errs := c.Spec.Validate(withNS).ViaField("spec")
+ if apis.IsInUpdate(ctx) {
+ original := apis.GetBaseline(ctx).(*Channel)
+ errs = errs.Also(c.CheckImmutableFields(ctx, original))
+ }
+ return errs
+}
+
+func (cs *ChannelSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+
+ if cs.ChannelTemplate == nil {
+ // The Channel defaulter is expected to set this, not the users.
+ errs = errs.Also(apis.ErrMissingField("channelTemplate"))
+ } else {
+ if cte := IsValidChannelTemplate(cs.ChannelTemplate); cte != nil {
+ errs = errs.Also(cte.ViaField("channelTemplate"))
+ }
+ }
+
+ if len(cs.SubscribableSpec.Subscribers) > 0 {
+ errs = errs.Also(apis.ErrDisallowedFields("subscribers").ViaField("subscribable"))
+ }
+
+ if cs.Delivery != nil {
+ if fe := cs.Delivery.Validate(ctx); fe != nil {
+ errs = errs.Also(fe.ViaField("delivery"))
+ }
+ }
+
+ return errs
+}
+
+func IsValidChannelTemplate(ct *ChannelTemplateSpec) *apis.FieldError {
+ var errs *apis.FieldError
+ if ct.Kind == "" {
+ errs = errs.Also(apis.ErrMissingField("kind"))
+ }
+ if ct.APIVersion == "" {
+ errs = errs.Also(apis.ErrMissingField("apiVersion"))
+ }
+ return errs
+}
+
+func (c *Channel) CheckImmutableFields(ctx context.Context, original *Channel) *apis.FieldError {
+ if original == nil {
+ return nil
+ }
+
+ ignoreArguments := cmpopts.IgnoreFields(ChannelSpec{}, "SubscribableSpec")
+ if diff, err := kmp.ShortDiff(original.Spec, c.Spec, ignoreArguments); err != nil {
+ return &apis.FieldError{
+ Message: "Failed to diff Channel",
+ Paths: []string{"spec"},
+ Details: err.Error(),
+ }
+ } else if diff != "" {
+ return &apis.FieldError{
+ Message: "Immutable fields changed (-old +new)",
+ Paths: []string{"spec"},
+ Details: diff,
+ }
+ }
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/doc.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/doc.go
new file mode 100644
index 000000000..4a9dda155
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/doc.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// API versions allow the API contract for a resource to be changed while keeping
+// backward compatibility by supporting multiple concurrent versions
+// of the same resource.
+
+// Package v1 is the v1 version of the API.
+// +k8s:deepcopy-gen=package
+// +groupName=messaging.knative.dev
+package v1
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_conversion.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_conversion.go
new file mode 100644
index 000000000..bc9a25158
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_conversion.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+func (source *InMemoryChannel) ConvertTo(ctx context.Context, sink apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+func (sink *InMemoryChannel) ConvertFrom(ctx context.Context, source apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", source)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_defaults.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_defaults.go
new file mode 100644
index 000000000..ea3888bd9
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_defaults.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+
+ "knative.dev/eventing/pkg/apis/messaging"
+)
+
+func (imc *InMemoryChannel) SetDefaults(ctx context.Context) {
+ // Set the duck subscription to the stored version of the duck
+ // we support. Reason for this is that the stored version will
+ // not get a chance to get modified, but for newer versions
+ // conversion webhook will be able to take a crack at it and
+ // can modify it to match the duck shape.
+ if imc.Annotations == nil {
+ imc.Annotations = make(map[string]string)
+ }
+ if _, ok := imc.Annotations[messaging.SubscribableDuckVersionAnnotation]; !ok {
+ imc.Annotations[messaging.SubscribableDuckVersionAnnotation] = "v1"
+ }
+
+ ctx = apis.WithinParent(ctx, imc.ObjectMeta)
+ imc.Spec.SetDefaults(ctx)
+}
+
+func (imcs *InMemoryChannelSpec) SetDefaults(ctx context.Context) {
+ imcs.Delivery.SetDefaults(ctx)
+}
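
A short sketch of the defaulting behavior above, assuming the vendored import paths and that the nested DeliverySpec defaulter tolerates a nil receiver: an InMemoryChannel without the subscribable duck-version annotation gets it pinned to "v1".

package example

import (
	"context"

	"knative.dev/eventing/pkg/apis/messaging"
	messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
)

// duckVersionAfterDefaulting shows SetDefaults filling in the annotation when absent.
func duckVersionAfterDefaulting() string {
	imc := &messagingv1.InMemoryChannel{}
	imc.SetDefaults(context.Background())
	return imc.Annotations[messaging.SubscribableDuckVersionAnnotation] // "v1"
}
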
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_lifecycle.go
new file mode 100644
index 000000000..66c47ef23
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_lifecycle.go
@@ -0,0 +1,205 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/utils/pointer"
+ "knative.dev/pkg/apis"
+ v1 "knative.dev/pkg/apis/duck/v1"
+
+ eventingduck "knative.dev/eventing/pkg/apis/duck/v1"
+)
+
+var imcCondSet = apis.NewLivingConditionSet(
+ InMemoryChannelConditionServiceReady,
+ InMemoryChannelConditionEndpointsReady,
+ InMemoryChannelConditionAddressable,
+ InMemoryChannelConditionChannelServiceReady,
+ InMemoryChannelConditionDeadLetterSinkResolved,
+ InMemoryChannelConditionEventPoliciesReady,
+)
+
+const (
+ // InMemoryChannelConditionReady has status True when all subconditions below have been set to True.
+ InMemoryChannelConditionReady = apis.ConditionReady
+
+ // InMemoryChannelConditionDispatcherReady has status True when a Dispatcher deployment is ready
+ // Keyed off appsv1.DeploymentAvailable, which means minimum available replicas required are up
+ // and running for at least minReadySeconds.
+ InMemoryChannelConditionDispatcherReady apis.ConditionType = "DispatcherReady"
+
+ // InMemoryChannelConditionServiceReady has status True when a k8s Service is ready. This
+ // basically just means it exists because there's no meaningful status in Service. See Endpoints
+ // below.
+ InMemoryChannelConditionServiceReady apis.ConditionType = "ServiceReady"
+
+ // InMemoryChannelConditionEndpointsReady has status True when a k8s Service Endpoints are backed
+ // by at least one endpoint.
+ InMemoryChannelConditionEndpointsReady apis.ConditionType = "EndpointsReady"
+
+ // InMemoryChannelConditionAddressable has status true when this InMemoryChannel meets
+ // the Addressable contract and has a non-empty hostname.
+ InMemoryChannelConditionAddressable apis.ConditionType = "Addressable"
+
+ // InMemoryChannelConditionChannelServiceReady has status True when a k8s Service representing the channel is ready.
+ // Because this uses ExternalName, there are no endpoints to check.
+ InMemoryChannelConditionChannelServiceReady apis.ConditionType = "ChannelServiceReady"
+
+ // InMemoryChannelConditionDeadLetterSinkResolved has status True when the Dead Letter Sink ref or URI
+ // defined in Spec.Delivery is a valid destination and is correctly resolved into a valid URI.
+ InMemoryChannelConditionDeadLetterSinkResolved apis.ConditionType = "DeadLetterSinkResolved"
+
+ // InMemoryChannelConditionEventPoliciesReady has status True when all the applying EventPolicies for this
+ // InMemoryChannel are ready.
+ InMemoryChannelConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady"
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*InMemoryChannel) GetConditionSet() apis.ConditionSet {
+ return imcCondSet
+}
+
+// GetGroupVersionKind returns GroupVersionKind for InMemoryChannels
+func (*InMemoryChannel) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("InMemoryChannel")
+}
+
+// GetUntypedSpec returns the spec of the InMemoryChannel.
+func (i *InMemoryChannel) GetUntypedSpec() interface{} {
+ return i.Spec
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (imcs *InMemoryChannelStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return imcCondSet.Manage(imcs).GetCondition(t)
+}
+
+// IsReady returns true if the Status condition InMemoryChannelConditionReady
+// is true and the latest spec has been observed.
+func (imc *InMemoryChannel) IsReady() bool {
+ imcs := imc.Status
+ return imcs.ObservedGeneration == imc.Generation &&
+ imc.GetConditionSet().Manage(&imcs).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (imcs *InMemoryChannelStatus) InitializeConditions() {
+ imcCondSet.Manage(imcs).InitializeConditions()
+}
+
+func (imcs *InMemoryChannelStatus) SetAddress(addr *v1.Addressable) {
+ imcs.Address = addr
+ if addr != nil && addr.URL != nil {
+ imcs.Address.Name = pointer.String(addr.URL.Scheme)
+ imcCondSet.Manage(imcs).MarkTrue(InMemoryChannelConditionAddressable)
+ } else {
+ imcCondSet.Manage(imcs).MarkFalse(InMemoryChannelConditionAddressable, "emptyHostname", "hostname is the empty string")
+ }
+}
+
+func (imcs *InMemoryChannelStatus) MarkDispatcherFailed(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkFalse(InMemoryChannelConditionDispatcherReady, reason, messageFormat, messageA...)
+}
+
+func (imcs *InMemoryChannelStatus) MarkDispatcherUnknown(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkUnknown(InMemoryChannelConditionDispatcherReady, reason, messageFormat, messageA...)
+}
+
+// TODO: Unify this with the ones from Eventing. Say: Broker, Trigger.
+func (imcs *InMemoryChannelStatus) PropagateDispatcherStatus(ds *appsv1.DeploymentStatus) {
+ for _, cond := range ds.Conditions {
+ if cond.Type == appsv1.DeploymentAvailable {
+ if cond.Status == corev1.ConditionTrue {
+ imcCondSet.Manage(imcs).MarkTrue(InMemoryChannelConditionDispatcherReady)
+ } else if cond.Status == corev1.ConditionFalse {
+ imcs.MarkDispatcherFailed("DispatcherDeploymentFalse", "The status of Dispatcher Deployment is False: %s : %s", cond.Reason, cond.Message)
+ } else if cond.Status == corev1.ConditionUnknown {
+ imcs.MarkDispatcherUnknown("DispatcherDeploymentUnknown", "The status of Dispatcher Deployment is Unknown: %s : %s", cond.Reason, cond.Message)
+ }
+ }
+ }
+}
+
+func (imcs *InMemoryChannelStatus) MarkServiceFailed(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkFalse(InMemoryChannelConditionServiceReady, reason, messageFormat, messageA...)
+}
+
+func (imcs *InMemoryChannelStatus) MarkServiceUnknown(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkUnknown(InMemoryChannelConditionServiceReady, reason, messageFormat, messageA...)
+}
+
+func (imcs *InMemoryChannelStatus) MarkServiceTrue() {
+ imcCondSet.Manage(imcs).MarkTrue(InMemoryChannelConditionServiceReady)
+}
+
+func (imcs *InMemoryChannelStatus) MarkChannelServiceFailed(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkFalse(InMemoryChannelConditionChannelServiceReady, reason, messageFormat, messageA...)
+}
+
+func (imcs *InMemoryChannelStatus) MarkChannelServiceUnknown(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkUnknown(InMemoryChannelConditionChannelServiceReady, reason, messageFormat, messageA...)
+}
+
+func (imcs *InMemoryChannelStatus) MarkChannelServiceTrue() {
+ imcCondSet.Manage(imcs).MarkTrue(InMemoryChannelConditionChannelServiceReady)
+}
+
+func (imcs *InMemoryChannelStatus) MarkEndpointsFailed(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkFalse(InMemoryChannelConditionEndpointsReady, reason, messageFormat, messageA...)
+}
+
+func (imcs *InMemoryChannelStatus) MarkEndpointsUnknown(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkUnknown(InMemoryChannelConditionEndpointsReady, reason, messageFormat, messageA...)
+}
+
+func (imcs *InMemoryChannelStatus) MarkEndpointsTrue() {
+ imcCondSet.Manage(imcs).MarkTrue(InMemoryChannelConditionEndpointsReady)
+}
+
+func (imcs *InMemoryChannelStatus) MarkDeadLetterSinkResolvedSucceeded(ds eventingduck.DeliveryStatus) {
+ imcs.DeliveryStatus = ds
+ imcCondSet.Manage(imcs).MarkTrue(InMemoryChannelConditionDeadLetterSinkResolved)
+}
+
+func (imcs *InMemoryChannelStatus) MarkDeadLetterSinkNotConfigured() {
+ imcs.DeliveryStatus = eventingduck.DeliveryStatus{}
+ imcCondSet.Manage(imcs).MarkTrueWithReason(InMemoryChannelConditionDeadLetterSinkResolved, "DeadLetterSinkNotConfigured", "No dead letter sink is configured.")
+}
+
+func (imcs *InMemoryChannelStatus) MarkDeadLetterSinkResolvedFailed(reason, messageFormat string, messageA ...interface{}) {
+ imcs.DeliveryStatus = eventingduck.DeliveryStatus{}
+ imcCondSet.Manage(imcs).MarkFalse(InMemoryChannelConditionDeadLetterSinkResolved, reason, messageFormat, messageA...)
+}
+
+func (imcs *InMemoryChannelStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkFalse(InMemoryChannelConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (imcs *InMemoryChannelStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkUnknown(InMemoryChannelConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (imcs *InMemoryChannelStatus) MarkEventPoliciesTrue() {
+ imcCondSet.Manage(imcs).MarkTrue(InMemoryChannelConditionEventPoliciesReady)
+}
+
+func (imcs *InMemoryChannelStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) {
+ imcCondSet.Manage(imcs).MarkTrueWithReason(InMemoryChannelConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
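
A sketch of how a reconciler typically drives the status helpers above; the ordering is illustrative, and Ready also requires status.observedGeneration to match metadata.generation.

package example

import (
	messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
	"knative.dev/pkg/apis"
	duckv1 "knative.dev/pkg/apis/duck/v1"
)

// markHappy flips every living condition to True and reports readiness.
func markHappy(imc *messagingv1.InMemoryChannel, url *apis.URL) bool {
	imc.Status.InitializeConditions() // unset conditions become Unknown
	imc.Status.MarkServiceTrue()
	imc.Status.MarkChannelServiceTrue()
	imc.Status.MarkEndpointsTrue()
	imc.Status.MarkDeadLetterSinkNotConfigured()
	imc.Status.MarkEventPoliciesTrue()
	imc.Status.SetAddress(&duckv1.Addressable{URL: url}) // Addressable becomes True for a non-nil URL
	imc.Status.ObservedGeneration = imc.Generation
	return imc.IsReady()
}
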
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_types.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_types.go
new file mode 100644
index 000000000..d45d1a971
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_types.go
@@ -0,0 +1,91 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InMemoryChannel is a resource representing an in-memory channel.
+type InMemoryChannel struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ // Spec defines the desired state of the Channel.
+ Spec InMemoryChannelSpec `json:"spec,omitempty"`
+
+ // Status represents the current state of the Channel. This data may be out of
+ // date.
+ // +optional
+ Status InMemoryChannelStatus `json:"status,omitempty"`
+}
+
+var (
+ // Check that InMemoryChannel can be validated and defaulted.
+ _ apis.Validatable = (*InMemoryChannel)(nil)
+ _ apis.Defaultable = (*InMemoryChannel)(nil)
+
+ // Check that InMemoryChannel can return its spec untyped.
+ _ apis.HasSpec = (*InMemoryChannel)(nil)
+
+ _ runtime.Object = (*InMemoryChannel)(nil)
+
+ // Check that we can create OwnerReferences to an InMemoryChannel.
+ _ kmeta.OwnerRefable = (*InMemoryChannel)(nil)
+
+ // Check that the type conforms to the duck Knative Resource shape.
+ _ duckv1.KRShaped = (*InMemoryChannel)(nil)
+)
+
+// InMemoryChannelSpec defines which subscribers have expressed interest in
+// receiving events from this InMemoryChannel. It carries the Channelable
+// arguments for the channel.
+type InMemoryChannelSpec struct {
+ // Channel conforms to Duck type Channelable.
+ eventingduckv1.ChannelableSpec `json:",inline"`
+}
+
+// InMemoryChannelStatus represents the current state of an InMemoryChannel.
+type InMemoryChannelStatus struct {
+ // Channel conforms to Duck type ChannelableStatus.
+ eventingduckv1.ChannelableStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// InMemoryChannelList is a collection of in-memory channels.
+type InMemoryChannelList struct {
+ metav1.TypeMeta `json:",inline"`
+ // +optional
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []InMemoryChannel `json:"items"`
+}
+
+// GetStatus retrieves the status of the InMemoryChannel. Implements the KRShaped interface.
+func (t *InMemoryChannel) GetStatus() *duckv1.Status {
+ return &t.Status.Status
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_validation.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_validation.go
new file mode 100644
index 000000000..5f7e5f42d
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/in_memory_channel_validation.go
@@ -0,0 +1,106 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+ "knative.dev/pkg/kmp"
+
+ "knative.dev/eventing/pkg/apis/eventing"
+)
+
+const eventingControllerSAName = "system:serviceaccount:knative-eventing:eventing-controller"
+
+func (imc *InMemoryChannel) Validate(ctx context.Context) *apis.FieldError {
+ errs := imc.Spec.Validate(ctx).ViaField("spec")
+
+ // Validate annotations
+ if imc.Annotations != nil {
+ if scope, ok := imc.Annotations[eventing.ScopeAnnotationKey]; ok {
+ if scope != eventing.ScopeNamespace && scope != eventing.ScopeCluster {
+ iv := apis.ErrInvalidValue(scope, "")
+ iv.Details = "expected either 'cluster' or 'namespace'"
+ errs = errs.Also(iv.ViaFieldKey("annotations", eventing.ScopeAnnotationKey).ViaField("metadata"))
+ }
+ }
+ }
+
+ if apis.IsInUpdate(ctx) {
+ // Validate that if any changes were made to spec.subscribers, they were made by the eventing-controller
+ original := apis.GetBaseline(ctx).(*InMemoryChannel)
+ errs = errs.Also(imc.CheckSubscribersChangeAllowed(ctx, original))
+ }
+
+ return errs
+}
+
+func (imcs *InMemoryChannelSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+ for i, subscriber := range imcs.SubscribableSpec.Subscribers {
+ if subscriber.ReplyURI == nil && subscriber.SubscriberURI == nil {
+ fe := apis.ErrMissingField("replyURI", "subscriberURI")
+ fe.Details = "expected at least one of, got none"
+ errs = errs.Also(fe.ViaField(fmt.Sprintf("subscriber[%d]", i)).ViaField("subscribable"))
+ }
+ }
+
+ return errs
+}
+
+func (imc *InMemoryChannel) CheckSubscribersChangeAllowed(ctx context.Context, original *InMemoryChannel) *apis.FieldError {
+ if original == nil {
+ return nil
+ }
+
+ if !canChangeChannelSpecAuth(ctx) {
+ return imc.checkSubscriberSpecAuthChanged(original, ctx)
+ }
+ return nil
+}
+
+func (imc *InMemoryChannel) checkSubscriberSpecAuthChanged(original *InMemoryChannel, ctx context.Context) *apis.FieldError {
+ if diff, err := kmp.ShortDiff(original.Spec.Subscribers, imc.Spec.Subscribers); err != nil {
+ return &apis.FieldError{
+ Message: "Failed to diff Channel.Spec.Subscribers",
+ Paths: []string{"spec.subscribers"},
+ Details: err.Error(),
+ }
+ } else if diff != "" {
+ user := apis.GetUserInfo(ctx)
+ userName := ""
+ if user != nil {
+ userName = user.Username
+ }
+ return &apis.FieldError{
+ Message: fmt.Sprintf("Channel.Spec.Subscribers changed by user %s which was not the %s service account", userName, eventingControllerSAName),
+ Paths: []string{"spec.subscribers"},
+ Details: diff,
+ }
+ }
+ return nil
+}
+
+func canChangeChannelSpecAuth(ctx context.Context) bool {
+ user := apis.GetUserInfo(ctx)
+ if user == nil {
+ return false
+ }
+ return user.Username == eventingControllerSAName
+}
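
A sketch of the update path guarded above: the admission webhook places the baseline object and the requesting user on the context, and only the eventing-controller service account may touch spec.subscribers. The helper name is illustrative; the context helpers come from knative.dev/pkg/apis.

package example

import (
	"context"

	authv1 "k8s.io/api/authentication/v1"
	"knative.dev/pkg/apis"

	messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
)

// validateUpdateAs runs InMemoryChannel validation as it would run in an update
// admission request issued by the given username.
func validateUpdateAs(old, updated *messagingv1.InMemoryChannel, username string) *apis.FieldError {
	ctx := apis.WithinUpdate(context.Background(), old)
	ctx = apis.WithUserInfo(ctx, &authv1.UserInfo{Username: username})
	return updated.Validate(ctx)
}
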
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/register.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/register.go
new file mode 100644
index 000000000..2d80c1b04
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/register.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "knative.dev/eventing/pkg/apis/messaging"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: messaging.GroupName, Version: "v1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &InMemoryChannel{},
+ &InMemoryChannelList{},
+ &Subscription{},
+ &SubscriptionList{},
+ &Channel{},
+ &ChannelList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
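
A minimal sketch of consuming the scheme registration above, for example when building a codec or a fake client.

package example

import (
	"k8s.io/apimachinery/pkg/runtime"

	messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
)

// newMessagingScheme registers Channel, InMemoryChannel and Subscription (plus
// their list types) with a fresh runtime.Scheme.
func newMessagingScheme() (*runtime.Scheme, error) {
	scheme := runtime.NewScheme()
	if err := messagingv1.AddToScheme(scheme); err != nil {
		return nil, err
	}
	return scheme, nil
}
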
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscribable_channelable_validation.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscribable_channelable_validation.go
new file mode 100644
index 000000000..008ee41ee
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscribable_channelable_validation.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/api/equality"
+ "knative.dev/eventing/pkg/apis/feature"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+)
+
+func isChannelEmpty(f duckv1.KReference) bool {
+ return equality.Semantic.DeepEqual(f, duckv1.KReference{})
+}
+
+// Valid if it is a valid object reference.
+func isValidChannel(ctx context.Context, f duckv1.KReference) *apis.FieldError {
+ errs := f.Validate(ctx)
+
+ if !feature.FromContext(ctx).IsEnabled(feature.CrossNamespaceEventLinks) {
+ // Only name, apiVersion and kind are supported fields when feature.CrossNamespaceEventLinks is disabled
+ if f.Namespace != "" {
+ fe := apis.ErrDisallowedFields("namespace")
+ fe.Details = "only name, apiVersion and kind are supported fields when feature.CrossNamespaceEventLinks is disabled"
+ errs = errs.Also(fe)
+ }
+ }
+
+ return errs
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_conversion.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_conversion.go
new file mode 100644
index 000000000..b5f2ce40e
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_conversion.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 The Knative Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+func (source *Subscription) ConvertTo(ctx context.Context, sink apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+func (sink *Subscription) ConvertFrom(ctx context.Context, source apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", source)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_defaults.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_defaults.go
new file mode 100644
index 000000000..63e3ec026
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_defaults.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+)
+
+func (s *Subscription) SetDefaults(ctx context.Context) {
+ if s == nil {
+ return
+ }
+ ctx = apis.WithinParent(ctx, s.ObjectMeta)
+ s.Spec.SetDefaults(ctx)
+}
+
+func (ss *SubscriptionSpec) SetDefaults(ctx context.Context) {
+ if ss == nil {
+ return
+ }
+
+ ss.Subscriber.SetDefaults(ctx)
+ ss.Reply.SetDefaults(ctx)
+ ss.Delivery.SetDefaults(ctx)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_lifecycle.go
new file mode 100644
index 000000000..095731d4b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_lifecycle.go
@@ -0,0 +1,133 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "knative.dev/pkg/apis"
+)
+
+// SubCondSet is a condition set with Ready as the happy condition and
+// ReferencesResolved, AddedToChannel, ChannelReady and OIDCIdentityCreated as the dependent conditions.
+var SubCondSet = apis.NewLivingConditionSet(SubscriptionConditionReferencesResolved, SubscriptionConditionAddedToChannel, SubscriptionConditionChannelReady, SubscriptionConditionOIDCIdentityCreated)
+
+const (
+ // SubscriptionConditionReady has status True when all subconditions below have been set to True.
+ SubscriptionConditionReady = apis.ConditionReady
+ // SubscriptionConditionReferencesResolved has status True when all the specified references have been successfully
+ // resolved.
+ SubscriptionConditionReferencesResolved apis.ConditionType = "ReferencesResolved"
+
+ // SubscriptionConditionAddedToChannel has status True when controller has successfully added a
+ // subscription to the spec.channel resource.
+ SubscriptionConditionAddedToChannel apis.ConditionType = "AddedToChannel"
+
+ // SubscriptionConditionChannelReady has status True when the channel has marked the subscriber as 'ready'
+ SubscriptionConditionChannelReady apis.ConditionType = "ChannelReady"
+
+ SubscriptionConditionOIDCIdentityCreated apis.ConditionType = "OIDCIdentityCreated"
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*Subscription) GetConditionSet() apis.ConditionSet {
+ return SubCondSet
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (ss *SubscriptionStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return SubCondSet.Manage(ss).GetCondition(t)
+}
+
+// GetTopLevelCondition returns the top level Condition.
+func (ss *SubscriptionStatus) GetTopLevelCondition() *apis.Condition {
+ return SubCondSet.Manage(ss).GetTopLevelCondition()
+}
+
+// IsReady returns true if the resource is ready overall.
+func (ss *SubscriptionStatus) IsReady() bool {
+ return SubCondSet.Manage(ss).IsHappy()
+}
+
+// IsAddedToChannel returns true if SubscriptionConditionAddedToChannel is true
+func (ss *SubscriptionStatus) IsAddedToChannel() bool {
+ return ss.GetCondition(SubscriptionConditionAddedToChannel).IsTrue()
+}
+
+// AreReferencesResolved returns true if SubscriptionConditionReferencesResolved is true
+func (ss *SubscriptionStatus) AreReferencesResolved() bool {
+ return ss.GetCondition(SubscriptionConditionReferencesResolved).IsTrue()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (ss *SubscriptionStatus) InitializeConditions() {
+ SubCondSet.Manage(ss).InitializeConditions()
+}
+
+// MarkReferencesResolved sets the ReferencesResolved condition to True state.
+func (ss *SubscriptionStatus) MarkReferencesResolved() {
+ SubCondSet.Manage(ss).MarkTrue(SubscriptionConditionReferencesResolved)
+}
+
+// MarkChannelReady sets the ChannelReady condition to True state.
+func (ss *SubscriptionStatus) MarkChannelReady() {
+ SubCondSet.Manage(ss).MarkTrue(SubscriptionConditionChannelReady)
+}
+
+// MarkAddedToChannel sets the AddedToChannel condition to True state.
+func (ss *SubscriptionStatus) MarkAddedToChannel() {
+ SubCondSet.Manage(ss).MarkTrue(SubscriptionConditionAddedToChannel)
+}
+
+// MarkReferencesNotResolved sets the ReferencesResolved condition to False state.
+func (ss *SubscriptionStatus) MarkReferencesNotResolved(reason, messageFormat string, messageA ...interface{}) {
+ SubCondSet.Manage(ss).MarkFalse(SubscriptionConditionReferencesResolved, reason, messageFormat, messageA...)
+}
+
+// MarkReferencesResolvedUnknown sets the ReferencesResolved condition to Unknown state.
+func (ss *SubscriptionStatus) MarkReferencesResolvedUnknown(reason, messageFormat string, messageA ...interface{}) {
+ SubCondSet.Manage(ss).MarkUnknown(SubscriptionConditionReferencesResolved, reason, messageFormat, messageA...)
+}
+
+// MarkChannelFailed sets the ChannelReady condition to False state.
+func (ss *SubscriptionStatus) MarkChannelFailed(reason, messageFormat string, messageA ...interface{}) {
+ SubCondSet.Manage(ss).MarkFalse(SubscriptionConditionChannelReady, reason, messageFormat, messageA...)
+}
+
+// MarkChannelUnknown sets the ChannelReady condition to Unknown state.
+func (ss *SubscriptionStatus) MarkChannelUnknown(reason, messageFormat string, messageA ...interface{}) {
+ SubCondSet.Manage(ss).MarkUnknown(SubscriptionConditionChannelReady, reason, messageFormat, messageA...)
+}
+
+// MarkNotAddedToChannel sets the AddedToChannel condition to False state.
+func (ss *SubscriptionStatus) MarkNotAddedToChannel(reason, messageFormat string, messageA ...interface{}) {
+ SubCondSet.Manage(ss).MarkFalse(SubscriptionConditionAddedToChannel, reason, messageFormat, messageA...)
+}
+
+func (ss *SubscriptionStatus) MarkOIDCIdentityCreatedSucceeded() {
+ SubCondSet.Manage(ss).MarkTrue(SubscriptionConditionOIDCIdentityCreated)
+}
+
+func (ss *SubscriptionStatus) MarkOIDCIdentityCreatedSucceededWithReason(reason, messageFormat string, messageA ...interface{}) {
+ SubCondSet.Manage(ss).MarkTrueWithReason(SubscriptionConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
+
+func (ss *SubscriptionStatus) MarkOIDCIdentityCreatedFailed(reason, messageFormat string, messageA ...interface{}) {
+ SubCondSet.Manage(ss).MarkFalse(SubscriptionConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
+
+func (ss *SubscriptionStatus) MarkOIDCIdentityCreatedUnknown(reason, messageFormat string, messageA ...interface{}) {
+ SubCondSet.Manage(ss).MarkUnknown(SubscriptionConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_types.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_types.go
new file mode 100644
index 000000000..cb514339a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_types.go
@@ -0,0 +1,189 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+
+ eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:defaulter-gen=true
+
+// Subscription routes events received on a Channel to a DNS name and
+// corresponds to the subscriptions.messaging.knative.dev CRD.
+type Subscription struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata"`
+ Spec SubscriptionSpec `json:"spec"`
+ Status SubscriptionStatus `json:"status,omitempty"`
+}
+
+var (
+ // Check that Subscription can be validated, can be defaulted, and has immutable fields.
+ _ apis.Validatable = (*Subscription)(nil)
+ _ apis.Defaultable = (*Subscription)(nil)
+
+ // Check that Subscription can return its spec untyped.
+ _ apis.HasSpec = (*Subscription)(nil)
+
+ _ runtime.Object = (*Subscription)(nil)
+
+ // Check that we can create OwnerReferences to a Subscription.
+ _ kmeta.OwnerRefable = (*Subscription)(nil)
+
+ // Check that the type conforms to the duck Knative Resource shape.
+ _ duckv1.KRShaped = (*Subscription)(nil)
+)
+
+// SubscriptionSpec specifies the Channel for incoming events, a Subscriber target
+// for processing those events and where to put the result of the processing. Only
+// the Channel (where the events come from) is always required. You can optionally
+// only Process the events (results in no output events) by leaving out the Reply.
+// You can also perform an identity transformation on the incoming events by leaving
+// out the Subscriber and only specifying Reply.
+//
+// The following are all valid specifications:
+// channel --[subscriber]--> reply
+// Sink, no outgoing events:
+// channel -- subscriber
+// no-op function (identity transformation):
+// channel --> reply
+type SubscriptionSpec struct {
+ // Reference to a channel that will be used to create the subscription
+ // You can specify only the following fields of the KReference:
+ // - Kind
+ // - APIVersion
+ // - Name
+ // - Namespace
+ // The resource pointed by this KReference must meet the
+ // contract to the ChannelableSpec duck type. If the resource does not
+ // meet this contract it will be reflected in the Subscription's status.
+ //
+ // This field is immutable. We have no good answer on what happens to
+ // the events that are currently in the channel being consumed from
+ // and what the semantics there should be. For now, you can always
+ // delete the Subscription and recreate it to point to a different
+ // channel, giving the user more control over what semantics should
+ // be used (drain the channel first, possibly have events dropped,
+ // etc.)
+ Channel duckv1.KReference `json:"channel"`
+
+ // Subscriber is a reference to the function for processing events.
+ // Events from the Channel will be delivered here and replies are
+ // sent to a Destination as specified by the Reply.
+ Subscriber *duckv1.Destination `json:"subscriber,omitempty"`
+
+ // Reply specifies (optionally) how to handle events returned from
+ // the Subscriber target.
+ // +optional
+ Reply *duckv1.Destination `json:"reply,omitempty"`
+
+ // Delivery configuration
+ // +optional
+ Delivery *eventingduckv1.DeliverySpec `json:"delivery,omitempty"`
+}
+
+// SubscriptionStatus (computed) for a subscription
+type SubscriptionStatus struct {
+ // inherits duck/v1 Status, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Subscription that was last processed by the controller.
+ // * Conditions - the latest available observations of a resource's current state.
+ duckv1.Status `json:",inline"`
+
+ // PhysicalSubscription is the fully resolved values that this Subscription represents.
+ PhysicalSubscription SubscriptionStatusPhysicalSubscription `json:"physicalSubscription,omitempty"`
+
+ // Auth provides the relevant information for OIDC authentication.
+ // +optional
+ Auth *duckv1.AuthStatus `json:"auth,omitempty"`
+}
+
+// SubscriptionStatusPhysicalSubscription represents the fully resolved values for this
+// Subscription.
+type SubscriptionStatusPhysicalSubscription struct {
+ // SubscriberURI is the fully resolved URI for spec.subscriber.
+ // +optional
+ SubscriberURI *apis.URL `json:"subscriberUri,omitempty"`
+
+ // SubscriberCACerts is the Certification Authority (CA) certificates in PEM
+ // format according to https://www.rfc-editor.org/rfc/rfc7468 for the
+ // resolved URI for spec.subscriber.
+ // +optional
+ SubscriberCACerts *string `json:"subscriberCACerts,omitempty"`
+
+ // SubscriberAudience is the OIDC audience for the resolved URI for
+ // spec.subscriber.
+ // +optional
+ SubscriberAudience *string `json:"subscriberAudience,omitempty"`
+
+ // ReplyURI is the fully resolved URI for the spec.reply.
+ // +optional
+ ReplyURI *apis.URL `json:"replyUri,omitempty"`
+
+ // ReplyCACerts is the Certification Authority (CA) certificates in PEM
+ // format according to https://www.rfc-editor.org/rfc/rfc7468 for the
+ // resolved URI for the spec.reply.
+ // +optional
+ ReplyCACerts *string `json:"replyCACerts,omitempty"`
+
+ // ReplyAudience is the OIDC audience for the resolved URI for
+ // spec.reply.
+ // +optional
+ ReplyAudience *string `json:"replyAudience,omitempty"`
+
+ // DeliveryStatus contains a resolved URL to the dead letter sink address, and any other
+ // resolved delivery options.
+ eventingduckv1.DeliveryStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SubscriptionList returned in list operations
+type SubscriptionList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata"`
+ Items []Subscription `json:"items"`
+}
+
+// GetGroupVersionKind returns GroupVersionKind for Subscriptions
+func (*Subscription) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("Subscription")
+}
+
+// GetUntypedSpec returns the spec of the Subscription.
+func (s *Subscription) GetUntypedSpec() interface{} {
+ return s.Spec
+}
+
+// GetStatus retrieves the status of the Subscription. Implements the KRShaped interface.
+func (s *Subscription) GetStatus() *duckv1.Status {
+ return &s.Status.Status
+}
+
+// GetCrossNamespaceRef returns the Channel reference for the Subscription. Implements the ResourceInfo interface.
+func (s *Subscription) GetCrossNamespaceRef() duckv1.KReference {
+ return s.Spec.Channel
+}
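
A sketch of the smallest Subscription the validation in the next file accepts: a channel reference plus a subscriber, with reply and delivery left unset. All resource names here are illustrative.

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	duckv1 "knative.dev/pkg/apis/duck/v1"

	messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
)

// minimalSubscription wires an InMemoryChannel to a subscriber Service.
func minimalSubscription() *messagingv1.Subscription {
	return &messagingv1.Subscription{
		ObjectMeta: metav1.ObjectMeta{Name: "example-sub", Namespace: "default"},
		Spec: messagingv1.SubscriptionSpec{
			Channel: duckv1.KReference{
				APIVersion: "messaging.knative.dev/v1",
				Kind:       "InMemoryChannel",
				Name:       "example-channel",
			},
			Subscriber: &duckv1.Destination{
				Ref: &duckv1.KReference{
					APIVersion: "v1",
					Kind:       "Service",
					Name:       "event-display",
				},
			},
		},
	}
}
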
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_validation.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_validation.go
new file mode 100644
index 000000000..050ab1a2d
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/subscription_validation.go
@@ -0,0 +1,111 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "k8s.io/apimachinery/pkg/api/equality"
+ "knative.dev/eventing/pkg/apis/feature"
+ cn "knative.dev/eventing/pkg/crossnamespace"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmp"
+)
+
+func (s *Subscription) Validate(ctx context.Context) *apis.FieldError {
+ errs := s.Spec.Validate(ctx).ViaField("spec")
+ if apis.IsInUpdate(ctx) {
+ original := apis.GetBaseline(ctx).(*Subscription)
+ errs = errs.Also(s.CheckImmutableFields(ctx, original))
+ }
+ // The cross-namespace check receives the whole Subscription (not just the spec) because the KRShaped duck type is defined on the entire object.
+ if feature.FromContext(ctx).IsEnabled(feature.CrossNamespaceEventLinks) {
+ crossNamespaceError := cn.CheckNamespace(ctx, s)
+ if crossNamespaceError != nil {
+ errs = errs.Also(crossNamespaceError)
+ }
+ }
+ return errs
+}
+
+func (ss *SubscriptionSpec) Validate(ctx context.Context) *apis.FieldError {
+ // We require always Channel.
+ // Also at least one of 'subscriber' and 'reply' must be defined (non-nil and non-empty).
+
+ var errs *apis.FieldError
+ if isChannelEmpty(ss.Channel) {
+ fe := apis.ErrMissingField("channel")
+ fe.Details = "the Subscription must reference a channel"
+ return fe
+ } else if fe := isValidChannel(ctx, ss.Channel); fe != nil {
+ errs = errs.Also(fe.ViaField("channel"))
+ }
+
+ // Check if we follow the spec and have a valid reference to a subscriber
+ if isDestinationNilOrEmpty(ss.Subscriber) {
+ fe := apis.ErrMissingField("subscriber")
+ fe.Details = "the Subscription must reference a subscriber"
+ errs = errs.Also(fe)
+ } else {
+ if fe := ss.Subscriber.Validate(ctx); fe != nil {
+ errs = errs.Also(fe.ViaField("subscriber"))
+ }
+ }
+
+ if !isDestinationNilOrEmpty(ss.Reply) {
+ if fe := ss.Reply.Validate(ctx); fe != nil {
+ errs = errs.Also(fe.ViaField("reply"))
+ }
+ }
+
+ if ss.Delivery != nil {
+ if fe := ss.Delivery.Validate(ctx); fe != nil {
+ errs = errs.Also(fe.ViaField("delivery"))
+ }
+ }
+
+ return errs
+}
+
+func isDestinationNilOrEmpty(d *duckv1.Destination) bool {
+ return d == nil || equality.Semantic.DeepEqual(d, &duckv1.Destination{})
+}
+
+func (s *Subscription) CheckImmutableFields(ctx context.Context, original *Subscription) *apis.FieldError {
+ if original == nil {
+ return nil
+ }
+
+ // Only Subscriber and Reply are mutable.
+ ignoreArguments := cmpopts.IgnoreFields(SubscriptionSpec{}, "Subscriber", "Reply", "Delivery")
+ if diff, err := kmp.ShortDiff(original.Spec, s.Spec, ignoreArguments); err != nil {
+ return &apis.FieldError{
+ Message: "Failed to diff Subscription",
+ Paths: []string{"spec"},
+ Details: err.Error(),
+ }
+ } else if diff != "" {
+ return &apis.FieldError{
+ Message: "Immutable fields changed (-old +new)",
+ Paths: []string{"spec"},
+ Details: diff,
+ }
+ }
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/messaging/v1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..fff2d237b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/messaging/v1/zz_generated.deepcopy.go
@@ -0,0 +1,422 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ apisduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ apis "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Channel) DeepCopyInto(out *Channel) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Channel.
+func (in *Channel) DeepCopy() *Channel {
+ if in == nil {
+ return nil
+ }
+ out := new(Channel)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Channel) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChannelList) DeepCopyInto(out *ChannelList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Channel, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelList.
+func (in *ChannelList) DeepCopy() *ChannelList {
+ if in == nil {
+ return nil
+ }
+ out := new(ChannelList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ChannelList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChannelSpec) DeepCopyInto(out *ChannelSpec) {
+ *out = *in
+ if in.ChannelTemplate != nil {
+ in, out := &in.ChannelTemplate, &out.ChannelTemplate
+ *out = new(ChannelTemplateSpec)
+ (*in).DeepCopyInto(*out)
+ }
+ in.ChannelableSpec.DeepCopyInto(&out.ChannelableSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelSpec.
+func (in *ChannelSpec) DeepCopy() *ChannelSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ChannelSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChannelStatus) DeepCopyInto(out *ChannelStatus) {
+ *out = *in
+ in.ChannelableStatus.DeepCopyInto(&out.ChannelableStatus)
+ if in.Channel != nil {
+ in, out := &in.Channel, &out.Channel
+ *out = new(duckv1.KReference)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelStatus.
+func (in *ChannelStatus) DeepCopy() *ChannelStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ChannelStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ChannelTemplateSpec) DeepCopyInto(out *ChannelTemplateSpec) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ if in.Spec != nil {
+ in, out := &in.Spec, &out.Spec
+ *out = new(runtime.RawExtension)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ChannelTemplateSpec.
+func (in *ChannelTemplateSpec) DeepCopy() *ChannelTemplateSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ChannelTemplateSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ChannelTemplateSpec) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InMemoryChannel) DeepCopyInto(out *InMemoryChannel) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InMemoryChannel.
+func (in *InMemoryChannel) DeepCopy() *InMemoryChannel {
+ if in == nil {
+ return nil
+ }
+ out := new(InMemoryChannel)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InMemoryChannel) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InMemoryChannelList) DeepCopyInto(out *InMemoryChannelList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]InMemoryChannel, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InMemoryChannelList.
+func (in *InMemoryChannelList) DeepCopy() *InMemoryChannelList {
+ if in == nil {
+ return nil
+ }
+ out := new(InMemoryChannelList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *InMemoryChannelList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InMemoryChannelSpec) DeepCopyInto(out *InMemoryChannelSpec) {
+ *out = *in
+ in.ChannelableSpec.DeepCopyInto(&out.ChannelableSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InMemoryChannelSpec.
+func (in *InMemoryChannelSpec) DeepCopy() *InMemoryChannelSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(InMemoryChannelSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *InMemoryChannelStatus) DeepCopyInto(out *InMemoryChannelStatus) {
+ *out = *in
+ in.ChannelableStatus.DeepCopyInto(&out.ChannelableStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InMemoryChannelStatus.
+func (in *InMemoryChannelStatus) DeepCopy() *InMemoryChannelStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(InMemoryChannelStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *Subscription) DeepCopyInto(out *Subscription) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Subscription.
+func (in *Subscription) DeepCopy() *Subscription {
+ if in == nil {
+ return nil
+ }
+ out := new(Subscription)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *Subscription) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionList) DeepCopyInto(out *SubscriptionList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]Subscription, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionList.
+func (in *SubscriptionList) DeepCopy() *SubscriptionList {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SubscriptionList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionSpec) DeepCopyInto(out *SubscriptionSpec) {
+ *out = *in
+ in.Channel.DeepCopyInto(&out.Channel)
+ if in.Subscriber != nil {
+ in, out := &in.Subscriber, &out.Subscriber
+ *out = new(duckv1.Destination)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Reply != nil {
+ in, out := &in.Reply, &out.Reply
+ *out = new(duckv1.Destination)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Delivery != nil {
+ in, out := &in.Delivery, &out.Delivery
+ *out = new(apisduckv1.DeliverySpec)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionSpec.
+func (in *SubscriptionSpec) DeepCopy() *SubscriptionSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionStatus) DeepCopyInto(out *SubscriptionStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ in.PhysicalSubscription.DeepCopyInto(&out.PhysicalSubscription)
+ if in.Auth != nil {
+ in, out := &in.Auth, &out.Auth
+ *out = new(duckv1.AuthStatus)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionStatus.
+func (in *SubscriptionStatus) DeepCopy() *SubscriptionStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SubscriptionStatusPhysicalSubscription) DeepCopyInto(out *SubscriptionStatusPhysicalSubscription) {
+ *out = *in
+ if in.SubscriberURI != nil {
+ in, out := &in.SubscriberURI, &out.SubscriberURI
+ *out = new(apis.URL)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.SubscriberCACerts != nil {
+ in, out := &in.SubscriberCACerts, &out.SubscriberCACerts
+ *out = new(string)
+ **out = **in
+ }
+ if in.SubscriberAudience != nil {
+ in, out := &in.SubscriberAudience, &out.SubscriberAudience
+ *out = new(string)
+ **out = **in
+ }
+ if in.ReplyURI != nil {
+ in, out := &in.ReplyURI, &out.ReplyURI
+ *out = new(apis.URL)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.ReplyCACerts != nil {
+ in, out := &in.ReplyCACerts, &out.ReplyCACerts
+ *out = new(string)
+ **out = **in
+ }
+ if in.ReplyAudience != nil {
+ in, out := &in.ReplyAudience, &out.ReplyAudience
+ *out = new(string)
+ **out = **in
+ }
+ in.DeliveryStatus.DeepCopyInto(&out.DeliveryStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SubscriptionStatusPhysicalSubscription.
+func (in *SubscriptionStatusPhysicalSubscription) DeepCopy() *SubscriptionStatusPhysicalSubscription {
+ if in == nil {
+ return nil
+ }
+ out := new(SubscriptionStatusPhysicalSubscription)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/constants.go b/vendor/knative.dev/eventing/pkg/apis/sinks/constants.go
new file mode 100644
index 000000000..20aff4446
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/constants.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sinks
+
+const (
+ JobSinkJobsLabelSelector = "sinks.knative.dev/job-sink=true"
+ JobSinkNameLabel = "sinks.knative.dev/job-sink-name"
+ JobSinkIDLabel = "sinks.knative.dev/job-sink-id"
+)
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/register.go b/vendor/knative.dev/eventing/pkg/apis/sinks/register.go
new file mode 100644
index 000000000..676fa75e8
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/register.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sinks
+
+import (
+ "context"
+
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/client-go/kubernetes"
+)
+
+const (
+ GroupName = "sinks.knative.dev"
+)
+
+var (
+	// JobSinkResource represents a Knative Eventing sink JobSink
+ JobSinkResource = schema.GroupResource{
+ Group: GroupName,
+ Resource: "jobsinks",
+ }
+)
+
+type Config struct {
+ KubeClient kubernetes.Interface
+}
+
+type configKey struct{}
+
+func WithConfig(ctx context.Context, cfg *Config) context.Context {
+ return context.WithValue(ctx, configKey{}, cfg)
+}
+
+func GetConfig(ctx context.Context) *Config {
+ v := ctx.Value(configKey{})
+ if v == nil {
+ panic("Missing value for config")
+ }
+ return v.(*Config)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/doc.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/doc.go
new file mode 100644
index 000000000..23def349d
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1alpha1 contains API Schema definitions for the sinks v1alpha1 API group.
+// +k8s:deepcopy-gen=package
+// +groupName=sinks.knative.dev
+package v1alpha1
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_conversion.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_conversion.go
new file mode 100644
index 000000000..3944fd171
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_conversion.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+// Converts source from v1alpha1.JobSink into a higher version.
+func (sink *JobSink) ConvertTo(ctx context.Context, obj apis.Convertible) error {
+ return fmt.Errorf("v1alpha1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+// Converts obj from a higher version into v1alpha1.JobSink.
+func (sink *JobSink) ConvertFrom(ctx context.Context, obj apis.Convertible) error {
+ return fmt.Errorf("v1alpha1 is the highest known version, got: %T", sink)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_defaults.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_defaults.go
new file mode 100644
index 000000000..13f62e868
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_defaults.go
@@ -0,0 +1,24 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+)
+
+func (sink *JobSink) SetDefaults(ctx context.Context) {
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_lifecycle.go
new file mode 100644
index 000000000..bc29a4156
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_lifecycle.go
@@ -0,0 +1,119 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "fmt"
+
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/pkg/apis"
+
+ "knative.dev/eventing/pkg/apis/sinks"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+)
+
+const (
+ // JobSinkConditionReady has status True when the JobSink is ready to send events.
+ JobSinkConditionReady = apis.ConditionReady
+
+ JobSinkConditionAddressable apis.ConditionType = "Addressable"
+
+ // JobSinkConditionEventPoliciesReady has status True when all the applying EventPolicies for this
+ // JobSink are ready.
+ JobSinkConditionEventPoliciesReady apis.ConditionType = "EventPoliciesReady"
+)
+
+var JobSinkCondSet = apis.NewLivingConditionSet(
+ JobSinkConditionAddressable,
+ JobSinkConditionEventPoliciesReady,
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*JobSink) GetConditionSet() apis.ConditionSet {
+ return JobSinkCondSet
+}
+
+// GetUntypedSpec returns the spec of the JobSink.
+func (sink *JobSink) GetUntypedSpec() interface{} {
+ return sink.Spec
+}
+
+// GetGroupVersionKind returns the GroupVersionKind.
+func (sink *JobSink) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("JobSink")
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (s *JobSinkStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return JobSinkCondSet.Manage(s).GetCondition(t)
+}
+
+// GetTopLevelCondition returns the top level Condition.
+func (ps *JobSinkStatus) GetTopLevelCondition() *apis.Condition {
+ return JobSinkCondSet.Manage(ps).GetTopLevelCondition()
+}
+
+// IsReady returns true if the resource is ready overall.
+func (s *JobSinkStatus) IsReady() bool {
+ return JobSinkCondSet.Manage(s).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (s *JobSinkStatus) InitializeConditions() {
+ JobSinkCondSet.Manage(s).InitializeConditions()
+}
+
+// MarkAddressableReady marks the Addressable condition to True.
+func (s *JobSinkStatus) MarkAddressableReady() {
+ JobSinkCondSet.Manage(s).MarkTrue(JobSinkConditionAddressable)
+}
+
+// MarkEventPoliciesFailed marks the EventPoliciesReady condition to False with the given reason and message.
+func (s *JobSinkStatus) MarkEventPoliciesFailed(reason, messageFormat string, messageA ...interface{}) {
+ JobSinkCondSet.Manage(s).MarkFalse(JobSinkConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+// MarkEventPoliciesUnknown marks the EventPoliciesReady condition to Unknown with the given reason and message.
+func (s *JobSinkStatus) MarkEventPoliciesUnknown(reason, messageFormat string, messageA ...interface{}) {
+ JobSinkCondSet.Manage(s).MarkUnknown(JobSinkConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+// MarkEventPoliciesTrue marks the EventPoliciesReady condition to True.
+func (s *JobSinkStatus) MarkEventPoliciesTrue() {
+ JobSinkCondSet.Manage(s).MarkTrue(JobSinkConditionEventPoliciesReady)
+}
+
+// MarkEventPoliciesTrueWithReason marks the EventPoliciesReady condition to True with the given reason and message.
+func (s *JobSinkStatus) MarkEventPoliciesTrueWithReason(reason, messageFormat string, messageA ...interface{}) {
+ JobSinkCondSet.Manage(s).MarkTrueWithReason(JobSinkConditionEventPoliciesReady, reason, messageFormat, messageA...)
+}
+
+func (e *JobSink) SetJobStatusSelector() {
+ if e.Spec.Job != nil {
+ e.Status.JobStatus.Selector = fmt.Sprintf("%s=%s", sinks.JobSinkNameLabel, e.GetName())
+ }
+}
+
+func (s *JobSinkStatus) SetAddress(address *duckv1.Addressable) {
+ s.Address = address
+ if address == nil || address.URL.IsEmpty() {
+ JobSinkCondSet.Manage(s).MarkFalse(JobSinkConditionAddressable, "EmptyHostname", "hostname is the empty string")
+ } else {
+ JobSinkCondSet.Manage(s).MarkTrue(JobSinkConditionAddressable)
+
+ }
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_types.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_types.go
new file mode 100644
index 000000000..18c9153d9
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_types.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ batchv1 "k8s.io/api/batch/v1"
+ "knative.dev/pkg/apis"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ eventingduckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:defaulter-gen=true
+
+// JobSink is the Schema for the JobSink API.
+type JobSink struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec JobSinkSpec `json:"spec,omitempty"`
+ Status JobSinkStatus `json:"status,omitempty"`
+}
+
+// Check the interfaces that JobSink should be implementing.
+var (
+ _ runtime.Object = (*JobSink)(nil)
+ _ kmeta.OwnerRefable = (*JobSink)(nil)
+ _ apis.Validatable = (*JobSink)(nil)
+ _ apis.Defaultable = (*JobSink)(nil)
+ _ apis.HasSpec = (*JobSink)(nil)
+ _ duckv1.KRShaped = (*JobSink)(nil)
+)
+
+// JobSinkSpec defines the desired state of the JobSink.
+type JobSinkSpec struct {
+	// Job to run when an event occurs.
+ // +optional
+ Job *batchv1.Job `json:"job,omitempty"`
+}
+
+// JobSinkStatus defines the observed state of JobSink.
+type JobSinkStatus struct {
+ duckv1.Status `json:",inline"`
+
+ // AddressStatus is the part where the JobSink fulfills the Addressable contract.
+	// It exposes the endpoint as a URI to get events delivered.
+ // +optional
+ duckv1.AddressStatus `json:",inline"`
+
+ // +optional
+ JobStatus JobStatus `json:"job,omitempty"`
+
+ // AppliedEventPoliciesStatus contains the list of EventPolicies which apply to this JobSink
+ // +optional
+ eventingduckv1.AppliedEventPoliciesStatus `json:",inline"`
+}
+
+type JobStatus struct {
+ Selector string `json:"selector,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// JobSinkList contains a list of JobSink.
+type JobSinkList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []JobSink `json:"items"`
+}
+
+// GetStatus retrieves the status of the JobSink. Implements the KRShaped interface.
+func (sink *JobSink) GetStatus() *duckv1.Status {
+ return &sink.Status.Status
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_validation.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_validation.go
new file mode 100644
index 000000000..7ed631ba3
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/job_sink_validation.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "context"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apiserver/pkg/storage/names"
+ "knative.dev/pkg/apis"
+
+ "knative.dev/eventing/pkg/apis/sinks"
+)
+
+func (sink *JobSink) Validate(ctx context.Context) *apis.FieldError {
+ ctx = apis.WithinParent(ctx, sink.ObjectMeta)
+ return sink.Spec.Validate(ctx).ViaField("spec")
+}
+
+func (sink *JobSinkSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+
+ if sink.Job == nil {
+ return errs.Also(apis.ErrMissingOneOf("job"))
+ }
+
+ if sink.Job != nil {
+ job := sink.Job.DeepCopy()
+ job.Name = names.SimpleNameGenerator.GenerateName(apis.ParentMeta(ctx).Name)
+ _, err := sinks.GetConfig(ctx).KubeClient.
+ BatchV1().
+ Jobs(apis.ParentMeta(ctx).Namespace).
+ Create(ctx, job, metav1.CreateOptions{
+ DryRun: []string{metav1.DryRunAll},
+ FieldValidation: metav1.FieldValidationStrict,
+ })
+ if err != nil {
+ return apis.ErrGeneric(err.Error(), "job")
+ }
+ }
+
+ return errs
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/register.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/register.go
new file mode 100644
index 000000000..827ebc28b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ "knative.dev/eventing/pkg/apis/sinks"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: sinks.GroupName, Version: "v1alpha1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &JobSink{},
+ &JobSinkList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/test_helpers.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/test_helpers.go
new file mode 100644
index 000000000..ca14a4674
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/test_helpers.go
@@ -0,0 +1,28 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1alpha1
+
+import (
+ "github.com/google/go-cmp/cmp/cmpopts"
+ "knative.dev/pkg/apis"
+)
+
+var (
+ ignoreAllButTypeAndStatus = cmpopts.IgnoreFields(
+ apis.Condition{},
+ "LastTransitionTime", "Message", "Reason", "Severity")
+)
diff --git a/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..58c9fdfaf
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sinks/v1alpha1/zz_generated.deepcopy.go
@@ -0,0 +1,145 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ v1 "k8s.io/api/batch/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JobSink) DeepCopyInto(out *JobSink) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSink.
+func (in *JobSink) DeepCopy() *JobSink {
+ if in == nil {
+ return nil
+ }
+ out := new(JobSink)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *JobSink) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JobSinkList) DeepCopyInto(out *JobSinkList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]JobSink, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSinkList.
+func (in *JobSinkList) DeepCopy() *JobSinkList {
+ if in == nil {
+ return nil
+ }
+ out := new(JobSinkList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *JobSinkList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JobSinkSpec) DeepCopyInto(out *JobSinkSpec) {
+ *out = *in
+ if in.Job != nil {
+ in, out := &in.Job, &out.Job
+ *out = new(v1.Job)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSinkSpec.
+func (in *JobSinkSpec) DeepCopy() *JobSinkSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(JobSinkSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JobSinkStatus) DeepCopyInto(out *JobSinkStatus) {
+ *out = *in
+ in.Status.DeepCopyInto(&out.Status)
+ in.AddressStatus.DeepCopyInto(&out.AddressStatus)
+ out.JobStatus = in.JobStatus
+ in.AppliedEventPoliciesStatus.DeepCopyInto(&out.AppliedEventPoliciesStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobSinkStatus.
+func (in *JobSinkStatus) DeepCopy() *JobSinkStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(JobSinkStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *JobStatus) DeepCopyInto(out *JobStatus) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new JobStatus.
+func (in *JobStatus) DeepCopy() *JobStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(JobStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/config/ping_defaults.go b/vendor/knative.dev/eventing/pkg/apis/sources/config/ping_defaults.go
new file mode 100644
index 000000000..08f67dd18
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/config/ping_defaults.go
@@ -0,0 +1,80 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ corev1 "k8s.io/api/core/v1"
+
+ cm "knative.dev/pkg/configmap"
+)
+
+const (
+	// PingDefaultsConfigName is the name of the config map for the default
+ // configs that pings should use.
+ PingDefaultsConfigName = "config-ping-defaults"
+
+ DataMaxSizeKey = "data-max-size"
+
+	// Legacy configuration key; it should be removed once a migration
+	// update script is released.
+ LegacyDataMaxSizeKey = "dataMaxSize"
+
+ DefaultDataMaxSize = -1
+)
+
+// NewPingDefaultsConfigFromMap creates a PingDefaults from the supplied map
+func NewPingDefaultsConfigFromMap(data map[string]string) (*PingDefaults, error) {
+ nc := &PingDefaults{DataMaxSize: DefaultDataMaxSize}
+
+ if err := cm.Parse(data,
+ // Legacy for backwards compatibility
+ cm.AsInt64(LegacyDataMaxSizeKey, &nc.DataMaxSize),
+
+ cm.AsInt64(DataMaxSizeKey, &nc.DataMaxSize),
+ ); err != nil {
+ return nil, err
+ }
+
+ return nc, nil
+}
+
+// NewPingDefaultsConfigFromConfigMap creates a PingDefaults from the supplied configMap
+func NewPingDefaultsConfigFromConfigMap(config *corev1.ConfigMap) (*PingDefaults, error) {
+ return NewPingDefaultsConfigFromMap(config.Data)
+}
+
+// PingDefaults includes the default values to be populated by the webhook.
+type PingDefaults struct {
+ DataMaxSize int64 `json:"data-max-size"`
+}
+
+func (d *PingDefaults) GetPingConfig() *PingDefaults {
+ if d.DataMaxSize < 0 {
+ d.DataMaxSize = DefaultDataMaxSize
+ }
+ return d
+
+}
+
+func (d *PingDefaults) DeepCopy() *PingDefaults {
+ if d == nil {
+ return nil
+ }
+ out := new(PingDefaults)
+ *out = *d
+ return out
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/config/store.go b/vendor/knative.dev/eventing/pkg/apis/sources/config/store.go
new file mode 100644
index 000000000..4113c6b36
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/config/store.go
@@ -0,0 +1,97 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package config
+
+import (
+ "context"
+
+ "knative.dev/pkg/configmap"
+)
+
+type pingCfgKey struct{}
+
+// Config holds the collection of configurations that we attach to contexts.
+// +k8s:deepcopy-gen=false
+type Config struct {
+ PingDefaults *PingDefaults
+}
+
+// FromContext extracts a Config from the provided context.
+func FromContext(ctx context.Context) *Config {
+ x, ok := ctx.Value(pingCfgKey{}).(*Config)
+ if ok {
+ return x
+ }
+ return nil
+}
+
+// FromContextOrDefaults is like FromContext, but when no Config is attached it
+// returns a Config populated with the defaults for each of the Config fields.
+func FromContextOrDefaults(ctx context.Context) *Config {
+ if cfg := FromContext(ctx); cfg != nil {
+ return cfg
+ }
+ pingDefaults, err := NewPingDefaultsConfigFromMap(map[string]string{})
+ if err != nil || pingDefaults == nil {
+ pingDefaults = &PingDefaults{DataMaxSize: DefaultDataMaxSize}
+ pingDefaults.GetPingConfig()
+ }
+
+ return &Config{
+ PingDefaults: pingDefaults,
+ }
+}
+
+// ToContext attaches the provided Config to the provided context, returning the
+// new context with the Config attached.
+func ToContext(ctx context.Context, c *Config) context.Context {
+ return context.WithValue(ctx, pingCfgKey{}, c)
+}
+
+// Store is a typed wrapper around configmap.Untyped store to handle our configmaps.
+// +k8s:deepcopy-gen=false
+type Store struct {
+ *configmap.UntypedStore
+}
+
+// NewStore creates a new store of Configs and optionally calls functions when ConfigMaps are updated.
+func NewStore(logger configmap.Logger, onAfterStore ...func(name string, value interface{})) *Store {
+ store := &Store{
+ UntypedStore: configmap.NewUntypedStore(
+ "pingdefaults",
+ logger,
+ configmap.Constructors{
+ PingDefaultsConfigName: NewPingDefaultsConfigFromConfigMap,
+ },
+ onAfterStore...,
+ ),
+ }
+
+ return store
+}
+
+// ToContext attaches the current Config state to the provided context.
+func (s *Store) ToContext(ctx context.Context) context.Context {
+ return ToContext(ctx, s.Load())
+}
+
+// Load creates a Config from the current config state of the Store.
+func (s *Store) Load() *Config {
+ return &Config{
+ PingDefaults: s.UntypedLoad(PingDefaultsConfigName).(*PingDefaults).DeepCopy(),
+ }
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/constants.go b/vendor/knative.dev/eventing/pkg/apis/sources/constants.go
new file mode 100644
index 000000000..42cfe2902
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/constants.go
@@ -0,0 +1,47 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sources
+
+const (
+ // ApiServerSourceAddEventType is the ApiServerSource CloudEvent type for adds.
+ ApiServerSourceAddEventType = "dev.knative.apiserver.resource.add"
+ // ApiServerSourceUpdateEventType is the ApiServerSource CloudEvent type for updates.
+ ApiServerSourceUpdateEventType = "dev.knative.apiserver.resource.update"
+ // ApiServerSourceDeleteEventType is the ApiServerSource CloudEvent type for deletions.
+ ApiServerSourceDeleteEventType = "dev.knative.apiserver.resource.delete"
+
+ // ApiServerSourceAddRefEventType is the ApiServerSource CloudEvent type for ref adds.
+ ApiServerSourceAddRefEventType = "dev.knative.apiserver.ref.add"
+ // ApiServerSourceUpdateRefEventType is the ApiServerSource CloudEvent type for ref updates.
+ ApiServerSourceUpdateRefEventType = "dev.knative.apiserver.ref.update"
+ // ApiServerSourceDeleteRefEventType is the ApiServerSource CloudEvent type for ref deletions.
+ ApiServerSourceDeleteRefEventType = "dev.knative.apiserver.ref.delete"
+)
+
+// ApiServerSourceEventReferenceModeTypes is the list of CloudEvent types the ApiServerSource with EventMode of ReferenceMode emits.
+var ApiServerSourceEventReferenceModeTypes = []string{
+ ApiServerSourceAddRefEventType,
+ ApiServerSourceDeleteRefEventType,
+ ApiServerSourceUpdateRefEventType,
+}
+
+// ApiServerSourceEventResourceModeTypes is the list of CloudEvent types the ApiServerSource with EventMode of ResourceMode emits.
+var ApiServerSourceEventResourceModeTypes = []string{
+ ApiServerSourceAddEventType,
+ ApiServerSourceDeleteEventType,
+ ApiServerSourceUpdateEventType,
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/register.go b/vendor/knative.dev/eventing/pkg/apis/sources/register.go
new file mode 100644
index 000000000..04716c8ca
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/register.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package sources
+
+import (
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/pkg/apis/duck"
+)
+
+const (
+ GroupName = "sources.knative.dev"
+
+ // SourceDuckLabelKey is the label key to indicate
+ // whether the CRD is a Source duck type.
+ // Valid values: "true" or "false"
+ SourceDuckLabelKey = duck.GroupName + "/source"
+
+ // SourceDuckLabelValue is the label value to indicate
+ // the CRD is a Source duck type.
+ SourceDuckLabelValue = "true"
+)
+
+var (
+	// ApiServerSourceResource represents a Knative Eventing Sources ApiServerSource
+ ApiServerSourceResource = schema.GroupResource{
+ Group: GroupName,
+ Resource: "apiserversources",
+ }
+	// PingSourceResource represents a Knative Eventing Sources PingSource
+ PingSourceResource = schema.GroupResource{
+ Group: GroupName,
+ Resource: "pingsources",
+ }
+	// SinkBindingResource represents a Knative Eventing Sources SinkBinding
+ SinkBindingResource = schema.GroupResource{
+ Group: GroupName,
+ Resource: "sinkbindings",
+ }
+
+	// ContainerSourceResource represents a Knative Eventing Sources ContainerSource
+ ContainerSourceResource = schema.GroupResource{
+ Group: GroupName,
+ Resource: "containersources",
+ }
+)
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_conversion.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_conversion.go
new file mode 100644
index 000000000..844542941
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_conversion.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+func (source *ApiServerSource) ConvertTo(ctx context.Context, sink apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+func (sink *ApiServerSource) ConvertFrom(ctx context.Context, source apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", source)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_defaults.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_defaults.go
new file mode 100644
index 000000000..1e52d9576
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_defaults.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+)
+
+func (s *ApiServerSource) SetDefaults(ctx context.Context) {
+ s.Spec.SetDefaults(ctx)
+}
+
+func (ss *ApiServerSourceSpec) SetDefaults(ctx context.Context) {
+
+ if ss.EventMode == "" {
+ ss.EventMode = ReferenceMode
+ }
+
+ if ss.ServiceAccountName == "" {
+ ss.ServiceAccountName = "default"
+ }
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_lifecycle.go
new file mode 100644
index 000000000..42e4df840
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_lifecycle.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+
+ "knative.dev/pkg/apis"
+)
+
+const (
+ // ApiServerConditionReady has status True when the ApiServerSource is ready to send events.
+ ApiServerConditionReady = apis.ConditionReady
+
+ // ApiServerConditionSinkProvided has status True when the ApiServerSource has been configured with a sink target.
+ ApiServerConditionSinkProvided apis.ConditionType = "SinkProvided"
+
+	// ApiServerConditionDeployed has status True when the ApiServerSource has had its deployment created.
+ ApiServerConditionDeployed apis.ConditionType = "Deployed"
+
+ // ApiServerConditionSufficientPermissions has status True when the ApiServerSource has sufficient permissions to access resources.
+ ApiServerConditionSufficientPermissions apis.ConditionType = "SufficientPermissions"
+
+ // ApiServerConditionOIDCIdentityCreated has status True when the ApiServerSource has created an OIDC identity.
+ ApiServerConditionOIDCIdentityCreated apis.ConditionType = "OIDCIdentityCreated"
+)
+
+var apiserverCondSet = apis.NewLivingConditionSet(
+ ApiServerConditionSinkProvided,
+ ApiServerConditionDeployed,
+ ApiServerConditionSufficientPermissions,
+ ApiServerConditionOIDCIdentityCreated,
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*ApiServerSource) GetConditionSet() apis.ConditionSet {
+ return apiserverCondSet
+}
+
+// GetGroupVersionKind returns the GroupVersionKind.
+func (*ApiServerSource) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("ApiServerSource")
+}
+
+// GetUntypedSpec returns the spec of the ApiServerSource.
+func (s *ApiServerSource) GetUntypedSpec() interface{} {
+ return s.Spec
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (s *ApiServerSourceStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return apiserverCondSet.Manage(s).GetCondition(t)
+}
+
+// GetTopLevelCondition returns the top level condition.
+func (s *ApiServerSourceStatus) GetTopLevelCondition() *apis.Condition {
+ return apiserverCondSet.Manage(s).GetTopLevelCondition()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (s *ApiServerSourceStatus) InitializeConditions() {
+ apiserverCondSet.Manage(s).InitializeConditions()
+}
+
+// MarkSink sets the condition that the source has a sink configured.
+func (s *ApiServerSourceStatus) MarkSink(addr *duckv1.Addressable) {
+ if addr != nil {
+ s.SinkURI = addr.URL
+ s.SinkCACerts = addr.CACerts
+ s.SinkAudience = addr.Audience
+ apiserverCondSet.Manage(s).MarkTrue(ApiServerConditionSinkProvided)
+ } else {
+ apiserverCondSet.Manage(s).MarkFalse(ApiServerConditionSinkProvided, "SinkEmpty", "Sink has resolved to empty.%s", "")
+ }
+}
+
+// MarkNoSink sets the condition that the source does not have a sink configured.
+func (s *ApiServerSourceStatus) MarkNoSink(reason, messageFormat string, messageA ...interface{}) {
+ apiserverCondSet.Manage(s).MarkFalse(ApiServerConditionSinkProvided, reason, messageFormat, messageA...)
+}
+
+// PropagateDeploymentAvailability uses the availability of the provided Deployment to determine if
+// ApiServerConditionDeployed should be marked as true or false.
+func (s *ApiServerSourceStatus) PropagateDeploymentAvailability(d *appsv1.Deployment) {
+ deploymentAvailableFound := false
+ for _, cond := range d.Status.Conditions {
+ if cond.Type == appsv1.DeploymentAvailable {
+ deploymentAvailableFound = true
+ if cond.Status == corev1.ConditionTrue {
+ apiserverCondSet.Manage(s).MarkTrue(ApiServerConditionDeployed)
+ } else if cond.Status == corev1.ConditionFalse {
+ apiserverCondSet.Manage(s).MarkFalse(ApiServerConditionDeployed, cond.Reason, cond.Message)
+ } else if cond.Status == corev1.ConditionUnknown {
+ apiserverCondSet.Manage(s).MarkUnknown(ApiServerConditionDeployed, cond.Reason, cond.Message)
+ }
+ }
+ }
+ if !deploymentAvailableFound {
+ apiserverCondSet.Manage(s).MarkUnknown(ApiServerConditionDeployed, "DeploymentUnavailable", "The Deployment '%s' is unavailable.", d.Name)
+ }
+}
+
+// MarkSufficientPermissions sets the condition that the source has enough permissions to access the resources.
+func (s *ApiServerSourceStatus) MarkSufficientPermissions() {
+ apiserverCondSet.Manage(s).MarkTrue(ApiServerConditionSufficientPermissions)
+}
+
+// MarkNoSufficientPermissions sets the condition that the source does not have enough permissions to access the resources
+func (s *ApiServerSourceStatus) MarkNoSufficientPermissions(reason, messageFormat string, messageA ...interface{}) {
+ apiserverCondSet.Manage(s).MarkFalse(ApiServerConditionSufficientPermissions, reason, messageFormat, messageA...)
+}
+
+// IsReady returns true if the resource is ready overall.
+func (s *ApiServerSourceStatus) IsReady() bool {
+ return apiserverCondSet.Manage(s).IsHappy()
+}
+
+func (s *ApiServerSourceStatus) MarkOIDCIdentityCreatedSucceeded() {
+ apiserverCondSet.Manage(s).MarkTrue(ApiServerConditionOIDCIdentityCreated)
+}
+
+func (s *ApiServerSourceStatus) MarkOIDCIdentityCreatedSucceededWithReason(reason, messageFormat string, messageA ...interface{}) {
+ apiserverCondSet.Manage(s).MarkTrueWithReason(ApiServerConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
+
+func (s *ApiServerSourceStatus) MarkOIDCIdentityCreatedFailed(reason, messageFormat string, messageA ...interface{}) {
+ apiserverCondSet.Manage(s).MarkFalse(ApiServerConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
+
+func (s *ApiServerSourceStatus) MarkOIDCIdentityCreatedUnknown(reason, messageFormat string, messageA ...interface{}) {
+ apiserverCondSet.Manage(s).MarkUnknown(ApiServerConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_types.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_types.go
new file mode 100644
index 000000000..e3d30765e
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_types.go
@@ -0,0 +1,154 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:defaulter-gen=true
+
+// ApiServerSource is the Schema for the apiserversources API
+type ApiServerSource struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ApiServerSourceSpec `json:"spec,omitempty"`
+ Status ApiServerSourceStatus `json:"status,omitempty"`
+}
+
+// Check the interfaces that ApiServerSource should be implementing.
+var (
+ _ runtime.Object = (*ApiServerSource)(nil)
+ _ kmeta.OwnerRefable = (*ApiServerSource)(nil)
+ _ apis.Validatable = (*ApiServerSource)(nil)
+ _ apis.Defaultable = (*ApiServerSource)(nil)
+ _ apis.HasSpec = (*ApiServerSource)(nil)
+ _ duckv1.KRShaped = (*ApiServerSource)(nil)
+)
+
+// ApiServerSourceSpec defines the desired state of ApiServerSource
+type ApiServerSourceSpec struct {
+ // inherits duck/v1 SourceSpec, which currently provides:
+ // * Sink - a reference to an object that will resolve to a domain name or
+ // a URI directly to use as the sink.
+ // * CloudEventOverrides - defines overrides to control the output format
+ // and modifications of the event sent to the sink.
+ duckv1.SourceSpec `json:",inline"`
+
+	// Resources are the resources this source will track and send related
+ // lifecycle events from the Kubernetes ApiServer, with an optional label
+ // selector to help filter.
+ // +required
+ Resources []APIVersionKindSelector `json:"resources,omitempty"`
+
+ // ResourceOwner is an additional filter to only track resources that are
+ // owned by a specific resource type. If ResourceOwner matches Resources[n]
+ // then Resources[n] is allowed to pass the ResourceOwner filter.
+ // +optional
+ ResourceOwner *APIVersionKind `json:"owner,omitempty"`
+
+ // EventMode controls the format of the event.
+ // `Reference` sends a dataref event type for the resource under watch.
+	// `Resource` sends the full resource lifecycle event.
+	// Defaults to `Reference`.
+ // +optional
+ EventMode string `json:"mode,omitempty"`
+
+ // ServiceAccountName is the name of the ServiceAccount to use to run this
+ // source. Defaults to default if not set.
+ // +optional
+ ServiceAccountName string `json:"serviceAccountName,omitempty"`
+
+ // NamespaceSelector is a label selector to capture the namespaces that
+ // should be watched by the source.
+ // +optional
+ NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty"`
+
+ // Filters is an experimental field that conforms to the CNCF CloudEvents Subscriptions
+ // API. It's an array of filter expressions that evaluate to true or false.
+ // If any filter expression in the array evaluates to false, the event MUST
+ // NOT be sent to the Sink. If all the filter expressions in the array
+ // evaluate to true, the event MUST be attempted to be delivered. Absence of
+ // a filter or empty array implies a value of true.
+ //
+ // +optional
+ Filters []eventingv1.SubscriptionsAPIFilter `json:"filters,omitempty"`
+}
+
+// ApiServerSourceStatus defines the observed state of ApiServerSource
+type ApiServerSourceStatus struct {
+ // inherits duck/v1 SourceStatus, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last
+ // processed by the controller.
+ // * Conditions - the latest available observations of a resource's current
+ // state.
+ // * SinkURI - the current active sink URI that has been configured for the
+ // Source.
+ duckv1.SourceStatus `json:",inline"`
+
+	// Namespaces lists the namespaces currently watched by the ApiServerSource.
+ Namespaces []string `json:"namespaces"`
+}
+
+// APIVersionKind is an APIVersion and Kind tuple.
+type APIVersionKind struct {
+ // APIVersion - the API version of the resource to watch.
+ APIVersion string `json:"apiVersion"`
+
+ // Kind of the resource to watch.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ Kind string `json:"kind"`
+}
+
+// APIVersionKindSelector is an APIVersion Kind tuple with a LabelSelector.
+type APIVersionKindSelector struct {
+ // APIVersion - the API version of the resource to watch.
+ APIVersion string `json:"apiVersion"`
+
+ // Kind of the resource to watch.
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ Kind string `json:"kind"`
+
+	// LabelSelector filters this source to only those objects that match the
+	// label selector.
+ // More info: http://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
+ // +optional
+ LabelSelector *metav1.LabelSelector `json:"selector,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ApiServerSourceList contains a list of ApiServerSource
+type ApiServerSourceList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ApiServerSource `json:"items"`
+}
+
+// GetStatus retrieves the status of the ApiServerSource. Implements the KRShaped interface.
+func (a *ApiServerSource) GetStatus() *duckv1.Status {
+ return &a.Status.Status
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_validation.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_validation.go
new file mode 100644
index 000000000..3d18b1273
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/apiserver_validation.go
@@ -0,0 +1,96 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "strings"
+
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ "knative.dev/eventing/pkg/apis/feature"
+ "knative.dev/pkg/apis"
+)
+
+const (
+ // ReferenceMode produces payloads of ObjectReference
+ ReferenceMode = "Reference"
+ // ResourceMode produces payloads of ResourceEvent
+ ResourceMode = "Resource"
+)
+
+func (c *ApiServerSource) Validate(ctx context.Context) *apis.FieldError {
+ return c.Spec.Validate(ctx).ViaField("spec")
+}
+
+func (cs *ApiServerSourceSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+
+	// Validate the mode; it must be one of the allowed values.
+ switch cs.EventMode {
+ case ReferenceMode, ResourceMode:
+ // EventMode is valid.
+ default:
+ errs = errs.Also(apis.ErrInvalidValue(cs.EventMode, "mode"))
+ }
+
+ // Validate sink
+ errs = errs.Also(cs.Sink.Validate(ctx).ViaField("sink"))
+
+ if len(cs.Resources) == 0 {
+ errs = errs.Also(apis.ErrMissingField("resources"))
+ }
+ for i, res := range cs.Resources {
+ _, err := schema.ParseGroupVersion(res.APIVersion)
+ if err != nil {
+ errs = errs.Also(apis.ErrInvalidValue(res.APIVersion, "apiVersion").ViaFieldIndex("resources", i))
+ }
+ if strings.TrimSpace(res.Kind) == "" {
+ errs = errs.Also(apis.ErrMissingField("kind").ViaFieldIndex("resources", i))
+ }
+ }
+
+ if cs.ResourceOwner != nil {
+ _, err := schema.ParseGroupVersion(cs.ResourceOwner.APIVersion)
+ if err != nil {
+ errs = errs.Also(apis.ErrInvalidValue(cs.ResourceOwner.APIVersion, "apiVersion").ViaField("owner"))
+ }
+ if strings.TrimSpace(cs.ResourceOwner.Kind) == "" {
+ errs = errs.Also(apis.ErrMissingField("kind").ViaField("owner"))
+ }
+ }
+ errs = errs.Also(cs.SourceSpec.Validate(ctx))
+ errs = errs.Also(validateSubscriptionAPIFiltersList(ctx, cs.Filters).ViaField("filters"))
+ return errs
+}
+
+func validateSubscriptionAPIFiltersList(ctx context.Context, filters []eventingv1.SubscriptionsAPIFilter) (errs *apis.FieldError) {
+ if !feature.FromContext(ctx).IsEnabled(feature.NewAPIServerFilters) {
+ if len(filters) != 0 {
+ return errs.Also(apis.ErrGeneric("Filters is not empty but the NewAPIServerFilters feature is disabled."))
+ }
+
+ return nil
+ }
+
+ for i, f := range filters {
+ f := f
+ errs = errs.Also(eventingv1.ValidateSubscriptionAPIFilter(ctx, &f)).ViaIndex(i)
+ }
+ return errs
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_conversion.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_conversion.go
new file mode 100644
index 000000000..d02548af7
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_conversion.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+// Converts source from v1.ContainerSource into a higher version.
+func (source *ContainerSource) ConvertTo(ctx context.Context, sink apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+// Converts obj from a higher version into v1.ContainerSource.
+func (sink *ContainerSource) ConvertFrom(ctx context.Context, source apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", source)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_defaults.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_defaults.go
new file mode 100644
index 000000000..8789c586a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_defaults.go
@@ -0,0 +1,42 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ corev1 "k8s.io/api/core/v1"
+ "knative.dev/pkg/apis"
+)
+
+func (s *ContainerSource) SetDefaults(ctx context.Context) {
+ withName := apis.WithinParent(ctx, s.ObjectMeta)
+ s.Spec.SetDefaults(withName)
+}
+
+func (ss *ContainerSourceSpec) SetDefaults(ctx context.Context) {
+ containers := make([]corev1.Container, 0, len(ss.Template.Spec.Containers))
+ for i, c := range ss.Template.Spec.Containers {
+		// If the Container specified has no name, default it to "<parent name>-<index>".
+ if c.Name == "" {
+ c.Name = fmt.Sprintf("%s-%d", apis.ParentMeta(ctx).Name, i)
+ }
+ containers = append(containers, c)
+ }
+ ss.Template.Spec.Containers = containers
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_lifecycle.go
new file mode 100644
index 000000000..79ac2aa2a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_lifecycle.go
@@ -0,0 +1,113 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "knative.dev/pkg/apis"
+)
+
+const (
+ // ContainerSourceConditionReady has status True when the ContainerSource is ready to send events.
+ ContainerSourceConditionReady = apis.ConditionReady
+
+ // ContainerSourceConditionSinkBindingReady has status True when the ContainerSource's SinkBinding is ready.
+ ContainerSourceConditionSinkBindingReady apis.ConditionType = "SinkBindingReady"
+
+ // ContainerSourceConditionReceiveAdapterReady has status True when the ContainerSource's ReceiveAdapter is ready.
+ ContainerSourceConditionReceiveAdapterReady apis.ConditionType = "ReceiveAdapterReady"
+)
+
+var containerCondSet = apis.NewLivingConditionSet(
+ ContainerSourceConditionSinkBindingReady,
+ ContainerSourceConditionReceiveAdapterReady,
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*ContainerSource) GetConditionSet() apis.ConditionSet {
+ return containerCondSet
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (s *ContainerSourceStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return containerCondSet.Manage(s).GetCondition(t)
+}
+
+// GetTopLevelCondition returns the top level condition.
+func (s *ContainerSourceStatus) GetTopLevelCondition() *apis.Condition {
+ return containerCondSet.Manage(s).GetTopLevelCondition()
+}
+
+// IsReady returns true if the resource is ready overall.
+func (s *ContainerSourceStatus) IsReady() bool {
+ return containerCondSet.Manage(s).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (s *ContainerSourceStatus) InitializeConditions() {
+ containerCondSet.Manage(s).InitializeConditions()
+}
+
+// PropagateSinkBindingStatus uses the SinkBinding to determine if
+// ContainerSourceConditionSinkBindingReady should be marked as true, false or unknown.
+func (s *ContainerSourceStatus) PropagateSinkBindingStatus(status *SinkBindingStatus) {
+ // Do not copy conditions nor observedGeneration
+ conditions := s.Conditions
+ observedGeneration := s.ObservedGeneration
+ s.SourceStatus = status.SourceStatus
+ s.Conditions = conditions
+ s.ObservedGeneration = observedGeneration
+
+ cond := status.GetCondition(apis.ConditionReady)
+ switch {
+ case cond == nil:
+ containerCondSet.Manage(s).MarkUnknown(ContainerSourceConditionSinkBindingReady, "", "")
+ case cond.Status == corev1.ConditionTrue:
+ containerCondSet.Manage(s).MarkTrue(ContainerSourceConditionSinkBindingReady)
+ case cond.Status == corev1.ConditionFalse:
+ containerCondSet.Manage(s).MarkFalse(ContainerSourceConditionSinkBindingReady, cond.Reason, cond.Message)
+ case cond.Status == corev1.ConditionUnknown:
+ containerCondSet.Manage(s).MarkUnknown(ContainerSourceConditionSinkBindingReady, cond.Reason, cond.Message)
+ default:
+ containerCondSet.Manage(s).MarkUnknown(ContainerSourceConditionSinkBindingReady, cond.Reason, cond.Message)
+ }
+
+ // Propagate the SinkBinding's AuthStatus to the ContainerSource's AuthStatus.
+ s.Auth = status.Auth
+}
+
+// PropagateReceiveAdapterStatus uses the availability of the provided Deployment to determine if
+// ContainerSourceConditionReceiveAdapterReady should be marked as true or false.
+func (s *ContainerSourceStatus) PropagateReceiveAdapterStatus(d *appsv1.Deployment) {
+ deploymentAvailableFound := false
+ for _, cond := range d.Status.Conditions {
+ if cond.Type == appsv1.DeploymentAvailable {
+ deploymentAvailableFound = true
+ if cond.Status == corev1.ConditionTrue {
+ containerCondSet.Manage(s).MarkTrue(ContainerSourceConditionReceiveAdapterReady)
+ } else if cond.Status == corev1.ConditionFalse {
+ containerCondSet.Manage(s).MarkFalse(ContainerSourceConditionReceiveAdapterReady, cond.Reason, cond.Message)
+ } else if cond.Status == corev1.ConditionUnknown {
+ containerCondSet.Manage(s).MarkUnknown(ContainerSourceConditionReceiveAdapterReady, cond.Reason, cond.Message)
+ }
+ }
+ }
+ if !deploymentAvailableFound {
+ containerCondSet.Manage(s).MarkUnknown(ContainerSourceConditionReceiveAdapterReady, "DeploymentUnavailable", "The Deployment '%s' is unavailable.", d.Name)
+ }
+}
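
A short sketch of how PropagateReceiveAdapterStatus reads the Deployment's Available condition; the Deployment literal here is hypothetical:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
)

func main() {
	status := &sourcesv1.ContainerSourceStatus{}
	status.InitializeConditions()

	// A Deployment whose Available condition is already True.
	d := &appsv1.Deployment{
		Status: appsv1.DeploymentStatus{
			Conditions: []appsv1.DeploymentCondition{
				{Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue},
			},
		},
	}
	status.PropagateReceiveAdapterStatus(d)

	// ReceiveAdapterReady is now True, but the source is not Ready overall
	// until SinkBindingReady is also True.
	fmt.Println(status.GetCondition(sourcesv1.ContainerSourceConditionReceiveAdapterReady).Status)
	fmt.Println(status.IsReady())
}
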
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_types.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_types.go
new file mode 100644
index 000000000..ab75a451b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_types.go
@@ -0,0 +1,98 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ContainerSource is the Schema for the containersources API
+type ContainerSource struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec ContainerSourceSpec `json:"spec,omitempty"`
+ Status ContainerSourceStatus `json:"status,omitempty"`
+}
+
+var (
+ _ runtime.Object = (*ContainerSource)(nil)
+ _ kmeta.OwnerRefable = (*ContainerSource)(nil)
+ _ apis.Validatable = (*ContainerSource)(nil)
+ _ apis.Defaultable = (*ContainerSource)(nil)
+ _ apis.HasSpec = (*ContainerSource)(nil)
+ _ duckv1.KRShaped = (*ContainerSource)(nil)
+)
+
+// ContainerSourceSpec defines the desired state of ContainerSource
+type ContainerSourceSpec struct {
+ // inherits duck/v1 SourceSpec, which currently provides:
+ // * Sink - a reference to an object that will resolve to a domain name or
+ // a URI directly to use as the sink.
+ // * CloudEventOverrides - defines overrides to control the output format
+ // and modifications of the event sent to the sink.
+ duckv1.SourceSpec `json:",inline"`
+
+ // Template describes the pods that will be created
+ Template corev1.PodTemplateSpec `json:"template"`
+}
+
+// GetGroupVersionKind returns the GroupVersionKind.
+func (*ContainerSource) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("ContainerSource")
+}
+
+// ContainerSourceStatus defines the observed state of ContainerSource
+type ContainerSourceStatus struct {
+ // inherits duck/v1 SourceStatus, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last
+ // processed by the controller.
+ // * Conditions - the latest available observations of a resource's current
+ // state.
+ // * SinkURI - the current active sink URI that has been configured for the
+ // Source.
+ duckv1.SourceStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// ContainerSourceList contains a list of ContainerSource
+type ContainerSourceList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []ContainerSource `json:"items"`
+}
+
+// GetUntypedSpec returns the spec of the ContainerSource.
+func (c *ContainerSource) GetUntypedSpec() interface{} {
+ return c.Spec
+}
+
+// GetStatus retrieves the status of the ContainerSource. Implements the KRShaped interface.
+func (c *ContainerSource) GetStatus() *duckv1.Status {
+ return &c.Status.Status
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_validation.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_validation.go
new file mode 100644
index 000000000..d5d9c5bf2
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/container_validation.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ corev1 "k8s.io/api/core/v1"
+ "knative.dev/pkg/apis"
+)
+
+func (c *ContainerSource) Validate(ctx context.Context) *apis.FieldError {
+ return c.Spec.Validate(ctx).ViaField("spec")
+}
+
+func (cs *ContainerSourceSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+ if fe := cs.Sink.Validate(ctx); fe != nil {
+ errs = errs.Also(fe.ViaField("sink"))
+ }
+
+ // Validate there is at least a container
+ if cs.Template.Spec.Containers == nil || len(cs.Template.Spec.Containers) == 0 {
+ fe := apis.ErrMissingField("containers")
+ errs = errs.Also(fe)
+ } else {
+ for i := range cs.Template.Spec.Containers {
+ if ce := isValidContainer(&cs.Template.Spec.Containers[i]); ce != nil {
+ errs = errs.Also(ce.ViaFieldIndex("containers", i))
+ }
+ }
+ }
+ errs = errs.Also(cs.SourceSpec.Validate(ctx))
+ return errs
+}
+
+func isValidContainer(c *corev1.Container) *apis.FieldError {
+ var errs *apis.FieldError
+ if c.Name == "" {
+ errs = errs.Also(apis.ErrMissingField("name"))
+ }
+ if c.Image == "" {
+ errs = errs.Also(apis.ErrMissingField("image"))
+ }
+ return errs
+}
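
A small sketch of the validation path above, using a spec that is missing both a sink and a container image (all values hypothetical):

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
)

func main() {
	spec := sourcesv1.ContainerSourceSpec{
		Template: corev1.PodTemplateSpec{
			Spec: corev1.PodSpec{
				Containers: []corev1.Container{{Name: "adapter"}}, // Image missing
			},
		},
	}

	if fe := spec.Validate(context.Background()); fe != nil {
		// Reports the unresolved sink and the missing image field.
		fmt.Println(fe.Error())
	}
}
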
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/doc.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/doc.go
new file mode 100644
index 000000000..94a692f27
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1 contains API Schema definitions for the sources v1 API group.
+// +k8s:deepcopy-gen=package
+// +groupName=sources.knative.dev
+package v1
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_conversion.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_conversion.go
new file mode 100644
index 000000000..598a344bb
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_conversion.go
@@ -0,0 +1,36 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+// Converts source from v1.PingSource into a higher version.
+func (source *PingSource) ConvertTo(ctx context.Context, sink apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+// Converts source from a higher version into v1.PingSource
+func (sink *PingSource) ConvertFrom(ctx context.Context, source apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", source)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_defaults.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_defaults.go
new file mode 100644
index 000000000..15610151b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_defaults.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+)
+
+const (
+ defaultSchedule = "* * * * *"
+)
+
+func (s *PingSource) SetDefaults(ctx context.Context) {
+ s.Spec.SetDefaults(ctx)
+}
+
+func (ss *PingSourceSpec) SetDefaults(ctx context.Context) {
+ if ss.Schedule == "" {
+ ss.Schedule = defaultSchedule
+ }
+}
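
A minimal sketch of the defaulting above: an empty schedule becomes the every-minute cron expression.

package main

import (
	"context"
	"fmt"

	sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
)

func main() {
	ps := &sourcesv1.PingSource{}
	ps.SetDefaults(context.Background())
	fmt.Println(ps.Spec.Schedule) // * * * * *
}
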
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_lifecycle.go
new file mode 100644
index 000000000..5f7dd5470
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_lifecycle.go
@@ -0,0 +1,145 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "fmt"
+
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+)
+
+const (
+ // PingSourceConditionReady has status True when the PingSource is ready to send events.
+ PingSourceConditionReady = apis.ConditionReady
+
+ // PingSourceConditionSinkProvided has status True when the PingSource has been configured with a sink target.
+ PingSourceConditionSinkProvided apis.ConditionType = "SinkProvided"
+
+ // PingSourceConditionDeployed has status True when the PingSource has had its receive adapter deployment created.
+ PingSourceConditionDeployed apis.ConditionType = "Deployed"
+
+ // PingSourceConditionOIDCIdentityCreated has status True when the PingSource has had its OIDC identity created.
+ PingSourceConditionOIDCIdentityCreated apis.ConditionType = "OIDCIdentityCreated"
+)
+
+var PingSourceCondSet = apis.NewLivingConditionSet(
+ PingSourceConditionSinkProvided,
+ PingSourceConditionDeployed,
+ PingSourceConditionOIDCIdentityCreated)
+
+const (
+ // PingSourceEventType is the default PingSource CloudEvent type.
+ PingSourceEventType = "dev.knative.sources.ping"
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*PingSource) GetConditionSet() apis.ConditionSet {
+ return PingSourceCondSet
+}
+
+// PingSourceSource returns the PingSource CloudEvent source.
+func PingSourceSource(namespace, name string) string {
+ return fmt.Sprintf("/apis/v1/namespaces/%s/pingsources/%s", namespace, name)
+}
+
+// GetUntypedSpec returns the spec of the PingSource.
+func (s *PingSource) GetUntypedSpec() interface{} {
+ return s.Spec
+}
+
+// GetGroupVersionKind returns the GroupVersionKind.
+func (s *PingSource) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("PingSource")
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (s *PingSourceStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return PingSourceCondSet.Manage(s).GetCondition(t)
+}
+
+// GetTopLevelCondition returns the top level Condition.
+func (ps *PingSourceStatus) GetTopLevelCondition() *apis.Condition {
+ return PingSourceCondSet.Manage(ps).GetTopLevelCondition()
+}
+
+// IsReady returns true if the resource is ready overall.
+func (s *PingSourceStatus) IsReady() bool {
+ return PingSourceCondSet.Manage(s).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (s *PingSourceStatus) InitializeConditions() {
+ PingSourceCondSet.Manage(s).InitializeConditions()
+}
+
+// MarkSink sets the condition that the source has a sink configured.
+func (s *PingSourceStatus) MarkSink(uri *duckv1.Addressable) {
+ if uri != nil {
+ s.SinkURI = uri.URL
+ s.SinkCACerts = uri.CACerts
+ s.SinkAudience = uri.Audience
+ PingSourceCondSet.Manage(s).MarkTrue(PingSourceConditionSinkProvided)
+ } else {
+ PingSourceCondSet.Manage(s).MarkFalse(PingSourceConditionSinkProvided, "SinkEmpty", "Sink has resolved to empty.")
+ }
+}
+
+// MarkNoSink sets the condition that the source does not have a sink configured.
+func (s *PingSourceStatus) MarkNoSink(reason, messageFormat string, messageA ...interface{}) {
+ PingSourceCondSet.Manage(s).MarkFalse(PingSourceConditionSinkProvided, reason, messageFormat, messageA...)
+}
+
+// PropagateDeploymentAvailability uses the availability of the provided Deployment to determine if
+// PingSourceConditionDeployed should be marked as true or false.
+func (s *PingSourceStatus) PropagateDeploymentAvailability(d *appsv1.Deployment) {
+ deploymentAvailableFound := false
+ for _, cond := range d.Status.Conditions {
+ if cond.Type == appsv1.DeploymentAvailable {
+ deploymentAvailableFound = true
+ if cond.Status == corev1.ConditionTrue {
+ PingSourceCondSet.Manage(s).MarkTrue(PingSourceConditionDeployed)
+ } else if cond.Status == corev1.ConditionFalse {
+ PingSourceCondSet.Manage(s).MarkFalse(PingSourceConditionDeployed, cond.Reason, cond.Message)
+ } else if cond.Status == corev1.ConditionUnknown {
+ PingSourceCondSet.Manage(s).MarkUnknown(PingSourceConditionDeployed, cond.Reason, cond.Message)
+ }
+ }
+ }
+ if !deploymentAvailableFound {
+ PingSourceCondSet.Manage(s).MarkUnknown(PingSourceConditionDeployed, "DeploymentUnavailable", "The Deployment '%s' is unavailable.", d.Name)
+ }
+}
+
+func (s *PingSourceStatus) MarkOIDCIdentityCreatedSucceeded() {
+ PingSourceCondSet.Manage(s).MarkTrue(PingSourceConditionOIDCIdentityCreated)
+}
+
+func (s *PingSourceStatus) MarkOIDCIdentityCreatedSucceededWithReason(reason, messageFormat string, messageA ...interface{}) {
+ PingSourceCondSet.Manage(s).MarkTrueWithReason(PingSourceConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
+
+func (s *PingSourceStatus) MarkOIDCIdentityCreatedFailed(reason, messageFormat string, messageA ...interface{}) {
+ PingSourceCondSet.Manage(s).MarkFalse(PingSourceConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
+
+func (s *PingSourceStatus) MarkOIDCIdentityCreatedUnknown(reason, messageFormat string, messageA ...interface{}) {
+ PingSourceCondSet.Manage(s).MarkUnknown(PingSourceConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_types.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_types.go
new file mode 100644
index 000000000..b911c6911
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_types.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "knative.dev/pkg/apis"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:defaulter-gen=true
+
+// PingSource is the Schema for the PingSources API.
+type PingSource struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec PingSourceSpec `json:"spec,omitempty"`
+ Status PingSourceStatus `json:"status,omitempty"`
+}
+
+// Check the interfaces that PingSource should be implementing.
+var (
+ _ runtime.Object = (*PingSource)(nil)
+ _ kmeta.OwnerRefable = (*PingSource)(nil)
+ _ apis.Validatable = (*PingSource)(nil)
+ _ apis.Defaultable = (*PingSource)(nil)
+ _ apis.HasSpec = (*PingSource)(nil)
+ _ duckv1.KRShaped = (*PingSource)(nil)
+)
+
+// PingSourceSpec defines the desired state of the PingSource.
+type PingSourceSpec struct {
+ // inherits duck/v1 SourceSpec, which currently provides:
+ // * Sink - a reference to an object that will resolve to a domain name or
+ // a URI directly to use as the sink.
+ // * CloudEventOverrides - defines overrides to control the output format
+ // and modifications of the event sent to the sink.
+ duckv1.SourceSpec `json:",inline"`
+
+ // Schedule is the cron schedule. Defaults to `* * * * *`.
+ // +optional
+ Schedule string `json:"schedule,omitempty"`
+
+ // Timezone modifies the actual time relative to the specified timezone.
+ // Defaults to the system time zone.
+ // More general information about time zones: https://www.iana.org/time-zones
+ // List of valid timezone values: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+ Timezone string `json:"timezone,omitempty"`
+
+ // ContentType is the media type of Data or DataBase64. Default is empty.
+ // +optional
+ ContentType string `json:"contentType,omitempty"`
+
+ // Data is data used as the body of the event posted to the sink. Default is empty.
+ // Mutually exclusive with DataBase64.
+ // +optional
+ Data string `json:"data,omitempty"`
+
+ // DataBase64 is the base64-encoded string of the actual event's body posted to the sink. Default is empty.
+ // Mutually exclusive with Data.
+ // +optional
+ DataBase64 string `json:"dataBase64,omitempty"`
+}
+
+// PingSourceStatus defines the observed state of PingSource.
+type PingSourceStatus struct {
+ // inherits duck/v1 SourceStatus, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last
+ // processed by the controller.
+ // * Conditions - the latest available observations of a resource's current
+ // state.
+ // * SinkURI - the current active sink URI that has been configured for the
+ // Source.
+ duckv1.SourceStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PingSourceList contains a list of PingSources.
+type PingSourceList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []PingSource `json:"items"`
+}
+
+// GetStatus retrieves the status of the PingSource. Implements the KRShaped interface.
+func (p *PingSource) GetStatus() *duckv1.Status {
+ return &p.Status.Status
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_validation.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_validation.go
new file mode 100644
index 000000000..b8315ee0d
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/ping_validation.go
@@ -0,0 +1,115 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+
+ "github.com/robfig/cron/v3"
+ "knative.dev/pkg/apis"
+
+ "knative.dev/eventing/pkg/apis/sources/config"
+)
+
+func (c *PingSource) Validate(ctx context.Context) *apis.FieldError {
+ return c.Spec.Validate(ctx).ViaField("spec")
+}
+
+func (cs *PingSourceSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+ schedule := cs.Schedule
+
+ errs = validateDescriptor(schedule)
+
+ if cs.Timezone != "" {
+ schedule = "CRON_TZ=" + cs.Timezone + " " + schedule
+ }
+
+ parser := cron.NewParser(
+ cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
+ )
+
+ if _, err := parser.Parse(schedule); err != nil {
+ if strings.HasPrefix(err.Error(), "provided bad location") {
+ fe := apis.ErrInvalidValue(err, "timezone")
+ errs = errs.Also(fe)
+ } else {
+ fe := apis.ErrInvalidValue(err, "schedule")
+ errs = errs.Also(fe)
+ }
+ }
+
+ pingConfig := config.FromContextOrDefaults(ctx)
+ pingDefaults := pingConfig.PingDefaults.GetPingConfig()
+
+ if fe := cs.Sink.Validate(ctx); fe != nil {
+ errs = errs.Also(fe.ViaField("sink"))
+ }
+
+ if cs.Data != "" && cs.DataBase64 != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("data", "dataBase64"))
+ } else if cs.DataBase64 != "" {
+ if bsize := int64(len(cs.DataBase64)); pingDefaults.DataMaxSize > -1 && bsize > pingDefaults.DataMaxSize {
+ fe := apis.ErrInvalidValue(fmt.Sprintf("the dataBase64 length of %d bytes exceeds limit set at %d.", bsize, pingDefaults.DataMaxSize), "dataBase64")
+ errs = errs.Also(fe)
+ }
+ decoded, err := base64.StdEncoding.DecodeString(cs.DataBase64)
+ // invalid base64 string
+ if err != nil {
+ errs = errs.Also(apis.ErrInvalidValue(err, "dataBase64"))
+ } else {
+ // validate if the decoded base64 string is valid JSON
+ if cs.ContentType == cloudevents.ApplicationJSON {
+ if err := validateJSON(string(decoded)); err != nil {
+ errs = errs.Also(apis.ErrInvalidValue(err, "dataBase64"))
+ }
+ }
+ }
+ } else if cs.Data != "" {
+ if bsize := int64(len(cs.Data)); pingDefaults.DataMaxSize > -1 && bsize > pingDefaults.DataMaxSize {
+ fe := apis.ErrInvalidValue(fmt.Sprintf("the data length of %d bytes exceeds limit set at %d.", bsize, pingDefaults.DataMaxSize), "data")
+ errs = errs.Also(fe)
+ }
+ if cs.ContentType == cloudevents.ApplicationJSON {
+ // validate if data is valid JSON
+ if err := validateJSON(cs.Data); err != nil {
+ errs = errs.Also(apis.ErrInvalidValue(err, "data"))
+ }
+ }
+ }
+ errs = errs.Also(cs.SourceSpec.Validate(ctx))
+ return errs
+}
+
+func validateJSON(str string) error {
+ var objmap map[string]interface{}
+ return json.Unmarshal([]byte(str), &objmap)
+}
+
+func validateDescriptor(spec string) *apis.FieldError {
+ if strings.Contains(spec, "@every") {
+ return apis.ErrInvalidValue(errors.New("unsupported descriptor @every"), "schedule")
+ }
+ return nil
+}
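
The schedule/timezone check above boils down to prefixing the expression with CRON_TZ and handing it to the robfig/cron parser; a bad location surfaces as a "provided bad location" error, which the code maps onto the timezone field. A standalone sketch of that step, with made-up schedule and timezone values:

package main

import (
	"fmt"

	"github.com/robfig/cron/v3"
)

func main() {
	parser := cron.NewParser(
		cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
	)

	for _, schedule := range []string{
		"CRON_TZ=Europe/Paris */5 * * * *", // valid: every 5 minutes, Paris time
		"CRON_TZ=Not/AZone * * * * *",      // invalid: unknown location
	} {
		if _, err := parser.Parse(schedule); err != nil {
			fmt.Println("rejected:", err)
		} else {
			fmt.Println("accepted:", schedule)
		}
	}
}
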
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/register.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/register.go
new file mode 100644
index 000000000..d7be9a9cc
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/register.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "knative.dev/eventing/pkg/apis/sources"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: sources.GroupName, Version: "v1"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &ApiServerSource{},
+ &ApiServerSourceList{},
+ &SinkBinding{},
+ &SinkBindingList{},
+ &ContainerSource{},
+ &ContainerSourceList{},
+ &PingSource{},
+ &PingSourceList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_context.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_context.go
new file mode 100644
index 000000000..41c793c30
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_context.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+ "knative.dev/pkg/resolver"
+)
+
+// sinkURIKey is used as the key for associating information
+// with a context.Context.
+type sinkURIKey struct{}
+type resolverKey struct{}
+
+// WithSinkURI notes on the context for binding that the resolved SinkURI
+// is the provided apis.URL.
+func WithSinkURI(ctx context.Context, uri *apis.URL) context.Context {
+ return context.WithValue(ctx, sinkURIKey{}, uri)
+}
+
+func WithURIResolver(ctx context.Context, resolver *resolver.URIResolver) context.Context {
+ return context.WithValue(ctx, resolverKey{}, resolver)
+}
+
+// GetSinkURI accesses the apis.URL for the Sink URI that has been associated
+// with this context.
+func GetSinkURI(ctx context.Context) *apis.URL {
+ value := ctx.Value(sinkURIKey{})
+ if value == nil {
+ return nil
+ }
+ return value.(*apis.URL)
+}
+
+func GetURIResolver(ctx context.Context) *resolver.URIResolver {
+ value := ctx.Value(resolverKey{})
+ if value == nil {
+ return nil
+ }
+ return value.(*resolver.URIResolver)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_conversion.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_conversion.go
new file mode 100644
index 000000000..b5c911830
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_conversion.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "fmt"
+
+ "knative.dev/pkg/apis"
+)
+
+// ConvertTo implements apis.Convertible
+func (source *SinkBinding) ConvertTo(ctx context.Context, sink apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", sink)
+}
+
+// ConvertFrom implements apis.Convertible
+func (sink *SinkBinding) ConvertFrom(ctx context.Context, source apis.Convertible) error {
+ return fmt.Errorf("v1 is the highest known version, got: %T", source)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_defaults.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_defaults.go
new file mode 100644
index 000000000..8ef237307
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_defaults.go
@@ -0,0 +1,34 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+)
+
+// SetDefaults implements apis.Defaultable
+func (fb *SinkBinding) SetDefaults(ctx context.Context) {
+ if fb.Spec.Subject.Namespace == "" {
+ // Default the subject's namespace to our namespace.
+ fb.Spec.Subject.Namespace = fb.Namespace
+ }
+
+ withNS := apis.WithinParent(ctx, fb.ObjectMeta)
+ fb.Spec.Sink.SetDefaults(withNS)
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_lifecycle.go
new file mode 100644
index 000000000..081549a20
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_lifecycle.go
@@ -0,0 +1,325 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "strings"
+
+ "go.uber.org/zap"
+ corev1listers "k8s.io/client-go/listers/core/v1"
+
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+
+ "knative.dev/pkg/apis"
+ "knative.dev/pkg/apis/duck"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/logging"
+ "knative.dev/pkg/tracker"
+
+ "knative.dev/eventing/pkg/eventingtls"
+)
+
+const (
+ oidcTokenVolumeName = "oidc-token"
+)
+
+var sbCondSet = apis.NewLivingConditionSet(
+ SinkBindingConditionSinkProvided,
+ SinkBindingConditionOIDCIdentityCreated,
+ SinkBindingConditionOIDCTokenSecretCreated,
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*SinkBinding) GetConditionSet() apis.ConditionSet {
+ return sbCondSet
+}
+
+// GetGroupVersionKind returns the GroupVersionKind.
+func (*SinkBinding) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("SinkBinding")
+}
+
+// GetUntypedSpec implements apis.HasSpec
+func (s *SinkBinding) GetUntypedSpec() interface{} {
+ return s.Spec
+}
+
+// GetSubject implements psbinding.Bindable
+func (sb *SinkBinding) GetSubject() tracker.Reference {
+ return sb.Spec.Subject
+}
+
+// GetBindingStatus implements psbinding.Bindable
+func (sb *SinkBinding) GetBindingStatus() duck.BindableStatus {
+ return &sb.Status
+}
+
+// SetObservedGeneration implements psbinding.BindableStatus
+func (sbs *SinkBindingStatus) SetObservedGeneration(gen int64) {
+ sbs.ObservedGeneration = gen
+}
+
+// InitializeConditions populates the SinkBindingStatus's conditions field
+// with all of its conditions configured to Unknown.
+func (sbs *SinkBindingStatus) InitializeConditions() {
+ sbCondSet.Manage(sbs).InitializeConditions()
+}
+
+// MarkBindingUnavailable marks the SinkBinding's Ready condition to False with
+// the provided reason and message.
+func (sbs *SinkBindingStatus) MarkBindingUnavailable(reason, message string) {
+ sbCondSet.Manage(sbs).MarkFalse(SinkBindingConditionReady, reason, message)
+}
+
+// MarkBindingAvailable marks the SinkBinding's Ready condition to True.
+func (sbs *SinkBindingStatus) MarkBindingAvailable() {
+ sbCondSet.Manage(sbs).MarkTrue(SinkBindingConditionReady)
+}
+
+// MarkSink sets the condition that the source has a sink configured.
+func (sbs *SinkBindingStatus) MarkSink(addr *duckv1.Addressable) {
+ if addr != nil {
+ sbs.SinkURI = addr.URL
+ sbs.SinkCACerts = addr.CACerts
+ sbs.SinkAudience = addr.Audience
+ sbCondSet.Manage(sbs).MarkTrue(SinkBindingConditionSinkProvided)
+ } else {
+ sbCondSet.Manage(sbs).MarkFalse(SinkBindingConditionSinkProvided, "SinkEmpty", "Sink has resolved to empty.%s", "")
+ }
+}
+
+func (sbs *SinkBindingStatus) MarkOIDCIdentityCreatedSucceeded() {
+ sbCondSet.Manage(sbs).MarkTrue(SinkBindingConditionOIDCIdentityCreated)
+}
+
+func (sbs *SinkBindingStatus) MarkOIDCIdentityCreatedSucceededWithReason(reason, messageFormat string, messageA ...interface{}) {
+ sbCondSet.Manage(sbs).MarkTrueWithReason(SinkBindingConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
+
+func (sbs *SinkBindingStatus) MarkOIDCIdentityCreatedFailed(reason, messageFormat string, messageA ...interface{}) {
+ sbCondSet.Manage(sbs).MarkFalse(SinkBindingConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
+
+func (sbs *SinkBindingStatus) MarkOIDCIdentityCreatedUnknown(reason, messageFormat string, messageA ...interface{}) {
+ sbCondSet.Manage(sbs).MarkUnknown(SinkBindingConditionOIDCIdentityCreated, reason, messageFormat, messageA...)
+}
+
+func (sbs *SinkBindingStatus) MarkOIDCTokenSecretCreatedSuccceeded() {
+ sbCondSet.Manage(sbs).MarkTrue(SinkBindingConditionOIDCTokenSecretCreated)
+}
+
+func (sbs *SinkBindingStatus) MarkOIDCTokenSecretCreatedSuccceededWithReason(reason, messageFormat string, messageA ...interface{}) {
+ sbCondSet.Manage(sbs).MarkTrueWithReason(SinkBindingConditionOIDCTokenSecretCreated, reason, messageFormat, messageA...)
+}
+
+func (sbs *SinkBindingStatus) MarkOIDCTokenSecretCreatedFailed(reason, messageFormat string, messageA ...interface{}) {
+ sbCondSet.Manage(sbs).MarkFalse(SinkBindingConditionOIDCTokenSecretCreated, reason, messageFormat, messageA...)
+}
+
+func (sbs *SinkBindingStatus) MarkOIDCTokenSecretCreatedUnknown(reason, messageFormat string, messageA ...interface{}) {
+ sbCondSet.Manage(sbs).MarkUnknown(SinkBindingConditionOIDCTokenSecretCreated, reason, messageFormat, messageA...)
+}
+
+// Do implements psbinding.Bindable
+func (sb *SinkBinding) Do(ctx context.Context, ps *duckv1.WithPod) {
+ // First undo so that we can just unconditionally append below.
+ sb.Undo(ctx, ps)
+
+ resolver := GetURIResolver(ctx)
+ if resolver == nil {
+ logging.FromContext(ctx).Errorf("No Resolver associated with context for sink: %+v", sb)
+ return
+ }
+ addr, err := resolver.AddressableFromDestinationV1(ctx, sb.Spec.Sink, sb)
+ if err != nil {
+ logging.FromContext(ctx).Errorw("URI could not be extracted from destination: ", zap.Error(err))
+ return
+ }
+ sb.Status.MarkSink(addr)
+
+ var ceOverrides string
+ if sb.Spec.CloudEventOverrides != nil {
+ if co, err := json.Marshal(sb.Spec.SourceSpec.CloudEventOverrides); err != nil {
+ logging.FromContext(ctx).Errorw(fmt.Sprintf("Failed to marshal CloudEventOverrides into JSON for %+v", sb), zap.Error(err))
+ } else if len(co) > 0 {
+ ceOverrides = string(co)
+ }
+ }
+
+ for i := range ps.Spec.Template.Spec.InitContainers {
+ ps.Spec.Template.Spec.InitContainers[i].Env = append(ps.Spec.Template.Spec.InitContainers[i].Env, corev1.EnvVar{
+ Name: "K_SINK",
+ Value: addr.URL.String(),
+ })
+ if addr.CACerts != nil {
+ ps.Spec.Template.Spec.InitContainers[i].Env = append(ps.Spec.Template.Spec.InitContainers[i].Env, corev1.EnvVar{
+ Name: "K_CA_CERTS",
+ Value: *addr.CACerts,
+ })
+ }
+ ps.Spec.Template.Spec.InitContainers[i].Env = append(ps.Spec.Template.Spec.InitContainers[i].Env, corev1.EnvVar{
+ Name: "K_CE_OVERRIDES",
+ Value: ceOverrides,
+ })
+ }
+ for i := range ps.Spec.Template.Spec.Containers {
+ ps.Spec.Template.Spec.Containers[i].Env = append(ps.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{
+ Name: "K_SINK",
+ Value: addr.URL.String(),
+ })
+ if addr.CACerts != nil {
+ ps.Spec.Template.Spec.Containers[i].Env = append(ps.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{
+ Name: "K_CA_CERTS",
+ Value: *addr.CACerts,
+ })
+ }
+ ps.Spec.Template.Spec.Containers[i].Env = append(ps.Spec.Template.Spec.Containers[i].Env, corev1.EnvVar{
+ Name: "K_CE_OVERRIDES",
+ Value: ceOverrides,
+ })
+ }
+
+ pss, err := eventingtls.AddTrustBundleVolumes(GetTrustBundleConfigMapLister(ctx), sb, &ps.Spec.Template.Spec)
+ if err != nil {
+ logging.FromContext(ctx).Errorw("Failed to add trust bundle volumes %s/%s: %+v", zap.Error(err))
+ return
+ }
+ ps.Spec.Template.Spec = *pss
+
+ if sb.Status.OIDCTokenSecretName != nil {
+ ps.Spec.Template.Spec.Volumes = append(ps.Spec.Template.Spec.Volumes, corev1.Volume{
+ Name: oidcTokenVolumeName,
+ VolumeSource: corev1.VolumeSource{
+ Projected: &corev1.ProjectedVolumeSource{
+ Sources: []corev1.VolumeProjection{
+ {
+ Secret: &corev1.SecretProjection{
+ LocalObjectReference: corev1.LocalObjectReference{
+ Name: *sb.Status.OIDCTokenSecretName,
+ },
+ },
+ },
+ },
+ },
+ },
+ })
+
+ for i := range ps.Spec.Template.Spec.Containers {
+ ps.Spec.Template.Spec.Containers[i].VolumeMounts = append(ps.Spec.Template.Spec.Containers[i].VolumeMounts, corev1.VolumeMount{
+ Name: oidcTokenVolumeName,
+ MountPath: "/oidc",
+ })
+ }
+ for i := range ps.Spec.Template.Spec.InitContainers {
+ ps.Spec.Template.Spec.InitContainers[i].VolumeMounts = append(ps.Spec.Template.Spec.InitContainers[i].VolumeMounts, corev1.VolumeMount{
+ Name: oidcTokenVolumeName,
+ MountPath: "/oidc",
+ })
+ }
+ }
+}
+
+func (sb *SinkBinding) Undo(ctx context.Context, ps *duckv1.WithPod) {
+ for i, c := range ps.Spec.Template.Spec.InitContainers {
+ if len(c.Env) > 0 {
+ env := make([]corev1.EnvVar, 0, len(ps.Spec.Template.Spec.InitContainers[i].Env))
+ for j, ev := range c.Env {
+ switch ev.Name {
+ case "K_SINK", "K_CE_OVERRIDES", "K_CA_CERTS":
+ continue
+ default:
+ env = append(env, ps.Spec.Template.Spec.InitContainers[i].Env[j])
+ }
+ }
+ ps.Spec.Template.Spec.InitContainers[i].Env = env
+ }
+
+ if len(ps.Spec.Template.Spec.InitContainers[i].VolumeMounts) > 0 {
+ volumeMounts := make([]corev1.VolumeMount, 0, len(ps.Spec.Template.Spec.InitContainers[i].VolumeMounts))
+ for j, vol := range c.VolumeMounts {
+ if vol.Name == oidcTokenVolumeName {
+ continue
+ }
+ if strings.HasPrefix(vol.Name, eventingtls.TrustBundleVolumeNamePrefix) {
+ continue
+ }
+ volumeMounts = append(volumeMounts, ps.Spec.Template.Spec.InitContainers[i].VolumeMounts[j])
+ }
+ ps.Spec.Template.Spec.InitContainers[i].VolumeMounts = volumeMounts
+ }
+ }
+ for i, c := range ps.Spec.Template.Spec.Containers {
+ if len(c.Env) > 0 {
+ env := make([]corev1.EnvVar, 0, len(ps.Spec.Template.Spec.Containers[i].Env))
+ for j, ev := range c.Env {
+ switch ev.Name {
+ case "K_SINK", "K_CE_OVERRIDES", "K_CA_CERTS":
+ continue
+ default:
+ env = append(env, ps.Spec.Template.Spec.Containers[i].Env[j])
+ }
+ }
+ ps.Spec.Template.Spec.Containers[i].Env = env
+ }
+
+ if len(ps.Spec.Template.Spec.Containers[i].VolumeMounts) > 0 {
+ volumeMounts := make([]corev1.VolumeMount, 0, len(ps.Spec.Template.Spec.Containers[i].VolumeMounts))
+ for j, vol := range c.VolumeMounts {
+ if vol.Name == oidcTokenVolumeName {
+ continue
+ }
+ if strings.HasPrefix(vol.Name, eventingtls.TrustBundleVolumeNamePrefix) {
+ continue
+ }
+ volumeMounts = append(volumeMounts, ps.Spec.Template.Spec.Containers[i].VolumeMounts[j])
+ }
+ ps.Spec.Template.Spec.Containers[i].VolumeMounts = volumeMounts
+ }
+ }
+
+ if len(ps.Spec.Template.Spec.Volumes) > 0 {
+ volumes := make([]corev1.Volume, 0, len(ps.Spec.Template.Spec.Volumes))
+ for i, vol := range ps.Spec.Template.Spec.Volumes {
+ if vol.Name == oidcTokenVolumeName {
+ continue
+ }
+ if strings.HasPrefix(vol.Name, eventingtls.TrustBundleVolumeNamePrefix) {
+ continue
+ }
+ volumes = append(volumes, ps.Spec.Template.Spec.Volumes[i])
+ }
+ ps.Spec.Template.Spec.Volumes = volumes
+ }
+}
+
+type configMapListerKey struct{}
+
+func WithTrustBundleConfigMapLister(ctx context.Context, lister corev1listers.ConfigMapLister) context.Context {
+ return context.WithValue(ctx, configMapListerKey{}, lister)
+}
+
+func GetTrustBundleConfigMapLister(ctx context.Context) corev1listers.ConfigMapLister {
+ value := ctx.Value(configMapListerKey{})
+ if value == nil {
+ panic("No ConfigMapLister found in context.")
+ }
+ return value.(corev1listers.ConfigMapLister)
+}
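
A sketch of Undo on a PodSpecable that already carries injected bindings; only the binding-owned environment variables are stripped, and unrelated env vars survive (container and variable names here are hypothetical):

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	duckv1 "knative.dev/pkg/apis/duck/v1"
	sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
)

func main() {
	ps := &duckv1.WithPod{
		Spec: duckv1.WithPodSpec{
			Template: duckv1.PodSpecable{
				Spec: corev1.PodSpec{
					Containers: []corev1.Container{{
						Name: "user-container",
						Env: []corev1.EnvVar{
							{Name: "K_SINK", Value: "https://old.example.com"},
							{Name: "MY_VAR", Value: "kept"},
						},
					}},
				},
			},
		},
	}

	sb := &sourcesv1.SinkBinding{}
	sb.Undo(context.Background(), ps)

	// Only MY_VAR remains; K_SINK (and K_CE_OVERRIDES/K_CA_CERTS, if present) is removed.
	fmt.Println(ps.Spec.Template.Spec.Containers[0].Env)
}
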
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_types.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_types.go
new file mode 100644
index 000000000..e5e5d7a21
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_types.go
@@ -0,0 +1,118 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "knative.dev/pkg/apis"
+ "knative.dev/pkg/apis/duck"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:defaulter-gen=true
+
+// SinkBinding describes a Binding that is also a Source.
+// The `sink` (from the Source duck) is resolved to a URL and
+// then projected into the `subject` by augmenting the runtime
+// contract of the referenced containers to have a `K_SINK`
+// environment variable holding the endpoint to which to send
+// cloud events.
+type SinkBinding struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec SinkBindingSpec `json:"spec"`
+ Status SinkBindingStatus `json:"status"`
+}
+
+// Check the interfaces that SinkBinding should be implementing.
+var (
+ _ runtime.Object = (*SinkBinding)(nil)
+ _ kmeta.OwnerRefable = (*SinkBinding)(nil)
+ _ apis.Validatable = (*SinkBinding)(nil)
+ _ apis.Defaultable = (*SinkBinding)(nil)
+ _ apis.HasSpec = (*SinkBinding)(nil)
+ _ duckv1.KRShaped = (*SinkBinding)(nil)
+ _ duck.Bindable = (*SinkBinding)(nil)
+)
+
+// SinkBindingSpec holds the desired state of the SinkBinding (from the client).
+type SinkBindingSpec struct {
+ // inherits duck/v1 SourceSpec, which currently provides:
+ // * Sink - a reference to an object that will resolve to a domain name or
+ // a URI directly to use as the sink.
+ // * CloudEventOverrides - defines overrides to control the output format
+ // and modifications of the event sent to the sink.
+ duckv1.SourceSpec `json:",inline"`
+
+ // inherits duck/v1 BindingSpec, which currently provides:
+ // * Subject - Subject references the resource(s) whose "runtime contract"
+ // should be augmented by Binding implementations.
+ duckv1.BindingSpec `json:",inline"`
+}
+
+const (
+ // SinkBindingConditionReady is configured to indicate whether the Binding
+ // has been configured for resources subject to its runtime contract.
+ SinkBindingConditionReady = apis.ConditionReady
+
+ // SinkBindingConditionSinkProvided is configured to indicate whether the
+ // sink has been properly extracted from the resolver.
+ SinkBindingConditionSinkProvided apis.ConditionType = "SinkProvided"
+
+ // SinkBindingConditionOIDCIdentityCreated is configured to indicate whether
+ // the OIDC identity has been created for the sink.
+ SinkBindingConditionOIDCIdentityCreated apis.ConditionType = "OIDCIdentityCreated"
+
+ // SinkBindingConditionOIDCTokenSecretCreated is configured to indicate whether
+ // the secret containing the OIDC token has been created for the sink.
+ SinkBindingConditionOIDCTokenSecretCreated apis.ConditionType = "OIDCTokenSecretCreated"
+)
+
+// SinkBindingStatus communicates the observed state of the SinkBinding (from the controller).
+type SinkBindingStatus struct {
+ // inherits duck/v1 SourceStatus, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last
+ // processed by the controller.
+ // * Conditions - the latest available observations of a resource's current
+ // state.
+ // * SinkURI - the current active sink URI that has been configured for the
+ // Source.
+ duckv1.SourceStatus `json:",inline"`
+
+ // OIDCTokenSecretName is the name of the secret containing the token for
+ // this SinkBinding's OIDC authentication.
+ OIDCTokenSecretName *string `json:"oidcTokenSecretName,omitempty"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// SinkBindingList contains a list of SinkBinding
+type SinkBindingList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []SinkBinding `json:"items"`
+}
+
+// GetStatus retrieves the status of the SinkBinding. Implements the KRShaped interface.
+func (s *SinkBinding) GetStatus() *duckv1.Status {
+ return &s.Status.Status
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_validation.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_validation.go
new file mode 100644
index 000000000..76e4421b7
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/sinkbinding_validation.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+)
+
+// Validate implements apis.Validatable
+func (fb *SinkBinding) Validate(ctx context.Context) *apis.FieldError {
+ err := fb.Spec.Validate(ctx).ViaField("spec")
+ if fb.Spec.Subject.Namespace != "" && fb.Namespace != fb.Spec.Subject.Namespace {
+ err = err.Also(apis.ErrInvalidValue(fb.Spec.Subject.Namespace, "spec.subject.namespace"))
+ }
+ return err
+}
+
+// Validate implements apis.Validatable
+func (fbs *SinkBindingSpec) Validate(ctx context.Context) *apis.FieldError {
+ err := fbs.Subject.Validate(ctx).ViaField("subject").Also(
+ fbs.Sink.Validate(ctx).ViaField("sink"))
+ err = err.Also(fbs.SourceSpec.Validate(ctx))
+ return err
+}
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1/zz_generated.deepcopy.go
new file mode 100644
index 000000000..8de185540
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1/zz_generated.deepcopy.go
@@ -0,0 +1,481 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIVersionKind) DeepCopyInto(out *APIVersionKind) {
+ *out = *in
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersionKind.
+func (in *APIVersionKind) DeepCopy() *APIVersionKind {
+ if in == nil {
+ return nil
+ }
+ out := new(APIVersionKind)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *APIVersionKindSelector) DeepCopyInto(out *APIVersionKindSelector) {
+ *out = *in
+ if in.LabelSelector != nil {
+ in, out := &in.LabelSelector, &out.LabelSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APIVersionKindSelector.
+func (in *APIVersionKindSelector) DeepCopy() *APIVersionKindSelector {
+ if in == nil {
+ return nil
+ }
+ out := new(APIVersionKindSelector)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApiServerSource) DeepCopyInto(out *ApiServerSource) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiServerSource.
+func (in *ApiServerSource) DeepCopy() *ApiServerSource {
+ if in == nil {
+ return nil
+ }
+ out := new(ApiServerSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ApiServerSource) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApiServerSourceList) DeepCopyInto(out *ApiServerSourceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ApiServerSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiServerSourceList.
+func (in *ApiServerSourceList) DeepCopy() *ApiServerSourceList {
+ if in == nil {
+ return nil
+ }
+ out := new(ApiServerSourceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ApiServerSourceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApiServerSourceSpec) DeepCopyInto(out *ApiServerSourceSpec) {
+ *out = *in
+ in.SourceSpec.DeepCopyInto(&out.SourceSpec)
+ if in.Resources != nil {
+ in, out := &in.Resources, &out.Resources
+ *out = make([]APIVersionKindSelector, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ if in.ResourceOwner != nil {
+ in, out := &in.ResourceOwner, &out.ResourceOwner
+ *out = new(APIVersionKind)
+ **out = **in
+ }
+ if in.NamespaceSelector != nil {
+ in, out := &in.NamespaceSelector, &out.NamespaceSelector
+ *out = new(metav1.LabelSelector)
+ (*in).DeepCopyInto(*out)
+ }
+ if in.Filters != nil {
+ in, out := &in.Filters, &out.Filters
+ *out = make([]eventingv1.SubscriptionsAPIFilter, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiServerSourceSpec.
+func (in *ApiServerSourceSpec) DeepCopy() *ApiServerSourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ApiServerSourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ApiServerSourceStatus) DeepCopyInto(out *ApiServerSourceStatus) {
+ *out = *in
+ in.SourceStatus.DeepCopyInto(&out.SourceStatus)
+ if in.Namespaces != nil {
+ in, out := &in.Namespaces, &out.Namespaces
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ApiServerSourceStatus.
+func (in *ApiServerSourceStatus) DeepCopy() *ApiServerSourceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ApiServerSourceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerSource) DeepCopyInto(out *ContainerSource) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSource.
+func (in *ContainerSource) DeepCopy() *ContainerSource {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ContainerSource) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerSourceList) DeepCopyInto(out *ContainerSourceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]ContainerSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSourceList.
+func (in *ContainerSourceList) DeepCopy() *ContainerSourceList {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerSourceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *ContainerSourceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerSourceSpec) DeepCopyInto(out *ContainerSourceSpec) {
+ *out = *in
+ in.SourceSpec.DeepCopyInto(&out.SourceSpec)
+ in.Template.DeepCopyInto(&out.Template)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSourceSpec.
+func (in *ContainerSourceSpec) DeepCopy() *ContainerSourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerSourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ContainerSourceStatus) DeepCopyInto(out *ContainerSourceStatus) {
+ *out = *in
+ in.SourceStatus.DeepCopyInto(&out.SourceStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerSourceStatus.
+func (in *ContainerSourceStatus) DeepCopy() *ContainerSourceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(ContainerSourceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PingSource) DeepCopyInto(out *PingSource) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PingSource.
+func (in *PingSource) DeepCopy() *PingSource {
+ if in == nil {
+ return nil
+ }
+ out := new(PingSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PingSource) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PingSourceList) DeepCopyInto(out *PingSourceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PingSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PingSourceList.
+func (in *PingSourceList) DeepCopy() *PingSourceList {
+ if in == nil {
+ return nil
+ }
+ out := new(PingSourceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PingSourceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PingSourceSpec) DeepCopyInto(out *PingSourceSpec) {
+ *out = *in
+ in.SourceSpec.DeepCopyInto(&out.SourceSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PingSourceSpec.
+func (in *PingSourceSpec) DeepCopy() *PingSourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PingSourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PingSourceStatus) DeepCopyInto(out *PingSourceStatus) {
+ *out = *in
+ in.SourceStatus.DeepCopyInto(&out.SourceStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PingSourceStatus.
+func (in *PingSourceStatus) DeepCopy() *PingSourceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PingSourceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SinkBinding) DeepCopyInto(out *SinkBinding) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SinkBinding.
+func (in *SinkBinding) DeepCopy() *SinkBinding {
+ if in == nil {
+ return nil
+ }
+ out := new(SinkBinding)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SinkBinding) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SinkBindingList) DeepCopyInto(out *SinkBindingList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]SinkBinding, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SinkBindingList.
+func (in *SinkBindingList) DeepCopy() *SinkBindingList {
+ if in == nil {
+ return nil
+ }
+ out := new(SinkBindingList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *SinkBindingList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SinkBindingSpec) DeepCopyInto(out *SinkBindingSpec) {
+ *out = *in
+ in.SourceSpec.DeepCopyInto(&out.SourceSpec)
+ in.BindingSpec.DeepCopyInto(&out.BindingSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SinkBindingSpec.
+func (in *SinkBindingSpec) DeepCopy() *SinkBindingSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(SinkBindingSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *SinkBindingStatus) DeepCopyInto(out *SinkBindingStatus) {
+ *out = *in
+ in.SourceStatus.DeepCopyInto(&out.SourceStatus)
+ if in.OIDCTokenSecretName != nil {
+ in, out := &in.OIDCTokenSecretName, &out.OIDCTokenSecretName
+ *out = new(string)
+ **out = **in
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new SinkBindingStatus.
+func (in *SinkBindingStatus) DeepCopy() *SinkBindingStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(SinkBindingStatus)
+ in.DeepCopyInto(out)
+ return out
+}
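
As a quick illustration (not part of the vendored diff; the field values are made up), the generated helpers above are what let controller code mutate cached objects safely:

    package main

    import (
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
    )

    func main() {
        orig := &sourcesv1.ApiServerSourceSpec{
            Resources:         []sourcesv1.APIVersionKindSelector{{APIVersion: "v1", Kind: "Pod"}},
            NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"team": "a"}},
        }
        // DeepCopy returns a fully independent object, so edits to the copy
        // never leak back into the (possibly informer-cached) original.
        cp := orig.DeepCopy()
        cp.Resources[0].Kind = "ConfigMap"
        fmt.Println(orig.Resources[0].Kind, cp.Resources[0].Kind) // Pod ConfigMap
    }
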
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/doc.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/doc.go
new file mode 100644
index 000000000..c9bd848eb
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package v1beta2 contains API Schema definitions for the sources v1beta2 API group.
+// +k8s:deepcopy-gen=package
+// +groupName=sources.knative.dev
+package v1beta2
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_conversion.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_conversion.go
new file mode 100644
index 000000000..4af7e806f
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_conversion.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "context"
+
+ "knative.dev/pkg/apis"
+
+ v1 "knative.dev/eventing/pkg/apis/sources/v1"
+)
+
+// ConvertTo implements apis.Convertible
+// Converts source from v1beta2.PingSource into a higher version.
+func (source *PingSource) ConvertTo(ctx context.Context, obj apis.Convertible) error {
+ switch sink := obj.(type) {
+ case *v1.PingSource:
+ sink.ObjectMeta = source.ObjectMeta
+ sink.Status = v1.PingSourceStatus{
+ SourceStatus: source.Status.SourceStatus,
+ }
+ sink.Spec = v1.PingSourceSpec{
+ SourceSpec: source.Spec.SourceSpec,
+ Schedule: source.Spec.Schedule,
+ Timezone: source.Spec.Timezone,
+ ContentType: source.Spec.ContentType,
+ Data: source.Spec.Data,
+ DataBase64: source.Spec.DataBase64,
+ }
+
+ return nil
+ default:
+ return apis.ConvertToViaProxy(ctx, source, &v1.PingSource{}, sink)
+ }
+}
+
+// ConvertFrom implements apis.Convertible
+// Converts source from a higher version into v1beta2.PingSource.
+func (sink *PingSource) ConvertFrom(ctx context.Context, obj apis.Convertible) error {
+ switch source := obj.(type) {
+ case *v1.PingSource:
+ sink.ObjectMeta = source.ObjectMeta
+ sink.Status = PingSourceStatus{
+ SourceStatus: source.Status.SourceStatus,
+ }
+
+ sink.Spec = PingSourceSpec{
+ SourceSpec: source.Spec.SourceSpec,
+ Schedule: source.Spec.Schedule,
+ Timezone: source.Spec.Timezone,
+ ContentType: source.Spec.ContentType,
+ Data: source.Spec.Data,
+ DataBase64: source.Spec.DataBase64,
+ }
+
+ return nil
+ default:
+ return apis.ConvertFromViaProxy(ctx, source, &v1.PingSource{}, sink)
+ }
+}
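
A minimal sketch of exercising the conversion above (not part of the diff; the schedule and data are placeholder values):

    package main

    import (
        "context"
        "fmt"

        v1 "knative.dev/eventing/pkg/apis/sources/v1"
        "knative.dev/eventing/pkg/apis/sources/v1beta2"
    )

    func main() {
        src := &v1beta2.PingSource{}
        src.Spec.Schedule = "*/5 * * * *"
        src.Spec.Data = `{"msg":"ping"}`

        // Convert up to the v1 (storage) version and back down again.
        up := &v1.PingSource{}
        if err := src.ConvertTo(context.Background(), up); err != nil {
            panic(err)
        }
        down := &v1beta2.PingSource{}
        if err := down.ConvertFrom(context.Background(), up); err != nil {
            panic(err)
        }
        fmt.Println(down.Spec.Schedule) // "*/5 * * * *"
    }
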
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_defaults.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_defaults.go
new file mode 100644
index 000000000..ab0cb8eae
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_defaults.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "context"
+)
+
+const (
+ defaultSchedule = "* * * * *"
+)
+
+func (s *PingSource) SetDefaults(ctx context.Context) {
+ s.Spec.SetDefaults(ctx)
+}
+
+func (ss *PingSourceSpec) SetDefaults(ctx context.Context) {
+ if ss.Schedule == "" {
+ ss.Schedule = defaultSchedule
+ }
+}
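
For illustration only (not part of the diff), SetDefaults touches the schedule only when it is empty:

    package main

    import (
        "context"
        "fmt"

        "knative.dev/eventing/pkg/apis/sources/v1beta2"
    )

    func main() {
        spec := &v1beta2.PingSourceSpec{}
        spec.SetDefaults(context.Background())
        fmt.Println(spec.Schedule) // "* * * * *", i.e. fire every minute

        spec.Schedule = "0 * * * *"
        spec.SetDefaults(context.Background())
        fmt.Println(spec.Schedule) // unchanged: "0 * * * *"
    }
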
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_lifecycle.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_lifecycle.go
new file mode 100644
index 000000000..4ca010aa7
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_lifecycle.go
@@ -0,0 +1,122 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "fmt"
+
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+ "knative.dev/pkg/apis"
+)
+
+const (
+ // PingSourceConditionReady has status True when the PingSource is ready to send events.
+ PingSourceConditionReady = apis.ConditionReady
+
+ // PingSourceConditionSinkProvided has status True when the PingSource has been configured with a sink target.
+ PingSourceConditionSinkProvided apis.ConditionType = "SinkProvided"
+
+ // PingSourceConditionDeployed has status True when the PingSource has had its receive adapter deployment created.
+ PingSourceConditionDeployed apis.ConditionType = "Deployed"
+)
+
+var PingSourceCondSet = apis.NewLivingConditionSet(
+ PingSourceConditionSinkProvided,
+ PingSourceConditionDeployed)
+
+const (
+ // PingSourceEventType is the default PingSource CloudEvent type.
+ PingSourceEventType = "dev.knative.sources.ping"
+)
+
+// GetConditionSet retrieves the condition set for this resource. Implements the KRShaped interface.
+func (*PingSource) GetConditionSet() apis.ConditionSet {
+ return PingSourceCondSet
+}
+
+// PingSourceSource returns the PingSource CloudEvent source.
+func PingSourceSource(namespace, name string) string {
+ return fmt.Sprintf("/apis/v1/namespaces/%s/pingsources/%s", namespace, name)
+}
+
+// GetUntypedSpec returns the spec of the PingSource.
+func (s *PingSource) GetUntypedSpec() interface{} {
+ return s.Spec
+}
+
+// GetGroupVersionKind returns the GroupVersionKind.
+func (s *PingSource) GetGroupVersionKind() schema.GroupVersionKind {
+ return SchemeGroupVersion.WithKind("PingSource")
+}
+
+// GetCondition returns the condition currently associated with the given type, or nil.
+func (s *PingSourceStatus) GetCondition(t apis.ConditionType) *apis.Condition {
+ return PingSourceCondSet.Manage(s).GetCondition(t)
+}
+
+// GetTopLevelCondition returns the top level Condition.
+func (ps *PingSourceStatus) GetTopLevelCondition() *apis.Condition {
+ return PingSourceCondSet.Manage(ps).GetTopLevelCondition()
+}
+
+// IsReady returns true if the resource is ready overall.
+func (s *PingSourceStatus) IsReady() bool {
+ return PingSourceCondSet.Manage(s).IsHappy()
+}
+
+// InitializeConditions sets relevant unset conditions to Unknown state.
+func (s *PingSourceStatus) InitializeConditions() {
+ PingSourceCondSet.Manage(s).InitializeConditions()
+}
+
+// MarkSink sets the condition that the source has a sink configured.
+func (s *PingSourceStatus) MarkSink(uri *apis.URL) {
+ s.SinkURI = uri
+ if uri != nil {
+ PingSourceCondSet.Manage(s).MarkTrue(PingSourceConditionSinkProvided)
+ } else {
+ PingSourceCondSet.Manage(s).MarkFalse(PingSourceConditionSinkProvided, "SinkEmpty", "Sink has resolved to empty.")
+ }
+}
+
+// MarkNoSink sets the condition that the source does not have a sink configured.
+func (s *PingSourceStatus) MarkNoSink(reason, messageFormat string, messageA ...interface{}) {
+ PingSourceCondSet.Manage(s).MarkFalse(PingSourceConditionSinkProvided, reason, messageFormat, messageA...)
+}
+
+// PropagateDeploymentAvailability uses the availability of the provided Deployment to determine if
+// PingSourceConditionDeployed should be marked as true or false.
+func (s *PingSourceStatus) PropagateDeploymentAvailability(d *appsv1.Deployment) {
+ deploymentAvailableFound := false
+ for _, cond := range d.Status.Conditions {
+ if cond.Type == appsv1.DeploymentAvailable {
+ deploymentAvailableFound = true
+ if cond.Status == corev1.ConditionTrue {
+ PingSourceCondSet.Manage(s).MarkTrue(PingSourceConditionDeployed)
+ } else if cond.Status == corev1.ConditionFalse {
+ PingSourceCondSet.Manage(s).MarkFalse(PingSourceConditionDeployed, cond.Reason, cond.Message)
+ } else if cond.Status == corev1.ConditionUnknown {
+ PingSourceCondSet.Manage(s).MarkUnknown(PingSourceConditionDeployed, cond.Reason, cond.Message)
+ }
+ }
+ }
+ if !deploymentAvailableFound {
+ PingSourceCondSet.Manage(s).MarkUnknown(PingSourceConditionDeployed, "DeploymentUnavailable", "The Deployment '%s' is unavailable.", d.Name)
+ }
+}
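
A small sketch (not part of the diff) of how a reconciler might drive these status helpers; the sink URL and Deployment literal are illustrative:

    package main

    import (
        "fmt"

        appsv1 "k8s.io/api/apps/v1"
        corev1 "k8s.io/api/core/v1"
        "knative.dev/eventing/pkg/apis/sources/v1beta2"
        "knative.dev/pkg/apis"
    )

    func main() {
        st := &v1beta2.PingSourceStatus{}
        st.InitializeConditions()

        sink, _ := apis.ParseURL("http://event-display.default.svc.cluster.local")
        st.MarkSink(sink)

        // Ready only flips to true once the receive adapter Deployment reports Available.
        st.PropagateDeploymentAvailability(&appsv1.Deployment{
            Status: appsv1.DeploymentStatus{
                Conditions: []appsv1.DeploymentCondition{
                    {Type: appsv1.DeploymentAvailable, Status: corev1.ConditionTrue},
                },
            },
        })
        fmt.Println(st.IsReady()) // true
    }
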
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_types.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_types.go
new file mode 100644
index 000000000..3b23f9622
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_types.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "knative.dev/pkg/apis"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ "knative.dev/pkg/kmeta"
+)
+
+// +genclient
+// +genreconciler
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+// +k8s:defaulter-gen=true
+
+// PingSource is the Schema for the PingSources API.
+type PingSource struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec PingSourceSpec `json:"spec,omitempty"`
+ Status PingSourceStatus `json:"status,omitempty"`
+}
+
+// Check the interfaces that PingSource should be implementing.
+var (
+ _ runtime.Object = (*PingSource)(nil)
+ _ kmeta.OwnerRefable = (*PingSource)(nil)
+ _ apis.Validatable = (*PingSource)(nil)
+ _ apis.Defaultable = (*PingSource)(nil)
+ _ apis.HasSpec = (*PingSource)(nil)
+ _ duckv1.KRShaped = (*PingSource)(nil)
+)
+
+// PingSourceSpec defines the desired state of the PingSource.
+type PingSourceSpec struct {
+ // inherits duck/v1 SourceSpec, which currently provides:
+ // * Sink - a reference to an object that will resolve to a domain name or
+ // a URI directly to use as the sink.
+ // * CloudEventOverrides - defines overrides to control the output format
+ // and modifications of the event sent to the sink.
+ duckv1.SourceSpec `json:",inline"`
+
+ // Schedule is the cron schedule. Defaults to `* * * * *`.
+ // +optional
+ Schedule string `json:"schedule,omitempty"`
+
+ // Timezone modifies the actual time relative to the specified timezone.
+ // Defaults to the system time zone.
+ // More general information about time zones: https://www.iana.org/time-zones
+ // List of valid timezone values: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
+ Timezone string `json:"timezone,omitempty"`
+
+ // ContentType is the media type of Data or DataBase64. Default is empty.
+ // +optional
+ ContentType string `json:"contentType,omitempty"`
+
+ // Data is data used as the body of the event posted to the sink. Default is empty.
+ // Mutually exclusive with DataBase64.
+ // +optional
+ Data string `json:"data,omitempty"`
+
+ // DataBase64 is the base64-encoded string of the actual event's body posted to the sink. Default is empty.
+ // Mutually exclusive with Data.
+ // +optional
+ DataBase64 string `json:"dataBase64,omitempty"`
+}
+
+// PingSourceStatus defines the observed state of PingSource.
+type PingSourceStatus struct {
+ // inherits duck/v1 SourceStatus, which currently provides:
+ // * ObservedGeneration - the 'Generation' of the Service that was last
+ // processed by the controller.
+ // * Conditions - the latest available observations of a resource's current
+ // state.
+ // * SinkURI - the current active sink URI that has been configured for the
+ // Source.
+ duckv1.SourceStatus `json:",inline"`
+}
+
+// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
+
+// PingSourceList contains a list of PingSources.
+type PingSourceList struct {
+ metav1.TypeMeta `json:",inline"`
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []PingSource `json:"items"`
+}
+
+// GetStatus retrieves the status of the PingSource. Implements the KRShaped interface.
+func (p *PingSource) GetStatus() *duckv1.Status {
+ return &p.Status.Status
+}
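
To make the shape concrete (illustrative only, not part of the diff; the names and sink are made up), a fully populated v1beta2 PingSource looks roughly like this:

    package example

    import (
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "knative.dev/eventing/pkg/apis/sources/v1beta2"
        duckv1 "knative.dev/pkg/apis/duck/v1"
    )

    func examplePingSource() *v1beta2.PingSource {
        return &v1beta2.PingSource{
            ObjectMeta: metav1.ObjectMeta{Name: "heartbeat", Namespace: "default"},
            Spec: v1beta2.PingSourceSpec{
                Schedule:    "*/10 * * * *",
                Timezone:    "Europe/Berlin",
                ContentType: "application/json",
                Data:        `{"beat":true}`,
                SourceSpec: duckv1.SourceSpec{
                    Sink: duckv1.Destination{
                        Ref: &duckv1.KReference{
                            APIVersion: "serving.knative.dev/v1",
                            Kind:       "Service",
                            Name:       "event-display",
                        },
                    },
                },
            },
        }
    }
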
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_validation.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_validation.go
new file mode 100644
index 000000000..a2cb865f2
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/ping_validation.go
@@ -0,0 +1,115 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "context"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+
+ cloudevents "github.com/cloudevents/sdk-go/v2"
+
+ "github.com/robfig/cron/v3"
+ "knative.dev/pkg/apis"
+
+ "knative.dev/eventing/pkg/apis/sources/config"
+)
+
+func (c *PingSource) Validate(ctx context.Context) *apis.FieldError {
+ return c.Spec.Validate(ctx).ViaField("spec")
+}
+
+func (cs *PingSourceSpec) Validate(ctx context.Context) *apis.FieldError {
+ var errs *apis.FieldError
+ schedule := cs.Schedule
+
+ errs = validateDescriptor(schedule)
+
+ if cs.Timezone != "" {
+ schedule = "CRON_TZ=" + cs.Timezone + " " + schedule
+ }
+
+ parser := cron.NewParser(
+ cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
+ )
+
+ if _, err := parser.Parse(schedule); err != nil {
+ if strings.HasPrefix(err.Error(), "provided bad location") {
+ fe := apis.ErrInvalidValue(err, "timezone")
+ errs = errs.Also(fe)
+ } else {
+ fe := apis.ErrInvalidValue(err, "schedule")
+ errs = errs.Also(fe)
+ }
+ }
+
+ pingConfig := config.FromContextOrDefaults(ctx)
+ pingDefaults := pingConfig.PingDefaults.GetPingConfig()
+
+ if fe := cs.Sink.Validate(ctx); fe != nil {
+ errs = errs.Also(fe.ViaField("sink"))
+ }
+
+ if cs.Data != "" && cs.DataBase64 != "" {
+ errs = errs.Also(apis.ErrMultipleOneOf("data", "dataBase64"))
+ } else if cs.DataBase64 != "" {
+ if bsize := int64(len(cs.DataBase64)); pingDefaults.DataMaxSize > -1 && bsize > pingDefaults.DataMaxSize {
+ fe := apis.ErrInvalidValue(fmt.Sprintf("the dataBase64 length of %d bytes exceeds limit set at %d.", bsize, pingDefaults.DataMaxSize), "dataBase64")
+ errs = errs.Also(fe)
+ }
+ decoded, err := base64.StdEncoding.DecodeString(cs.DataBase64)
+ // invalid base64 string
+ if err != nil {
+ errs = errs.Also(apis.ErrInvalidValue(err, "dataBase64"))
+ } else {
+ // validate if the decoded base64 string is valid JSON
+ if cs.ContentType == cloudevents.ApplicationJSON {
+ if err := validateJSON(string(decoded)); err != nil {
+ errs = errs.Also(apis.ErrInvalidValue(err, "dataBase64"))
+ }
+ }
+ }
+ } else if cs.Data != "" {
+ if bsize := int64(len(cs.Data)); pingDefaults.DataMaxSize > -1 && bsize > pingDefaults.DataMaxSize {
+ fe := apis.ErrInvalidValue(fmt.Sprintf("the data length of %d bytes exceeds limit set at %d.", bsize, pingDefaults.DataMaxSize), "data")
+ errs = errs.Also(fe)
+ }
+ if cs.ContentType == cloudevents.ApplicationJSON {
+ // validate if data is valid JSON
+ if err := validateJSON(cs.Data); err != nil {
+ errs = errs.Also(apis.ErrInvalidValue(err, "data"))
+ }
+ }
+ }
+ errs = errs.Also(cs.SourceSpec.Validate(ctx))
+ return errs
+}
+
+func validateJSON(str string) error {
+ var objmap map[string]interface{}
+ return json.Unmarshal([]byte(str), &objmap)
+}
+
+func validateDescriptor(spec string) *apis.FieldError {
+ if strings.Contains(spec, "@every") {
+ return apis.ErrInvalidValue(errors.New("unsupported descriptor @every"), "schedule")
+ }
+ return nil
+}
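
The schedule check above reduces to robfig/cron parsing with an optional CRON_TZ prefix; a standalone sketch (not part of the diff):

    package main

    import (
        "fmt"

        "github.com/robfig/cron/v3"
    )

    func main() {
        parser := cron.NewParser(
            cron.SecondOptional | cron.Minute | cron.Hour | cron.Dom | cron.Month | cron.Dow | cron.Descriptor,
        )

        // A timezone is folded into the spec the same way Validate does it.
        if _, err := parser.Parse("CRON_TZ=Europe/Berlin 0 9 * * 1"); err != nil {
            fmt.Println("unexpected:", err)
        }

        // A bad timezone surfaces as a "provided bad location" error, which
        // Validate maps onto the "timezone" field instead of "schedule".
        if _, err := parser.Parse("CRON_TZ=Mars/Olympus 0 9 * * 1"); err != nil {
            fmt.Println("rejected:", err)
        }
    }
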
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/register.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/register.go
new file mode 100644
index 000000000..76e97b308
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/register.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package v1beta2
+
+import (
+ "knative.dev/eventing/pkg/apis/sources"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+// SchemeGroupVersion is group version used to register these objects
+var SchemeGroupVersion = schema.GroupVersion{Group: sources.GroupName, Version: "v1beta2"}
+
+// Kind takes an unqualified kind and returns back a Group qualified GroupKind
+func Kind(kind string) schema.GroupKind {
+ return SchemeGroupVersion.WithKind(kind).GroupKind()
+}
+
+// Resource takes an unqualified resource and returns a Group qualified GroupResource
+func Resource(resource string) schema.GroupResource {
+ return SchemeGroupVersion.WithResource(resource).GroupResource()
+}
+
+var (
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
+ AddToScheme = SchemeBuilder.AddToScheme
+)
+
+// Adds the list of known types to Scheme.
+func addKnownTypes(scheme *runtime.Scheme) error {
+ scheme.AddKnownTypes(SchemeGroupVersion,
+ &PingSource{},
+ &PingSourceList{},
+ )
+ metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
+ return nil
+}
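
Illustration only (not part of the diff): registering these types into a fresh scheme, for example for a fake client or a decoder:

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/runtime"
        "knative.dev/eventing/pkg/apis/sources/v1beta2"
    )

    func main() {
        scheme := runtime.NewScheme()
        if err := v1beta2.AddToScheme(scheme); err != nil {
            panic(err)
        }
        // The scheme now recognizes sources.knative.dev/v1beta2 PingSource and PingSourceList.
        fmt.Println(scheme.Recognizes(v1beta2.SchemeGroupVersion.WithKind("PingSource"))) // true
    }
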
diff --git a/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/zz_generated.deepcopy.go b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/zz_generated.deepcopy.go
new file mode 100644
index 000000000..e0381d7a4
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/apis/sources/v1beta2/zz_generated.deepcopy.go
@@ -0,0 +1,121 @@
+//go:build !ignore_autogenerated
+// +build !ignore_autogenerated
+
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by deepcopy-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ runtime "k8s.io/apimachinery/pkg/runtime"
+)
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PingSource) DeepCopyInto(out *PingSource) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ in.Spec.DeepCopyInto(&out.Spec)
+ in.Status.DeepCopyInto(&out.Status)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PingSource.
+func (in *PingSource) DeepCopy() *PingSource {
+ if in == nil {
+ return nil
+ }
+ out := new(PingSource)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PingSource) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PingSourceList) DeepCopyInto(out *PingSourceList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]PingSource, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PingSourceList.
+func (in *PingSourceList) DeepCopy() *PingSourceList {
+ if in == nil {
+ return nil
+ }
+ out := new(PingSourceList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *PingSourceList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PingSourceSpec) DeepCopyInto(out *PingSourceSpec) {
+ *out = *in
+ in.SourceSpec.DeepCopyInto(&out.SourceSpec)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PingSourceSpec.
+func (in *PingSourceSpec) DeepCopy() *PingSourceSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(PingSourceSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *PingSourceStatus) DeepCopyInto(out *PingSourceStatus) {
+ *out = *in
+ in.SourceStatus.DeepCopyInto(&out.SourceStatus)
+ return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PingSourceStatus.
+func (in *PingSourceStatus) DeepCopy() *PingSourceStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(PingSourceStatus)
+ in.DeepCopyInto(out)
+ return out
+}
diff --git a/vendor/knative.dev/eventing/pkg/auth/event_policy.go b/vendor/knative.dev/eventing/pkg/auth/event_policy.go
index 56ac38021..7d4fcb1db 100644
--- a/vendor/knative.dev/eventing/pkg/auth/event_policy.go
+++ b/vendor/knative.dev/eventing/pkg/auth/event_policy.go
@@ -35,6 +35,10 @@ import (
"knative.dev/pkg/resolver"
)
+const (
+ kubernetesServiceAccountPrefix = "system:serviceaccount"
+)
+
// GetEventPoliciesForResource returns the applying EventPolicies for a given resource
func GetEventPoliciesForResource(lister listerseventingv1alpha1.EventPolicyLister, resourceGVK schema.GroupVersionKind, resourceObjectMeta metav1.ObjectMeta) ([]*v1alpha1.EventPolicy, error) {
policies, err := lister.EventPolicies(resourceObjectMeta.GetNamespace()).List(labels.Everything())
@@ -194,7 +198,7 @@ func resolveSubjectsFromReference(resolver *resolver.AuthenticatableResolver, re
objFullSANames := make([]string, 0, len(objSAs))
for _, sa := range objSAs {
- objFullSANames = append(objFullSANames, fmt.Sprintf("system:serviceaccount:%s:%s", reference.Namespace, sa))
+ objFullSANames = append(objFullSANames, fmt.Sprintf("%s:%s:%s", kubernetesServiceAccountPrefix, reference.Namespace, sa))
}
return objFullSANames, nil
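
For reference (not part of the diff; the namespace and service account names are placeholders), the subjects produced here keep the familiar Kubernetes shape:

    package main

    import "fmt"

    // Mirrors the unexported kubernetesServiceAccountPrefix constant introduced above.
    const kubernetesServiceAccountPrefix = "system:serviceaccount"

    func main() {
        fmt.Printf("%s:%s:%s\n", kubernetesServiceAccountPrefix, "default", "my-source-oidc-sa")
        // Output: system:serviceaccount:default:my-source-oidc-sa
    }
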
diff --git a/vendor/knative.dev/eventing/pkg/auth/serviceaccount.go b/vendor/knative.dev/eventing/pkg/auth/serviceaccount.go
index b67666ef6..5b98d61c7 100644
--- a/vendor/knative.dev/eventing/pkg/auth/serviceaccount.go
+++ b/vendor/knative.dev/eventing/pkg/auth/serviceaccount.go
@@ -21,11 +21,13 @@ import (
"fmt"
"strings"
- "knative.dev/eventing/pkg/apis/feature"
+ "k8s.io/apimachinery/pkg/api/equality"
duckv1 "knative.dev/pkg/apis/duck/v1"
"knative.dev/pkg/kmeta"
pkgreconciler "knative.dev/pkg/reconciler"
+ "knative.dev/eventing/pkg/apis/feature"
+
"go.uber.org/zap"
v1 "k8s.io/api/core/v1"
apierrs "k8s.io/apimachinery/pkg/api/errors"
@@ -38,10 +40,10 @@ import (
)
const (
- //OIDCLabelKey is used to filter out all the informers that related to OIDC work
- OIDCLabelKey = "oidc"
+ // OIDCLabelKey is used to filter out all the informers that are related to OIDC work
+ OIDCLabelKey = "eventing.knative.dev/oidc"
- // OIDCTokenRoleLabelSelector is the label selector for the OIDC token creator role and rolebinding informers
+ // OIDCLabelSelector is the label selector for the OIDC resources
OIDCLabelSelector = OIDCLabelKey
)
@@ -87,28 +89,38 @@ func EnsureOIDCServiceAccountExistsForResource(ctx context.Context, serviceAccou
saName := GetOIDCServiceAccountNameForResource(gvk, objectMeta)
sa, err := serviceAccountLister.ServiceAccounts(objectMeta.Namespace).Get(saName)
+ expected := GetOIDCServiceAccountForResource(gvk, objectMeta)
+
// If the resource doesn't exist, we'll create it.
if apierrs.IsNotFound(err) {
logging.FromContext(ctx).Debugw("Creating OIDC service account", zap.Error(err))
- expected := GetOIDCServiceAccountForResource(gvk, objectMeta)
-
_, err = kubeclient.CoreV1().ServiceAccounts(objectMeta.Namespace).Create(ctx, expected, metav1.CreateOptions{})
if err != nil {
- return fmt.Errorf("could not create OIDC service account %s/%s for %s: %w", objectMeta.Name, objectMeta.Namespace, gvk.Kind, err)
+ return fmt.Errorf("could not create OIDC service account %s/%s for %s: %w", objectMeta.Namespace, objectMeta.Name, gvk.Kind, err)
}
return nil
}
-
if err != nil {
- return fmt.Errorf("could not get OIDC service account %s/%s for %s: %w", objectMeta.Name, objectMeta.Namespace, gvk.Kind, err)
+ return fmt.Errorf("could not get OIDC service account %s/%s for %s: %w", objectMeta.Namespace, objectMeta.Name, gvk.Kind, err)
}
-
if !metav1.IsControlledBy(&sa.ObjectMeta, &objectMeta) {
return fmt.Errorf("service account %s not owned by %s %s", sa.Name, gvk.Kind, objectMeta.Name)
}
+ if !equality.Semantic.DeepDerivative(expected, sa) {
+ expected.ResourceVersion = sa.ResourceVersion
+
+ _, err = kubeclient.CoreV1().ServiceAccounts(objectMeta.Namespace).Update(ctx, expected, metav1.UpdateOptions{})
+ if err != nil {
+ return fmt.Errorf("could not update OIDC service account %s/%s for %s: %w", objectMeta.Namespace, objectMeta.Name, gvk.Kind, err)
+ }
+
+ return nil
+
+ }
+
return nil
}
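
The new update branch follows the usual create-or-update reconcile pattern; a generic sketch of the same idea (not part of the diff, simplified to a plain ConfigMap):

    package example

    import (
        "context"

        corev1 "k8s.io/api/core/v1"
        "k8s.io/apimachinery/pkg/api/equality"
        apierrs "k8s.io/apimachinery/pkg/api/errors"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes"
    )

    // reconcileConfigMap creates the desired object if it is missing and only
    // issues an update when the live object has drifted from the desired one.
    func reconcileConfigMap(ctx context.Context, kc kubernetes.Interface, desired *corev1.ConfigMap) error {
        live, err := kc.CoreV1().ConfigMaps(desired.Namespace).Get(ctx, desired.Name, metav1.GetOptions{})
        if apierrs.IsNotFound(err) {
            _, err = kc.CoreV1().ConfigMaps(desired.Namespace).Create(ctx, desired, metav1.CreateOptions{})
            return err
        }
        if err != nil {
            return err
        }
        // DeepDerivative treats empty fields in "desired" as "don't care", so
        // server-defaulted fields on the live object do not force spurious updates.
        if !equality.Semantic.DeepDerivative(desired, live) {
            desired.ResourceVersion = live.ResourceVersion
            _, err = kc.CoreV1().ConfigMaps(desired.Namespace).Update(ctx, desired, metav1.UpdateOptions{})
        }
        return err
    }
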
diff --git a/vendor/knative.dev/eventing/pkg/auth/token_verifier.go b/vendor/knative.dev/eventing/pkg/auth/token_verifier.go
index 5571b67f2..0d87cf11f 100644
--- a/vendor/knative.dev/eventing/pkg/auth/token_verifier.go
+++ b/vendor/knative.dev/eventing/pkg/auth/token_verifier.go
@@ -22,8 +22,13 @@ import (
"fmt"
"io"
"net/http"
+ "strings"
"time"
+ duckv1 "knative.dev/eventing/pkg/apis/duck/v1"
+ eventpolicyinformer "knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy"
+ "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1"
+
"github.com/coreos/go-oidc/v3/oidc"
"go.uber.org/zap"
"k8s.io/client-go/rest"
@@ -37,9 +42,10 @@ const (
)
type OIDCTokenVerifier struct {
- logger *zap.SugaredLogger
- restConfig *rest.Config
- provider *oidc.Provider
+ logger *zap.SugaredLogger
+ restConfig *rest.Config
+ provider *oidc.Provider
+ eventPolicyLister v1alpha1.EventPolicyLister
}
type IDToken struct {
@@ -53,8 +59,9 @@ type IDToken struct {
func NewOIDCTokenVerifier(ctx context.Context) *OIDCTokenVerifier {
tokenHandler := &OIDCTokenVerifier{
- logger: logging.FromContext(ctx).With("component", "oidc-token-handler"),
- restConfig: injection.GetConfig(ctx),
+ logger: logging.FromContext(ctx).With("component", "oidc-token-handler"),
+ restConfig: injection.GetConfig(ctx),
+ eventPolicyLister: eventpolicyinformer.Get(ctx).Lister(),
}
if err := tokenHandler.initOIDCProvider(ctx); err != nil {
@@ -64,13 +71,103 @@ func NewOIDCTokenVerifier(ctx context.Context) *OIDCTokenVerifier {
return tokenHandler
}
-// VerifyJWT verifies the given JWT for the expected audience and returns the parsed ID token.
-func (c *OIDCTokenVerifier) VerifyJWT(ctx context.Context, jwt, audience string) (*IDToken, error) {
- if c.provider == nil {
+// VerifyJWTFromRequest verifies if the incoming request contains a correct JWT token
+//
+// Deprecated: use OIDCTokenVerifier.VerifyRequest() instead to bundle AuthN and AuthZ verification
+func (v *OIDCTokenVerifier) VerifyJWTFromRequest(ctx context.Context, r *http.Request, audience *string, response http.ResponseWriter) error {
+ _, err := v.verifyAuthN(ctx, audience, r, response)
+
+ return err
+}
+
+// VerifyRequest verifies AuthN and AuthZ in the request. On verification errors, it sets the
+// response's HTTP status and returns an error
+func (v *OIDCTokenVerifier) VerifyRequest(ctx context.Context, features feature.Flags, requiredOIDCAudience *string, resourceNamespace string, policyRefs []duckv1.AppliedEventPolicyRef, req *http.Request, resp http.ResponseWriter) error {
+ if !features.IsOIDCAuthentication() {
+ return nil
+ }
+
+ idToken, err := v.verifyAuthN(ctx, requiredOIDCAudience, req, resp)
+ if err != nil {
+ return fmt.Errorf("authentication of request could not be verified: %w", err)
+ }
+
+ err = v.verifyAuthZ(features, idToken, resourceNamespace, policyRefs, resp)
+ if err != nil {
+ return fmt.Errorf("authorization of request could not be verified: %w", err)
+ }
+
+ return nil
+}
+
+// verifyAuthN verifies if the incoming request contains a correct JWT token
+func (v *OIDCTokenVerifier) verifyAuthN(ctx context.Context, audience *string, req *http.Request, resp http.ResponseWriter) (*IDToken, error) {
+ token := GetJWTFromHeader(req.Header)
+ if token == "" {
+ resp.WriteHeader(http.StatusUnauthorized)
+ return nil, fmt.Errorf("no JWT token found in request")
+ }
+
+ if audience == nil {
+ resp.WriteHeader(http.StatusInternalServerError)
+ return nil, fmt.Errorf("no audience is provided")
+ }
+
+ idToken, err := v.verifyJWT(ctx, token, *audience)
+ if err != nil {
+ resp.WriteHeader(http.StatusUnauthorized)
+ return nil, fmt.Errorf("failed to verify JWT: %w", err)
+ }
+
+ return idToken, nil
+}
+
+// verifyAuthZ verifies if the given idToken is allowed by the resource's eventPolicyStatus
+func (v *OIDCTokenVerifier) verifyAuthZ(features feature.Flags, idToken *IDToken, resourceNamespace string, policyRefs []duckv1.AppliedEventPolicyRef, resp http.ResponseWriter) error {
+ if len(policyRefs) > 0 {
+ subjectsFromApplyingPolicies := []string{}
+ for _, p := range policyRefs {
+ policy, err := v.eventPolicyLister.EventPolicies(resourceNamespace).Get(p.Name)
+ if err != nil {
+ resp.WriteHeader(http.StatusInternalServerError)
+ return fmt.Errorf("failed to get eventPolicy: %w", err)
+ }
+
+ subjectsFromApplyingPolicies = append(subjectsFromApplyingPolicies, policy.Status.From...)
+ }
+
+ if !SubjectContained(idToken.Subject, subjectsFromApplyingPolicies) {
+ resp.WriteHeader(http.StatusForbidden)
+ return fmt.Errorf("token is from subject %q, but only %q are part of applying event policies", idToken.Subject, subjectsFromApplyingPolicies)
+ }
+
+ return nil
+ } else {
+ if features.IsAuthorizationDefaultModeDenyAll() {
+ resp.WriteHeader(http.StatusForbidden)
+ return fmt.Errorf("no event policies apply for resource and %s is set to %s", feature.AuthorizationDefaultMode, feature.AuthorizationDenyAll)
+
+ } else if features.IsAuthorizationDefaultModeSameNamespace() {
+ if !strings.HasPrefix(idToken.Subject, fmt.Sprintf("%s:%s:", kubernetesServiceAccountPrefix, resourceNamespace)) {
+ resp.WriteHeader(http.StatusForbidden)
+ return fmt.Errorf("no policies apply for resource. %s is set to same-namespace mode, but token is from subject %q, which is not part of %q namespace", feature.AuthorizationDefaultMode, idToken.Subject, resourceNamespace)
+ }
+
+ return nil
+ }
+ // else: allow all
+ }
+
+ return nil
+}
+
+// verifyJWT verifies the given JWT for the expected audience and returns the parsed ID token.
+func (v *OIDCTokenVerifier) verifyJWT(ctx context.Context, jwt, audience string) (*IDToken, error) {
+ if v.provider == nil {
return nil, fmt.Errorf("provider is nil. Is the OIDC provider config correct?")
}
- verifier := c.provider.Verifier(&oidc.Config{
+ verifier := v.provider.Verifier(&oidc.Config{
ClientID: audience,
})
@@ -89,8 +186,8 @@ func (c *OIDCTokenVerifier) VerifyJWT(ctx context.Context, jwt, audience string)
}, nil
}
-func (c *OIDCTokenVerifier) initOIDCProvider(ctx context.Context) error {
- discovery, err := c.getKubernetesOIDCDiscovery()
+func (v *OIDCTokenVerifier) initOIDCProvider(ctx context.Context) error {
+ discovery, err := v.getKubernetesOIDCDiscovery()
if err != nil {
return fmt.Errorf("could not load Kubernetes OIDC discovery information: %w", err)
}
@@ -100,25 +197,25 @@ func (c *OIDCTokenVerifier) initOIDCProvider(ctx context.Context) error {
ctx = oidc.InsecureIssuerURLContext(ctx, discovery.Issuer)
}
- httpClient, err := c.getHTTPClientForKubeAPIServer()
+ httpClient, err := v.getHTTPClientForKubeAPIServer()
if err != nil {
return fmt.Errorf("could not get HTTP client with TLS certs of API server: %w", err)
}
ctx = oidc.ClientContext(ctx, httpClient)
// get OIDC provider
- c.provider, err = oidc.NewProvider(ctx, kubernetesOIDCDiscoveryBaseURL)
+ v.provider, err = oidc.NewProvider(ctx, kubernetesOIDCDiscoveryBaseURL)
if err != nil {
return fmt.Errorf("could not get OIDC provider: %w", err)
}
- c.logger.Debug("updated OIDC provider config", zap.Any("discovery-config", discovery))
+ v.logger.Debug("updated OIDC provider config", zap.Any("discovery-config", discovery))
return nil
}
-func (c *OIDCTokenVerifier) getHTTPClientForKubeAPIServer() (*http.Client, error) {
- client, err := rest.HTTPClientFor(c.restConfig)
+func (v *OIDCTokenVerifier) getHTTPClientForKubeAPIServer() (*http.Client, error) {
+ client, err := rest.HTTPClientFor(v.restConfig)
if err != nil {
return nil, fmt.Errorf("could not create HTTP client from rest config: %w", err)
}
@@ -126,8 +223,8 @@ func (c *OIDCTokenVerifier) getHTTPClientForKubeAPIServer() (*http.Client, error
return client, nil
}
-func (c *OIDCTokenVerifier) getKubernetesOIDCDiscovery() (*openIDMetadata, error) {
- client, err := c.getHTTPClientForKubeAPIServer()
+func (v *OIDCTokenVerifier) getKubernetesOIDCDiscovery() (*openIDMetadata, error) {
+ client, err := v.getHTTPClientForKubeAPIServer()
if err != nil {
return nil, fmt.Errorf("could not get HTTP client for API server: %w", err)
}
@@ -151,27 +248,6 @@ func (c *OIDCTokenVerifier) getKubernetesOIDCDiscovery() (*openIDMetadata, error
return openIdConfig, nil
}
-// VerifyJWTFromRequest will verify the incoming request contains the correct JWT token
-func (tokenVerifier *OIDCTokenVerifier) VerifyJWTFromRequest(ctx context.Context, r *http.Request, audience *string, response http.ResponseWriter) error {
- token := GetJWTFromHeader(r.Header)
- if token == "" {
- response.WriteHeader(http.StatusUnauthorized)
- return fmt.Errorf("no JWT token found in request")
- }
-
- if audience == nil {
- response.WriteHeader(http.StatusInternalServerError)
- return fmt.Errorf("no audience is provided")
- }
-
- if _, err := tokenVerifier.VerifyJWT(ctx, token, *audience); err != nil {
- response.WriteHeader(http.StatusUnauthorized)
- return fmt.Errorf("failed to verify JWT: %w", err)
- }
-
- return nil
-}
-
type openIDMetadata struct {
Issuer string `json:"issuer"`
JWKSURI string `json:"jwks_uri"`
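
A rough sketch (not part of the diff) of how an ingress handler could wrap the new VerifyRequest call; the audience, namespace, and policy refs are placeholders supplied by the caller:

    package example

    import (
        "net/http"

        duckv1 "knative.dev/eventing/pkg/apis/duck/v1"
        "knative.dev/eventing/pkg/apis/feature"
        "knative.dev/eventing/pkg/auth"
    )

    func protect(verifier *auth.OIDCTokenVerifier, features feature.Flags, audience string,
        namespace string, policies []duckv1.AppliedEventPolicyRef, next http.Handler) http.Handler {

        return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
            // VerifyRequest is a no-op when OIDC authentication is disabled; otherwise
            // it writes the appropriate 401/403/500 status itself and returns an error.
            if err := verifier.VerifyRequest(r.Context(), features, &audience, namespace, policies, r, w); err != nil {
                return
            }
            next.ServeHTTP(w, r)
        })
    }
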
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/clientset.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/clientset.go
new file mode 100644
index 000000000..0ca8d7350
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/clientset.go
@@ -0,0 +1,237 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package versioned
+
+import (
+ "fmt"
+ "net/http"
+
+ discovery "k8s.io/client-go/discovery"
+ rest "k8s.io/client-go/rest"
+ flowcontrol "k8s.io/client-go/util/flowcontrol"
+ eventingv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1"
+ eventingv1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1"
+ eventingv1beta1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1"
+ eventingv1beta2 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2"
+ eventingv1beta3 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3"
+ flowsv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1"
+ messagingv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1"
+ sinksv1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1"
+ sourcesv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1"
+ sourcesv1beta2 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2"
+)
+
+type Interface interface {
+ Discovery() discovery.DiscoveryInterface
+ EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interface
+ EventingV1beta1() eventingv1beta1.EventingV1beta1Interface
+ EventingV1beta2() eventingv1beta2.EventingV1beta2Interface
+ EventingV1beta3() eventingv1beta3.EventingV1beta3Interface
+ EventingV1() eventingv1.EventingV1Interface
+ FlowsV1() flowsv1.FlowsV1Interface
+ MessagingV1() messagingv1.MessagingV1Interface
+ SinksV1alpha1() sinksv1alpha1.SinksV1alpha1Interface
+ SourcesV1beta2() sourcesv1beta2.SourcesV1beta2Interface
+ SourcesV1() sourcesv1.SourcesV1Interface
+}
+
+// Clientset contains the clients for groups.
+type Clientset struct {
+ *discovery.DiscoveryClient
+ eventingV1alpha1 *eventingv1alpha1.EventingV1alpha1Client
+ eventingV1beta1 *eventingv1beta1.EventingV1beta1Client
+ eventingV1beta2 *eventingv1beta2.EventingV1beta2Client
+ eventingV1beta3 *eventingv1beta3.EventingV1beta3Client
+ eventingV1 *eventingv1.EventingV1Client
+ flowsV1 *flowsv1.FlowsV1Client
+ messagingV1 *messagingv1.MessagingV1Client
+ sinksV1alpha1 *sinksv1alpha1.SinksV1alpha1Client
+ sourcesV1beta2 *sourcesv1beta2.SourcesV1beta2Client
+ sourcesV1 *sourcesv1.SourcesV1Client
+}
+
+// EventingV1alpha1 retrieves the EventingV1alpha1Client
+func (c *Clientset) EventingV1alpha1() eventingv1alpha1.EventingV1alpha1Interface {
+ return c.eventingV1alpha1
+}
+
+// EventingV1beta1 retrieves the EventingV1beta1Client
+func (c *Clientset) EventingV1beta1() eventingv1beta1.EventingV1beta1Interface {
+ return c.eventingV1beta1
+}
+
+// EventingV1beta2 retrieves the EventingV1beta2Client
+func (c *Clientset) EventingV1beta2() eventingv1beta2.EventingV1beta2Interface {
+ return c.eventingV1beta2
+}
+
+// EventingV1beta3 retrieves the EventingV1beta3Client
+func (c *Clientset) EventingV1beta3() eventingv1beta3.EventingV1beta3Interface {
+ return c.eventingV1beta3
+}
+
+// EventingV1 retrieves the EventingV1Client
+func (c *Clientset) EventingV1() eventingv1.EventingV1Interface {
+ return c.eventingV1
+}
+
+// FlowsV1 retrieves the FlowsV1Client
+func (c *Clientset) FlowsV1() flowsv1.FlowsV1Interface {
+ return c.flowsV1
+}
+
+// MessagingV1 retrieves the MessagingV1Client
+func (c *Clientset) MessagingV1() messagingv1.MessagingV1Interface {
+ return c.messagingV1
+}
+
+// SinksV1alpha1 retrieves the SinksV1alpha1Client
+func (c *Clientset) SinksV1alpha1() sinksv1alpha1.SinksV1alpha1Interface {
+ return c.sinksV1alpha1
+}
+
+// SourcesV1beta2 retrieves the SourcesV1beta2Client
+func (c *Clientset) SourcesV1beta2() sourcesv1beta2.SourcesV1beta2Interface {
+ return c.sourcesV1beta2
+}
+
+// SourcesV1 retrieves the SourcesV1Client
+func (c *Clientset) SourcesV1() sourcesv1.SourcesV1Interface {
+ return c.sourcesV1
+}
+
+// Discovery retrieves the DiscoveryClient
+func (c *Clientset) Discovery() discovery.DiscoveryInterface {
+ if c == nil {
+ return nil
+ }
+ return c.DiscoveryClient
+}
+
+// NewForConfig creates a new Clientset for the given config.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfig will generate a rate-limiter in configShallowCopy.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*Clientset, error) {
+ configShallowCopy := *c
+
+ if configShallowCopy.UserAgent == "" {
+ configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ // share the transport between all clients
+ httpClient, err := rest.HTTPClientFor(&configShallowCopy)
+ if err != nil {
+ return nil, err
+ }
+
+ return NewForConfigAndClient(&configShallowCopy, httpClient)
+}
+
+// NewForConfigAndClient creates a new Clientset for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+// If config's RateLimiter is not set and QPS and Burst are acceptable,
+// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
+func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
+ configShallowCopy := *c
+ if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
+ if configShallowCopy.Burst <= 0 {
+ return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
+ }
+ configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
+ }
+
+ var cs Clientset
+ var err error
+ cs.eventingV1alpha1, err = eventingv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.eventingV1beta1, err = eventingv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.eventingV1beta2, err = eventingv1beta2.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.eventingV1beta3, err = eventingv1beta3.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.eventingV1, err = eventingv1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.flowsV1, err = flowsv1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.messagingV1, err = messagingv1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.sinksV1alpha1, err = sinksv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.sourcesV1beta2, err = sourcesv1beta2.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ cs.sourcesV1, err = sourcesv1.NewForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+
+ cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
+ if err != nil {
+ return nil, err
+ }
+ return &cs, nil
+}
+
+// NewForConfigOrDie creates a new Clientset for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *Clientset {
+ cs, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return cs
+}
+
+// New creates a new Clientset for the given RESTClient.
+func New(c rest.Interface) *Clientset {
+ var cs Clientset
+ cs.eventingV1alpha1 = eventingv1alpha1.New(c)
+ cs.eventingV1beta1 = eventingv1beta1.New(c)
+ cs.eventingV1beta2 = eventingv1beta2.New(c)
+ cs.eventingV1beta3 = eventingv1beta3.New(c)
+ cs.eventingV1 = eventingv1.New(c)
+ cs.flowsV1 = flowsv1.New(c)
+ cs.messagingV1 = messagingv1.New(c)
+ cs.sinksV1alpha1 = sinksv1alpha1.New(c)
+ cs.sourcesV1beta2 = sourcesv1beta2.New(c)
+ cs.sourcesV1 = sourcesv1.New(c)
+
+ cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
+ return &cs
+}
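
For completeness (not part of the diff), the generated clientset is used like any other typed client; a sketch assuming in-cluster config and the "default" namespace:

    package main

    import (
        "context"
        "fmt"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/rest"
        versioned "knative.dev/eventing/pkg/client/clientset/versioned"
    )

    func main() {
        cfg, err := rest.InClusterConfig()
        if err != nil {
            panic(err)
        }
        cs, err := versioned.NewForConfig(cfg)
        if err != nil {
            panic(err)
        }
        // List PingSources through the sources/v1 client wired up above.
        list, err := cs.SourcesV1().PingSources("default").List(context.Background(), metav1.ListOptions{})
        if err != nil {
            panic(err)
        }
        fmt.Println("pingsources:", len(list.Items))
    }
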
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/doc.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/doc.go
new file mode 100644
index 000000000..5768b36ee
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package contains the scheme of the automatically generated clientset.
+package scheme
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go
new file mode 100644
index 000000000..e037c6c17
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/scheme/register.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package scheme
+
+import (
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ serializer "k8s.io/apimachinery/pkg/runtime/serializer"
+ utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+ eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ eventingv1beta1 "knative.dev/eventing/pkg/apis/eventing/v1beta1"
+ eventingv1beta2 "knative.dev/eventing/pkg/apis/eventing/v1beta2"
+ eventingv1beta3 "knative.dev/eventing/pkg/apis/eventing/v1beta3"
+ flowsv1 "knative.dev/eventing/pkg/apis/flows/v1"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
+ sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
+ sourcesv1beta2 "knative.dev/eventing/pkg/apis/sources/v1beta2"
+)
+
+var Scheme = runtime.NewScheme()
+var Codecs = serializer.NewCodecFactory(Scheme)
+var ParameterCodec = runtime.NewParameterCodec(Scheme)
+var localSchemeBuilder = runtime.SchemeBuilder{
+ eventingv1alpha1.AddToScheme,
+ eventingv1beta1.AddToScheme,
+ eventingv1beta2.AddToScheme,
+ eventingv1beta3.AddToScheme,
+ eventingv1.AddToScheme,
+ flowsv1.AddToScheme,
+ messagingv1.AddToScheme,
+ sinksv1alpha1.AddToScheme,
+ sourcesv1beta2.AddToScheme,
+ sourcesv1.AddToScheme,
+}
+
+// AddToScheme adds all types of this clientset into the given scheme. This allows composition
+// of clientsets, like in:
+//
+// import (
+// "k8s.io/client-go/kubernetes"
+// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
+// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
+// )
+//
+// kclientset, _ := kubernetes.NewForConfig(c)
+// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
+//
+// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
+// correctly.
+var AddToScheme = localSchemeBuilder.AddToScheme
+
+func init() {
+ v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
+ utilruntime.Must(AddToScheme(Scheme))
+}
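
The AddToScheme hook registered above enables the composition pattern its comment describes. A minimal sketch, assuming a consumer that also relies on client-go's shared scheme, might look like this:

	package example

	import (
		utilruntime "k8s.io/apimachinery/pkg/util/runtime"
		clientsetscheme "k8s.io/client-go/kubernetes/scheme"

		eventingscheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
	)

	func init() {
		// Register the eventing API types into client-go's shared scheme so that
		// RawExtension fields embedding Brokers, Triggers, etc. serialize correctly.
		utilruntime.Must(eventingscheme.AddToScheme(clientsetscheme.Scheme))
	}
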
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/broker.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/broker.go
new file mode 100644
index 000000000..c6376cbac
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/broker.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// BrokersGetter has a method to return a BrokerInterface.
+// A group's client should implement this interface.
+type BrokersGetter interface {
+ Brokers(namespace string) BrokerInterface
+}
+
+// BrokerInterface has methods to work with Broker resources.
+type BrokerInterface interface {
+ Create(ctx context.Context, broker *v1.Broker, opts metav1.CreateOptions) (*v1.Broker, error)
+ Update(ctx context.Context, broker *v1.Broker, opts metav1.UpdateOptions) (*v1.Broker, error)
+ UpdateStatus(ctx context.Context, broker *v1.Broker, opts metav1.UpdateOptions) (*v1.Broker, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Broker, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.BrokerList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Broker, err error)
+ BrokerExpansion
+}
+
+// brokers implements BrokerInterface
+type brokers struct {
+ client rest.Interface
+ ns string
+}
+
+// newBrokers returns a Brokers
+func newBrokers(c *EventingV1Client, namespace string) *brokers {
+ return &brokers{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the broker, and returns the corresponding broker object, and an error if there is any.
+func (c *brokers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Broker, err error) {
+ result = &v1.Broker{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("brokers").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Brokers that match those selectors.
+func (c *brokers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.BrokerList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.BrokerList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("brokers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested brokers.
+func (c *brokers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("brokers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a broker and creates it. Returns the server's representation of the broker, and an error, if there is any.
+func (c *brokers) Create(ctx context.Context, broker *v1.Broker, opts metav1.CreateOptions) (result *v1.Broker, err error) {
+ result = &v1.Broker{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("brokers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(broker).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a broker and updates it. Returns the server's representation of the broker, and an error, if there is any.
+func (c *brokers) Update(ctx context.Context, broker *v1.Broker, opts metav1.UpdateOptions) (result *v1.Broker, err error) {
+ result = &v1.Broker{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("brokers").
+ Name(broker.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(broker).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *brokers) UpdateStatus(ctx context.Context, broker *v1.Broker, opts metav1.UpdateOptions) (result *v1.Broker, err error) {
+ result = &v1.Broker{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("brokers").
+ Name(broker.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(broker).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the broker and deletes it. Returns an error if one occurs.
+func (c *brokers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("brokers").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *brokers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("brokers").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched broker.
+func (c *brokers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Broker, err error) {
+ result = &v1.Broker{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("brokers").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
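
Because List and Watch both translate ListOptions.TimeoutSeconds into a client-side request timeout, a watch loop over Brokers can be sketched as below; the namespace and timeout value are illustrative assumptions.

	package brokerwatch

	import (
		"context"
		"fmt"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
		eventingv1client "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1"
	)

	// watchBrokers consumes the Watch method defined above until the watch closes.
	func watchBrokers(ctx context.Context, client eventingv1client.EventingV1Interface) error {
		timeout := int64(300) // sent to the server and mirrored as a client-side timeout
		w, err := client.Brokers("default").Watch(ctx, metav1.ListOptions{TimeoutSeconds: &timeout})
		if err != nil {
			return err
		}
		defer w.Stop()
		for ev := range w.ResultChan() {
			if broker, ok := ev.Object.(*eventingv1.Broker); ok {
				fmt.Printf("%s broker %s/%s\n", ev.Type, broker.Namespace, broker.Name)
			}
		}
		return nil
	}
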
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/doc.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/doc.go
new file mode 100644
index 000000000..2ce17146a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/eventing_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/eventing_client.go
new file mode 100644
index 000000000..3b3f0dfb3
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/eventing_client.go
@@ -0,0 +1,112 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "net/http"
+
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+type EventingV1Interface interface {
+ RESTClient() rest.Interface
+ BrokersGetter
+ TriggersGetter
+}
+
+// EventingV1Client is used to interact with features provided by the eventing.knative.dev group.
+type EventingV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *EventingV1Client) Brokers(namespace string) BrokerInterface {
+ return newBrokers(c, namespace)
+}
+
+func (c *EventingV1Client) Triggers(namespace string) TriggerInterface {
+ return newTriggers(c, namespace)
+}
+
+// NewForConfig creates a new EventingV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*EventingV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new EventingV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*EventingV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &EventingV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new EventingV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *EventingV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new EventingV1Client for the given RESTClient.
+func New(c rest.Interface) *EventingV1Client {
+ return &EventingV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *EventingV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
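
As the comments above note, NewForConfig is just NewForConfigAndClient with an HTTP client derived from the config; one consequence is that several group clients can share a single *http.Client (and its connection pool), mirroring what the aggregate Clientset constructor does internally. A sketch under that assumption:

	package clients

	import (
		"k8s.io/client-go/rest"

		eventingv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1"
		eventingv1beta1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1"
	)

	// newGroupClients builds two group clients over one shared HTTP client.
	func newGroupClients(cfg *rest.Config) (*eventingv1.EventingV1Client, *eventingv1beta1.EventingV1beta1Client, error) {
		httpClient, err := rest.HTTPClientFor(cfg)
		if err != nil {
			return nil, nil, err
		}
		v1c, err := eventingv1.NewForConfigAndClient(cfg, httpClient)
		if err != nil {
			return nil, nil, err
		}
		v1b1c, err := eventingv1beta1.NewForConfigAndClient(cfg, httpClient)
		if err != nil {
			return nil, nil, err
		}
		return v1c, v1b1c, nil
	}
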
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/generated_expansion.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/generated_expansion.go
new file mode 100644
index 000000000..14d7fe0c0
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/generated_expansion.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type BrokerExpansion interface{}
+
+type TriggerExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/trigger.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/trigger.go
new file mode 100644
index 000000000..fca629940
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1/trigger.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// TriggersGetter has a method to return a TriggerInterface.
+// A group's client should implement this interface.
+type TriggersGetter interface {
+ Triggers(namespace string) TriggerInterface
+}
+
+// TriggerInterface has methods to work with Trigger resources.
+type TriggerInterface interface {
+ Create(ctx context.Context, trigger *v1.Trigger, opts metav1.CreateOptions) (*v1.Trigger, error)
+ Update(ctx context.Context, trigger *v1.Trigger, opts metav1.UpdateOptions) (*v1.Trigger, error)
+ UpdateStatus(ctx context.Context, trigger *v1.Trigger, opts metav1.UpdateOptions) (*v1.Trigger, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Trigger, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.TriggerList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Trigger, err error)
+ TriggerExpansion
+}
+
+// triggers implements TriggerInterface
+type triggers struct {
+ client rest.Interface
+ ns string
+}
+
+// newTriggers returns a Triggers
+func newTriggers(c *EventingV1Client, namespace string) *triggers {
+ return &triggers{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the trigger, and returns the corresponding trigger object, and an error if there is any.
+func (c *triggers) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Trigger, err error) {
+ result = &v1.Trigger{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("triggers").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Triggers that match those selectors.
+func (c *triggers) List(ctx context.Context, opts metav1.ListOptions) (result *v1.TriggerList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.TriggerList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("triggers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested triggers.
+func (c *triggers) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("triggers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a trigger and creates it. Returns the server's representation of the trigger, and an error, if there is any.
+func (c *triggers) Create(ctx context.Context, trigger *v1.Trigger, opts metav1.CreateOptions) (result *v1.Trigger, err error) {
+ result = &v1.Trigger{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("triggers").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(trigger).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a trigger and updates it. Returns the server's representation of the trigger, and an error, if there is any.
+func (c *triggers) Update(ctx context.Context, trigger *v1.Trigger, opts metav1.UpdateOptions) (result *v1.Trigger, err error) {
+ result = &v1.Trigger{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("triggers").
+ Name(trigger.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(trigger).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *triggers) UpdateStatus(ctx context.Context, trigger *v1.Trigger, opts metav1.UpdateOptions) (result *v1.Trigger, err error) {
+ result = &v1.Trigger{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("triggers").
+ Name(trigger.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(trigger).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the trigger and deletes it. Returns an error if one occurs.
+func (c *triggers) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("triggers").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *triggers) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("triggers").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched trigger.
+func (c *triggers) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Trigger, err error) {
+ result = &v1.Trigger{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("triggers").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/doc.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/doc.go
new file mode 100644
index 000000000..0b13fd8e0
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go
new file mode 100644
index 000000000..e901caf43
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventing_client.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "net/http"
+
+ rest "k8s.io/client-go/rest"
+ v1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+type EventingV1alpha1Interface interface {
+ RESTClient() rest.Interface
+ EventPoliciesGetter
+}
+
+// EventingV1alpha1Client is used to interact with features provided by the eventing.knative.dev group.
+type EventingV1alpha1Client struct {
+ restClient rest.Interface
+}
+
+func (c *EventingV1alpha1Client) EventPolicies(namespace string) EventPolicyInterface {
+ return newEventPolicies(c, namespace)
+}
+
+// NewForConfig creates a new EventingV1alpha1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*EventingV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new EventingV1alpha1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*EventingV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &EventingV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new EventingV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *EventingV1alpha1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new EventingV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *EventingV1alpha1Client {
+ return &EventingV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1alpha1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *EventingV1alpha1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventpolicy.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventpolicy.go
new file mode 100644
index 000000000..bb510ab6e
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/eventpolicy.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// EventPoliciesGetter has a method to return a EventPolicyInterface.
+// A group's client should implement this interface.
+type EventPoliciesGetter interface {
+ EventPolicies(namespace string) EventPolicyInterface
+}
+
+// EventPolicyInterface has methods to work with EventPolicy resources.
+type EventPolicyInterface interface {
+ Create(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.CreateOptions) (*v1alpha1.EventPolicy, error)
+ Update(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.UpdateOptions) (*v1alpha1.EventPolicy, error)
+ UpdateStatus(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.UpdateOptions) (*v1alpha1.EventPolicy, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.EventPolicy, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.EventPolicyList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EventPolicy, err error)
+ EventPolicyExpansion
+}
+
+// eventPolicies implements EventPolicyInterface
+type eventPolicies struct {
+ client rest.Interface
+ ns string
+}
+
+// newEventPolicies returns a EventPolicies
+func newEventPolicies(c *EventingV1alpha1Client, namespace string) *eventPolicies {
+ return &eventPolicies{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the eventPolicy, and returns the corresponding eventPolicy object, and an error if there is any.
+func (c *eventPolicies) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.EventPolicy, err error) {
+ result = &v1alpha1.EventPolicy{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of EventPolicies that match those selectors.
+func (c *eventPolicies) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.EventPolicyList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.EventPolicyList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested eventPolicies.
+func (c *eventPolicies) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a eventPolicy and creates it. Returns the server's representation of the eventPolicy, and an error, if there is any.
+func (c *eventPolicies) Create(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.CreateOptions) (result *v1alpha1.EventPolicy, err error) {
+ result = &v1alpha1.EventPolicy{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a eventPolicy and updates it. Returns the server's representation of the eventPolicy, and an error, if there is any.
+func (c *eventPolicies) Update(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.UpdateOptions) (result *v1alpha1.EventPolicy, err error) {
+ result = &v1alpha1.EventPolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ Name(eventPolicy.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *eventPolicies) UpdateStatus(ctx context.Context, eventPolicy *v1alpha1.EventPolicy, opts v1.UpdateOptions) (result *v1alpha1.EventPolicy, err error) {
+ result = &v1alpha1.EventPolicy{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ Name(eventPolicy.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventPolicy).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the eventPolicy and deletes it. Returns an error if one occurs.
+func (c *eventPolicies) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *eventPolicies) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched eventPolicy.
+func (c *eventPolicies) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.EventPolicy, err error) {
+ result = &v1alpha1.EventPolicy{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("eventpolicies").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
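
DeleteCollection is the one method above that takes both a DeleteOptions (how each object is deleted) and a ListOptions (which objects are selected). A brief sketch for EventPolicies, with a hypothetical namespace and label selector:

	package cleanup

	import (
		"context"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

		eventingv1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1"
	)

	// pruneEventPolicies deletes every EventPolicy in the namespace matching the
	// label selector, using the DeleteCollection method defined above.
	func pruneEventPolicies(ctx context.Context, client eventingv1alpha1.EventingV1alpha1Interface, namespace, selector string) error {
		return client.EventPolicies(namespace).DeleteCollection(
			ctx,
			metav1.DeleteOptions{},                      // how to delete each object
			metav1.ListOptions{LabelSelector: selector}, // which objects to delete
		)
	}
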
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go
new file mode 100644
index 000000000..d5bd1a045
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type EventPolicyExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1/doc.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1/doc.go
new file mode 100644
index 000000000..68b15a55e
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta1
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1/eventing_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1/eventing_client.go
new file mode 100644
index 000000000..ab8a0f857
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1/eventing_client.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "net/http"
+
+ rest "k8s.io/client-go/rest"
+ v1beta1 "knative.dev/eventing/pkg/apis/eventing/v1beta1"
+ "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+type EventingV1beta1Interface interface {
+ RESTClient() rest.Interface
+ EventTypesGetter
+}
+
+// EventingV1beta1Client is used to interact with features provided by the eventing.knative.dev group.
+type EventingV1beta1Client struct {
+ restClient rest.Interface
+}
+
+func (c *EventingV1beta1Client) EventTypes(namespace string) EventTypeInterface {
+ return newEventTypes(c, namespace)
+}
+
+// NewForConfig creates a new EventingV1beta1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*EventingV1beta1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new EventingV1beta1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*EventingV1beta1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &EventingV1beta1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new EventingV1beta1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *EventingV1beta1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new EventingV1beta1Client for the given RESTClient.
+func New(c rest.Interface) *EventingV1beta1Client {
+ return &EventingV1beta1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1beta1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *EventingV1beta1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1/eventtype.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1/eventtype.go
new file mode 100644
index 000000000..7005e3300
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1/eventtype.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1beta1 "knative.dev/eventing/pkg/apis/eventing/v1beta1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// EventTypesGetter has a method to return a EventTypeInterface.
+// A group's client should implement this interface.
+type EventTypesGetter interface {
+ EventTypes(namespace string) EventTypeInterface
+}
+
+// EventTypeInterface has methods to work with EventType resources.
+type EventTypeInterface interface {
+ Create(ctx context.Context, eventType *v1beta1.EventType, opts v1.CreateOptions) (*v1beta1.EventType, error)
+ Update(ctx context.Context, eventType *v1beta1.EventType, opts v1.UpdateOptions) (*v1beta1.EventType, error)
+ UpdateStatus(ctx context.Context, eventType *v1beta1.EventType, opts v1.UpdateOptions) (*v1beta1.EventType, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.EventType, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta1.EventTypeList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EventType, err error)
+ EventTypeExpansion
+}
+
+// eventTypes implements EventTypeInterface
+type eventTypes struct {
+ client rest.Interface
+ ns string
+}
+
+// newEventTypes returns a EventTypes
+func newEventTypes(c *EventingV1beta1Client, namespace string) *eventTypes {
+ return &eventTypes{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the eventType, and returns the corresponding eventType object, and an error if there is any.
+func (c *eventTypes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.EventType, err error) {
+ result = &v1beta1.EventType{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of EventTypes that match those selectors.
+func (c *eventTypes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.EventTypeList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta1.EventTypeList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested eventTypes.
+func (c *eventTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a eventType and creates it. Returns the server's representation of the eventType, and an error, if there is any.
+func (c *eventTypes) Create(ctx context.Context, eventType *v1beta1.EventType, opts v1.CreateOptions) (result *v1beta1.EventType, err error) {
+ result = &v1beta1.EventType{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventType).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a eventType and updates it. Returns the server's representation of the eventType, and an error, if there is any.
+func (c *eventTypes) Update(ctx context.Context, eventType *v1beta1.EventType, opts v1.UpdateOptions) (result *v1beta1.EventType, err error) {
+ result = &v1beta1.EventType{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(eventType.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventType).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *eventTypes) UpdateStatus(ctx context.Context, eventType *v1beta1.EventType, opts v1.UpdateOptions) (result *v1beta1.EventType, err error) {
+ result = &v1beta1.EventType{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(eventType.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventType).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the eventType and deletes it. Returns an error if one occurs.
+func (c *eventTypes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *eventTypes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched eventType.
+func (c *eventTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta1.EventType, err error) {
+ result = &v1beta1.EventType{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1/generated_expansion.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1/generated_expansion.go
new file mode 100644
index 000000000..cd07c63e1
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta1
+
+type EventTypeExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2/doc.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2/doc.go
new file mode 100644
index 000000000..07add715e
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta2
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2/eventing_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2/eventing_client.go
new file mode 100644
index 000000000..ced743631
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2/eventing_client.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "net/http"
+
+ rest "k8s.io/client-go/rest"
+ v1beta2 "knative.dev/eventing/pkg/apis/eventing/v1beta2"
+ "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+type EventingV1beta2Interface interface {
+ RESTClient() rest.Interface
+ EventTypesGetter
+}
+
+// EventingV1beta2Client is used to interact with features provided by the eventing.knative.dev group.
+type EventingV1beta2Client struct {
+ restClient rest.Interface
+}
+
+func (c *EventingV1beta2Client) EventTypes(namespace string) EventTypeInterface {
+ return newEventTypes(c, namespace)
+}
+
+// NewForConfig creates a new EventingV1beta2Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*EventingV1beta2Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new EventingV1beta2Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*EventingV1beta2Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &EventingV1beta2Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new EventingV1beta2Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *EventingV1beta2Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new EventingV1beta2Client for the given RESTClient.
+func New(c rest.Interface) *EventingV1beta2Client {
+ return &EventingV1beta2Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1beta2.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *EventingV1beta2Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2/eventtype.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2/eventtype.go
new file mode 100644
index 000000000..95ba78732
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2/eventtype.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "context"
+ "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1beta2 "knative.dev/eventing/pkg/apis/eventing/v1beta2"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// EventTypesGetter has a method to return a EventTypeInterface.
+// A group's client should implement this interface.
+type EventTypesGetter interface {
+ EventTypes(namespace string) EventTypeInterface
+}
+
+// EventTypeInterface has methods to work with EventType resources.
+type EventTypeInterface interface {
+ Create(ctx context.Context, eventType *v1beta2.EventType, opts v1.CreateOptions) (*v1beta2.EventType, error)
+ Update(ctx context.Context, eventType *v1beta2.EventType, opts v1.UpdateOptions) (*v1beta2.EventType, error)
+ UpdateStatus(ctx context.Context, eventType *v1beta2.EventType, opts v1.UpdateOptions) (*v1beta2.EventType, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.EventType, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta2.EventTypeList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.EventType, err error)
+ EventTypeExpansion
+}
+
+// eventTypes implements EventTypeInterface
+type eventTypes struct {
+ client rest.Interface
+ ns string
+}
+
+// newEventTypes returns a EventTypes
+func newEventTypes(c *EventingV1beta2Client, namespace string) *eventTypes {
+ return &eventTypes{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the eventType, and returns the corresponding eventType object, and an error if there is any.
+func (c *eventTypes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.EventType, err error) {
+ result = &v1beta2.EventType{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of EventTypes that match those selectors.
+func (c *eventTypes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.EventTypeList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta2.EventTypeList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested eventTypes.
+func (c *eventTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a eventType and creates it. Returns the server's representation of the eventType, and an error, if there is any.
+func (c *eventTypes) Create(ctx context.Context, eventType *v1beta2.EventType, opts v1.CreateOptions) (result *v1beta2.EventType, err error) {
+ result = &v1beta2.EventType{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventType).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a eventType and updates it. Returns the server's representation of the eventType, and an error, if there is any.
+func (c *eventTypes) Update(ctx context.Context, eventType *v1beta2.EventType, opts v1.UpdateOptions) (result *v1beta2.EventType, err error) {
+ result = &v1beta2.EventType{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(eventType.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventType).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *eventTypes) UpdateStatus(ctx context.Context, eventType *v1beta2.EventType, opts v1.UpdateOptions) (result *v1beta2.EventType, err error) {
+ result = &v1beta2.EventType{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(eventType.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventType).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the eventType and deletes it. Returns an error if one occurs.
+func (c *eventTypes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *eventTypes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched eventType.
+func (c *eventTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.EventType, err error) {
+ result = &v1beta2.EventType{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2/generated_expansion.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2/generated_expansion.go
new file mode 100644
index 000000000..18a7ab381
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+type EventTypeExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/doc.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/doc.go
new file mode 100644
index 000000000..cb4a80140
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta3
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/eventing_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/eventing_client.go
new file mode 100644
index 000000000..024220425
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/eventing_client.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta3
+
+import (
+ "net/http"
+
+ rest "k8s.io/client-go/rest"
+ v1beta3 "knative.dev/eventing/pkg/apis/eventing/v1beta3"
+ "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+type EventingV1beta3Interface interface {
+ RESTClient() rest.Interface
+ EventTypesGetter
+}
+
+// EventingV1beta3Client is used to interact with features provided by the eventing.knative.dev group.
+type EventingV1beta3Client struct {
+ restClient rest.Interface
+}
+
+func (c *EventingV1beta3Client) EventTypes(namespace string) EventTypeInterface {
+ return newEventTypes(c, namespace)
+}
+
+// NewForConfig creates a new EventingV1beta3Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*EventingV1beta3Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new EventingV1beta3Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*EventingV1beta3Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &EventingV1beta3Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new EventingV1beta3Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *EventingV1beta3Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new EventingV1beta3Client for the given RESTClient.
+func New(c rest.Interface) *EventingV1beta3Client {
+ return &EventingV1beta3Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1beta3.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *EventingV1beta3Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/eventtype.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/eventtype.go
new file mode 100644
index 000000000..e76a6e5d6
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/eventtype.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta3
+
+import (
+ "context"
+ "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1beta3 "knative.dev/eventing/pkg/apis/eventing/v1beta3"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// EventTypesGetter has a method to return a EventTypeInterface.
+// A group's client should implement this interface.
+type EventTypesGetter interface {
+ EventTypes(namespace string) EventTypeInterface
+}
+
+// EventTypeInterface has methods to work with EventType resources.
+type EventTypeInterface interface {
+ Create(ctx context.Context, eventType *v1beta3.EventType, opts v1.CreateOptions) (*v1beta3.EventType, error)
+ Update(ctx context.Context, eventType *v1beta3.EventType, opts v1.UpdateOptions) (*v1beta3.EventType, error)
+ UpdateStatus(ctx context.Context, eventType *v1beta3.EventType, opts v1.UpdateOptions) (*v1beta3.EventType, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta3.EventType, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta3.EventTypeList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.EventType, err error)
+ EventTypeExpansion
+}
+
+// eventTypes implements EventTypeInterface
+type eventTypes struct {
+ client rest.Interface
+ ns string
+}
+
+// newEventTypes returns a EventTypes
+func newEventTypes(c *EventingV1beta3Client, namespace string) *eventTypes {
+ return &eventTypes{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the eventType, and returns the corresponding eventType object, and an error if there is any.
+func (c *eventTypes) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta3.EventType, err error) {
+ result = &v1beta3.EventType{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of EventTypes that match those selectors.
+func (c *eventTypes) List(ctx context.Context, opts v1.ListOptions) (result *v1beta3.EventTypeList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta3.EventTypeList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested eventTypes.
+func (c *eventTypes) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a eventType and creates it. Returns the server's representation of the eventType, and an error, if there is any.
+func (c *eventTypes) Create(ctx context.Context, eventType *v1beta3.EventType, opts v1.CreateOptions) (result *v1beta3.EventType, err error) {
+ result = &v1beta3.EventType{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventType).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a eventType and updates it. Returns the server's representation of the eventType, and an error, if there is any.
+func (c *eventTypes) Update(ctx context.Context, eventType *v1beta3.EventType, opts v1.UpdateOptions) (result *v1beta3.EventType, err error) {
+ result = &v1beta3.EventType{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(eventType.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventType).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *eventTypes) UpdateStatus(ctx context.Context, eventType *v1beta3.EventType, opts v1.UpdateOptions) (result *v1beta3.EventType, err error) {
+ result = &v1beta3.EventType{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(eventType.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(eventType).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the eventType and deletes it. Returns an error if one occurs.
+func (c *eventTypes) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *eventTypes) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("eventtypes").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched eventType.
+func (c *eventTypes) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta3.EventType, err error) {
+ result = &v1beta3.EventType{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("eventtypes").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/generated_expansion.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/generated_expansion.go
new file mode 100644
index 000000000..f0cb48156
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta3
+
+type EventTypeExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/doc.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/doc.go
new file mode 100644
index 000000000..2ce17146a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/flows_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/flows_client.go
new file mode 100644
index 000000000..6df3b5a86
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/flows_client.go
@@ -0,0 +1,112 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "net/http"
+
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/flows/v1"
+ "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+type FlowsV1Interface interface {
+ RESTClient() rest.Interface
+ ParallelsGetter
+ SequencesGetter
+}
+
+// FlowsV1Client is used to interact with features provided by the flows.knative.dev group.
+type FlowsV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *FlowsV1Client) Parallels(namespace string) ParallelInterface {
+ return newParallels(c, namespace)
+}
+
+func (c *FlowsV1Client) Sequences(namespace string) SequenceInterface {
+ return newSequences(c, namespace)
+}
+
+// NewForConfig creates a new FlowsV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*FlowsV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new FlowsV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*FlowsV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &FlowsV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new FlowsV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *FlowsV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new FlowsV1Client for the given RESTClient.
+func New(c rest.Interface) *FlowsV1Client {
+ return &FlowsV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *FlowsV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/generated_expansion.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/generated_expansion.go
new file mode 100644
index 000000000..16fdb1dcd
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/generated_expansion.go
@@ -0,0 +1,23 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type ParallelExpansion interface{}
+
+type SequenceExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/parallel.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/parallel.go
new file mode 100644
index 000000000..39fc2a0fa
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/parallel.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/flows/v1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// ParallelsGetter has a method to return a ParallelInterface.
+// A group's client should implement this interface.
+type ParallelsGetter interface {
+ Parallels(namespace string) ParallelInterface
+}
+
+// ParallelInterface has methods to work with Parallel resources.
+type ParallelInterface interface {
+ Create(ctx context.Context, parallel *v1.Parallel, opts metav1.CreateOptions) (*v1.Parallel, error)
+ Update(ctx context.Context, parallel *v1.Parallel, opts metav1.UpdateOptions) (*v1.Parallel, error)
+ UpdateStatus(ctx context.Context, parallel *v1.Parallel, opts metav1.UpdateOptions) (*v1.Parallel, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Parallel, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ParallelList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Parallel, err error)
+ ParallelExpansion
+}
+
+// parallels implements ParallelInterface
+type parallels struct {
+ client rest.Interface
+ ns string
+}
+
+// newParallels returns a Parallels
+func newParallels(c *FlowsV1Client, namespace string) *parallels {
+	return &parallels{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the parallel, and returns the corresponding parallel object, and an error if there is any.
+func (c *parallels) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Parallel, err error) {
+ result = &v1.Parallel{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("parallels").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Parallels that match those selectors.
+func (c *parallels) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ParallelList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ParallelList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("parallels").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested parallels.
+func (c *parallels) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("parallels").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a parallel and creates it. Returns the server's representation of the parallel, and an error, if there is any.
+func (c *parallels) Create(ctx context.Context, parallel *v1.Parallel, opts metav1.CreateOptions) (result *v1.Parallel, err error) {
+ result = &v1.Parallel{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("parallels").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(parallel).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a parallel and updates it. Returns the server's representation of the parallel, and an error, if there is any.
+func (c *parallels) Update(ctx context.Context, parallel *v1.Parallel, opts metav1.UpdateOptions) (result *v1.Parallel, err error) {
+ result = &v1.Parallel{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("parallels").
+ Name(parallel.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(parallel).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *parallels) UpdateStatus(ctx context.Context, parallel *v1.Parallel, opts metav1.UpdateOptions) (result *v1.Parallel, err error) {
+ result = &v1.Parallel{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("parallels").
+ Name(parallel.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(parallel).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the parallel and deletes it. Returns an error if one occurs.
+func (c *parallels) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("parallels").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *parallels) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("parallels").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched parallel.
+func (c *parallels) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Parallel, err error) {
+ result = &v1.Parallel{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("parallels").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/sequence.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/sequence.go
new file mode 100644
index 000000000..64c723b9b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1/sequence.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/flows/v1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// SequencesGetter has a method to return a SequenceInterface.
+// A group's client should implement this interface.
+type SequencesGetter interface {
+ Sequences(namespace string) SequenceInterface
+}
+
+// SequenceInterface has methods to work with Sequence resources.
+type SequenceInterface interface {
+ Create(ctx context.Context, sequence *v1.Sequence, opts metav1.CreateOptions) (*v1.Sequence, error)
+ Update(ctx context.Context, sequence *v1.Sequence, opts metav1.UpdateOptions) (*v1.Sequence, error)
+ UpdateStatus(ctx context.Context, sequence *v1.Sequence, opts metav1.UpdateOptions) (*v1.Sequence, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Sequence, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.SequenceList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Sequence, err error)
+ SequenceExpansion
+}
+
+// sequences implements SequenceInterface
+type sequences struct {
+ client rest.Interface
+ ns string
+}
+
+// newSequences returns a Sequences
+func newSequences(c *FlowsV1Client, namespace string) *sequences {
+ return &sequences{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the sequence, and returns the corresponding sequence object, and an error if there is any.
+func (c *sequences) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Sequence, err error) {
+ result = &v1.Sequence{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("sequences").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Sequences that match those selectors.
+func (c *sequences) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SequenceList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.SequenceList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("sequences").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested sequences.
+func (c *sequences) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("sequences").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a sequence and creates it. Returns the server's representation of the sequence, and an error, if there is any.
+func (c *sequences) Create(ctx context.Context, sequence *v1.Sequence, opts metav1.CreateOptions) (result *v1.Sequence, err error) {
+ result = &v1.Sequence{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("sequences").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(sequence).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a sequence and updates it. Returns the server's representation of the sequence, and an error, if there is any.
+func (c *sequences) Update(ctx context.Context, sequence *v1.Sequence, opts metav1.UpdateOptions) (result *v1.Sequence, err error) {
+ result = &v1.Sequence{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("sequences").
+ Name(sequence.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(sequence).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *sequences) UpdateStatus(ctx context.Context, sequence *v1.Sequence, opts metav1.UpdateOptions) (result *v1.Sequence, err error) {
+ result = &v1.Sequence{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("sequences").
+ Name(sequence.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(sequence).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the sequence and deletes it. Returns an error if one occurs.
+func (c *sequences) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("sequences").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *sequences) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("sequences").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched sequence.
+func (c *sequences) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Sequence, err error) {
+ result = &v1.Sequence{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("sequences").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/channel.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/channel.go
new file mode 100644
index 000000000..5fab4c9bc
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/channel.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// ChannelsGetter has a method to return a ChannelInterface.
+// A group's client should implement this interface.
+type ChannelsGetter interface {
+ Channels(namespace string) ChannelInterface
+}
+
+// ChannelInterface has methods to work with Channel resources.
+type ChannelInterface interface {
+ Create(ctx context.Context, channel *v1.Channel, opts metav1.CreateOptions) (*v1.Channel, error)
+ Update(ctx context.Context, channel *v1.Channel, opts metav1.UpdateOptions) (*v1.Channel, error)
+ UpdateStatus(ctx context.Context, channel *v1.Channel, opts metav1.UpdateOptions) (*v1.Channel, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Channel, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ChannelList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Channel, err error)
+ ChannelExpansion
+}
+
+// channels implements ChannelInterface
+type channels struct {
+ client rest.Interface
+ ns string
+}
+
+// newChannels returns a Channels
+func newChannels(c *MessagingV1Client, namespace string) *channels {
+ return &channels{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the channel, and returns the corresponding channel object, and an error if there is any.
+func (c *channels) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Channel, err error) {
+ result = &v1.Channel{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("channels").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Channels that match those selectors.
+func (c *channels) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ChannelList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ChannelList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("channels").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested channels.
+func (c *channels) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("channels").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a channel and creates it. Returns the server's representation of the channel, and an error, if there is any.
+func (c *channels) Create(ctx context.Context, channel *v1.Channel, opts metav1.CreateOptions) (result *v1.Channel, err error) {
+ result = &v1.Channel{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("channels").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(channel).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a channel and updates it. Returns the server's representation of the channel, and an error, if there is any.
+func (c *channels) Update(ctx context.Context, channel *v1.Channel, opts metav1.UpdateOptions) (result *v1.Channel, err error) {
+ result = &v1.Channel{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("channels").
+ Name(channel.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(channel).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *channels) UpdateStatus(ctx context.Context, channel *v1.Channel, opts metav1.UpdateOptions) (result *v1.Channel, err error) {
+ result = &v1.Channel{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("channels").
+ Name(channel.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(channel).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the channel and deletes it. Returns an error if one occurs.
+func (c *channels) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("channels").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *channels) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("channels").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched channel.
+func (c *channels) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Channel, err error) {
+ result = &v1.Channel{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("channels").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/doc.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/doc.go
new file mode 100644
index 000000000..2ce17146a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/generated_expansion.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/generated_expansion.go
new file mode 100644
index 000000000..a847be33a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/generated_expansion.go
@@ -0,0 +1,25 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type ChannelExpansion interface{}
+
+type InMemoryChannelExpansion interface{}
+
+type SubscriptionExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/inmemorychannel.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/inmemorychannel.go
new file mode 100644
index 000000000..5c749d3ab
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/inmemorychannel.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// InMemoryChannelsGetter has a method to return a InMemoryChannelInterface.
+// A group's client should implement this interface.
+type InMemoryChannelsGetter interface {
+ InMemoryChannels(namespace string) InMemoryChannelInterface
+}
+
+// InMemoryChannelInterface has methods to work with InMemoryChannel resources.
+type InMemoryChannelInterface interface {
+ Create(ctx context.Context, inMemoryChannel *v1.InMemoryChannel, opts metav1.CreateOptions) (*v1.InMemoryChannel, error)
+ Update(ctx context.Context, inMemoryChannel *v1.InMemoryChannel, opts metav1.UpdateOptions) (*v1.InMemoryChannel, error)
+ UpdateStatus(ctx context.Context, inMemoryChannel *v1.InMemoryChannel, opts metav1.UpdateOptions) (*v1.InMemoryChannel, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.InMemoryChannel, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.InMemoryChannelList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.InMemoryChannel, err error)
+ InMemoryChannelExpansion
+}
+
+// inMemoryChannels implements InMemoryChannelInterface
+type inMemoryChannels struct {
+ client rest.Interface
+ ns string
+}
+
+// newInMemoryChannels returns a InMemoryChannels
+func newInMemoryChannels(c *MessagingV1Client, namespace string) *inMemoryChannels {
+ return &inMemoryChannels{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the inMemoryChannel, and returns the corresponding inMemoryChannel object, and an error if there is any.
+func (c *inMemoryChannels) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.InMemoryChannel, err error) {
+ result = &v1.InMemoryChannel{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("inmemorychannels").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of InMemoryChannels that match those selectors.
+func (c *inMemoryChannels) List(ctx context.Context, opts metav1.ListOptions) (result *v1.InMemoryChannelList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.InMemoryChannelList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("inmemorychannels").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested inMemoryChannels.
+func (c *inMemoryChannels) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("inmemorychannels").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a inMemoryChannel and creates it. Returns the server's representation of the inMemoryChannel, and an error, if there is any.
+func (c *inMemoryChannels) Create(ctx context.Context, inMemoryChannel *v1.InMemoryChannel, opts metav1.CreateOptions) (result *v1.InMemoryChannel, err error) {
+ result = &v1.InMemoryChannel{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("inmemorychannels").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(inMemoryChannel).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a inMemoryChannel and updates it. Returns the server's representation of the inMemoryChannel, and an error, if there is any.
+func (c *inMemoryChannels) Update(ctx context.Context, inMemoryChannel *v1.InMemoryChannel, opts metav1.UpdateOptions) (result *v1.InMemoryChannel, err error) {
+ result = &v1.InMemoryChannel{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("inmemorychannels").
+ Name(inMemoryChannel.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(inMemoryChannel).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *inMemoryChannels) UpdateStatus(ctx context.Context, inMemoryChannel *v1.InMemoryChannel, opts metav1.UpdateOptions) (result *v1.InMemoryChannel, err error) {
+ result = &v1.InMemoryChannel{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("inmemorychannels").
+ Name(inMemoryChannel.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(inMemoryChannel).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the inMemoryChannel and deletes it. Returns an error if one occurs.
+func (c *inMemoryChannels) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("inmemorychannels").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *inMemoryChannels) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("inmemorychannels").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched inMemoryChannel.
+func (c *inMemoryChannels) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.InMemoryChannel, err error) {
+ result = &v1.InMemoryChannel{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("inmemorychannels").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/messaging_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/messaging_client.go
new file mode 100644
index 000000000..bccf1fb35
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/messaging_client.go
@@ -0,0 +1,117 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "net/http"
+
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+type MessagingV1Interface interface {
+ RESTClient() rest.Interface
+ ChannelsGetter
+ InMemoryChannelsGetter
+ SubscriptionsGetter
+}
+
+// MessagingV1Client is used to interact with features provided by the messaging.knative.dev group.
+type MessagingV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *MessagingV1Client) Channels(namespace string) ChannelInterface {
+ return newChannels(c, namespace)
+}
+
+func (c *MessagingV1Client) InMemoryChannels(namespace string) InMemoryChannelInterface {
+ return newInMemoryChannels(c, namespace)
+}
+
+func (c *MessagingV1Client) Subscriptions(namespace string) SubscriptionInterface {
+ return newSubscriptions(c, namespace)
+}
+
+// NewForConfig creates a new MessagingV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*MessagingV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new MessagingV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*MessagingV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &MessagingV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new MessagingV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *MessagingV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new MessagingV1Client for the given RESTClient.
+func New(c rest.Interface) *MessagingV1Client {
+ return &MessagingV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *MessagingV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/subscription.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/subscription.go
new file mode 100644
index 000000000..abf54a1d5
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1/subscription.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// SubscriptionsGetter has a method to return a SubscriptionInterface.
+// A group's client should implement this interface.
+type SubscriptionsGetter interface {
+ Subscriptions(namespace string) SubscriptionInterface
+}
+
+// SubscriptionInterface has methods to work with Subscription resources.
+type SubscriptionInterface interface {
+ Create(ctx context.Context, subscription *v1.Subscription, opts metav1.CreateOptions) (*v1.Subscription, error)
+ Update(ctx context.Context, subscription *v1.Subscription, opts metav1.UpdateOptions) (*v1.Subscription, error)
+ UpdateStatus(ctx context.Context, subscription *v1.Subscription, opts metav1.UpdateOptions) (*v1.Subscription, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.Subscription, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.SubscriptionList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Subscription, err error)
+ SubscriptionExpansion
+}
+
+// subscriptions implements SubscriptionInterface
+type subscriptions struct {
+ client rest.Interface
+ ns string
+}
+
+// newSubscriptions returns a Subscriptions
+func newSubscriptions(c *MessagingV1Client, namespace string) *subscriptions {
+ return &subscriptions{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the subscription, and returns the corresponding subscription object, and an error if there is any.
+func (c *subscriptions) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.Subscription, err error) {
+ result = &v1.Subscription{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("subscriptions").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of Subscriptions that match those selectors.
+func (c *subscriptions) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SubscriptionList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.SubscriptionList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("subscriptions").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested subscriptions.
+func (c *subscriptions) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("subscriptions").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a subscription and creates it. Returns the server's representation of the subscription, and an error, if there is any.
+func (c *subscriptions) Create(ctx context.Context, subscription *v1.Subscription, opts metav1.CreateOptions) (result *v1.Subscription, err error) {
+ result = &v1.Subscription{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("subscriptions").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(subscription).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a subscription and updates it. Returns the server's representation of the subscription, and an error, if there is any.
+func (c *subscriptions) Update(ctx context.Context, subscription *v1.Subscription, opts metav1.UpdateOptions) (result *v1.Subscription, err error) {
+ result = &v1.Subscription{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("subscriptions").
+ Name(subscription.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(subscription).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *subscriptions) UpdateStatus(ctx context.Context, subscription *v1.Subscription, opts metav1.UpdateOptions) (result *v1.Subscription, err error) {
+ result = &v1.Subscription{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("subscriptions").
+ Name(subscription.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(subscription).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the subscription and deletes it. Returns an error if one occurs.
+func (c *subscriptions) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("subscriptions").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *subscriptions) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("subscriptions").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched subscription.
+func (c *subscriptions) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.Subscription, err error) {
+ result = &v1.Subscription{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("subscriptions").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
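Reviewer note (illustrative only): the Patch method generated above takes a raw patch body plus a patch type. A hypothetical sketch of applying a JSON merge patch to a Subscription; the namespace, name, and label are assumptions:

package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	messagingapiv1 "knative.dev/eventing/pkg/apis/messaging/v1"
	messagingv1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1"
)

// labelSubscription adds an illustrative label to an existing Subscription by
// calling the generated Patch method with a JSON merge patch.
func labelSubscription(ctx context.Context, c messagingv1.MessagingV1Interface, ns, name string) (*messagingapiv1.Subscription, error) {
	patch := []byte(`{"metadata":{"labels":{"example.com/owned":"true"}}}`)
	return c.Subscriptions(ns).Patch(ctx, name, types.MergePatchType, patch, metav1.PatchOptions{})
}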
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/doc.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/doc.go
new file mode 100644
index 000000000..0b13fd8e0
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1alpha1
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/generated_expansion.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/generated_expansion.go
new file mode 100644
index 000000000..029b7bee8
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+type JobSinkExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/jobsink.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/jobsink.go
new file mode 100644
index 000000000..71851300a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/jobsink.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// JobSinksGetter has a method to return a JobSinkInterface.
+// A group's client should implement this interface.
+type JobSinksGetter interface {
+ JobSinks(namespace string) JobSinkInterface
+}
+
+// JobSinkInterface has methods to work with JobSink resources.
+type JobSinkInterface interface {
+ Create(ctx context.Context, jobSink *v1alpha1.JobSink, opts v1.CreateOptions) (*v1alpha1.JobSink, error)
+ Update(ctx context.Context, jobSink *v1alpha1.JobSink, opts v1.UpdateOptions) (*v1alpha1.JobSink, error)
+ UpdateStatus(ctx context.Context, jobSink *v1alpha1.JobSink, opts v1.UpdateOptions) (*v1alpha1.JobSink, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.JobSink, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.JobSinkList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.JobSink, err error)
+ JobSinkExpansion
+}
+
+// jobSinks implements JobSinkInterface
+type jobSinks struct {
+ client rest.Interface
+ ns string
+}
+
+// newJobSinks returns a JobSinks
+func newJobSinks(c *SinksV1alpha1Client, namespace string) *jobSinks {
+ return &jobSinks{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the jobSink, and returns the corresponding jobSink object, and an error if there is any.
+func (c *jobSinks) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.JobSink, err error) {
+ result = &v1alpha1.JobSink{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("jobsinks").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of JobSinks that match those selectors.
+func (c *jobSinks) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.JobSinkList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1alpha1.JobSinkList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("jobsinks").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested jobSinks.
+func (c *jobSinks) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("jobsinks").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a jobSink and creates it. Returns the server's representation of the jobSink, and an error, if there is any.
+func (c *jobSinks) Create(ctx context.Context, jobSink *v1alpha1.JobSink, opts v1.CreateOptions) (result *v1alpha1.JobSink, err error) {
+ result = &v1alpha1.JobSink{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("jobsinks").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(jobSink).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a jobSink and updates it. Returns the server's representation of the jobSink, and an error, if there is any.
+func (c *jobSinks) Update(ctx context.Context, jobSink *v1alpha1.JobSink, opts v1.UpdateOptions) (result *v1alpha1.JobSink, err error) {
+ result = &v1alpha1.JobSink{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("jobsinks").
+ Name(jobSink.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(jobSink).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *jobSinks) UpdateStatus(ctx context.Context, jobSink *v1alpha1.JobSink, opts v1.UpdateOptions) (result *v1alpha1.JobSink, err error) {
+ result = &v1alpha1.JobSink{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("jobsinks").
+ Name(jobSink.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(jobSink).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the jobSink and deletes it. Returns an error if one occurs.
+func (c *jobSinks) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("jobsinks").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *jobSinks) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("jobsinks").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched jobSink.
+func (c *jobSinks) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1alpha1.JobSink, err error) {
+ result = &v1alpha1.JobSink{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("jobsinks").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/sinks_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/sinks_client.go
new file mode 100644
index 000000000..2012a8f57
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1/sinks_client.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "net/http"
+
+ rest "k8s.io/client-go/rest"
+ v1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
+ "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+type SinksV1alpha1Interface interface {
+ RESTClient() rest.Interface
+ JobSinksGetter
+}
+
+// SinksV1alpha1Client is used to interact with features provided by the sinks.knative.dev group.
+type SinksV1alpha1Client struct {
+ restClient rest.Interface
+}
+
+func (c *SinksV1alpha1Client) JobSinks(namespace string) JobSinkInterface {
+ return newJobSinks(c, namespace)
+}
+
+// NewForConfig creates a new SinksV1alpha1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*SinksV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new SinksV1alpha1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SinksV1alpha1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &SinksV1alpha1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new SinksV1alpha1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *SinksV1alpha1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new SinksV1alpha1Client for the given RESTClient.
+func New(c rest.Interface) *SinksV1alpha1Client {
+ return &SinksV1alpha1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1alpha1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *SinksV1alpha1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
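Reviewer note (illustrative only): the new sinks/v1alpha1 client exposes JobSinks with the usual verbs, including Watch. A sketch, assuming a caller already holds a *rest.Config:

package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/rest"
	sinksv1alpha1 "knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1"
)

// watchJobSinks watches JobSink objects in one namespace using the generated
// SinksV1alpha1Client above and logs each watch event.
func watchJobSinks(ctx context.Context, cfg *rest.Config, ns string) error {
	client, err := sinksv1alpha1.NewForConfig(cfg)
	if err != nil {
		return err
	}
	w, err := client.JobSinks(ns).Watch(ctx, metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		fmt.Printf("%s: %v\n", ev.Type, ev.Object.GetObjectKind().GroupVersionKind())
	}
	return nil
}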
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/apiserversource.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/apiserversource.go
new file mode 100644
index 000000000..3798092b8
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/apiserversource.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/sources/v1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// ApiServerSourcesGetter has a method to return a ApiServerSourceInterface.
+// A group's client should implement this interface.
+type ApiServerSourcesGetter interface {
+ ApiServerSources(namespace string) ApiServerSourceInterface
+}
+
+// ApiServerSourceInterface has methods to work with ApiServerSource resources.
+type ApiServerSourceInterface interface {
+ Create(ctx context.Context, apiServerSource *v1.ApiServerSource, opts metav1.CreateOptions) (*v1.ApiServerSource, error)
+ Update(ctx context.Context, apiServerSource *v1.ApiServerSource, opts metav1.UpdateOptions) (*v1.ApiServerSource, error)
+ UpdateStatus(ctx context.Context, apiServerSource *v1.ApiServerSource, opts metav1.UpdateOptions) (*v1.ApiServerSource, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ApiServerSource, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ApiServerSourceList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ApiServerSource, err error)
+ ApiServerSourceExpansion
+}
+
+// apiServerSources implements ApiServerSourceInterface
+type apiServerSources struct {
+ client rest.Interface
+ ns string
+}
+
+// newApiServerSources returns a ApiServerSources
+func newApiServerSources(c *SourcesV1Client, namespace string) *apiServerSources {
+ return &apiServerSources{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the apiServerSource, and returns the corresponding apiServerSource object, and an error if there is any.
+func (c *apiServerSources) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ApiServerSource, err error) {
+ result = &v1.ApiServerSource{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ApiServerSources that match those selectors.
+func (c *apiServerSources) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ApiServerSourceList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ApiServerSourceList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested apiServerSources.
+func (c *apiServerSources) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a apiServerSource and creates it. Returns the server's representation of the apiServerSource, and an error, if there is any.
+func (c *apiServerSources) Create(ctx context.Context, apiServerSource *v1.ApiServerSource, opts metav1.CreateOptions) (result *v1.ApiServerSource, err error) {
+ result = &v1.ApiServerSource{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(apiServerSource).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a apiServerSource and updates it. Returns the server's representation of the apiServerSource, and an error, if there is any.
+func (c *apiServerSources) Update(ctx context.Context, apiServerSource *v1.ApiServerSource, opts metav1.UpdateOptions) (result *v1.ApiServerSource, err error) {
+ result = &v1.ApiServerSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ Name(apiServerSource.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(apiServerSource).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *apiServerSources) UpdateStatus(ctx context.Context, apiServerSource *v1.ApiServerSource, opts metav1.UpdateOptions) (result *v1.ApiServerSource, err error) {
+ result = &v1.ApiServerSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ Name(apiServerSource.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(apiServerSource).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the apiServerSource and deletes it. Returns an error if one occurs.
+func (c *apiServerSources) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *apiServerSources) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("apiserversources").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched apiServerSource.
+func (c *apiServerSources) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ApiServerSource, err error) {
+ result = &v1.ApiServerSource{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("apiserversources").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/containersource.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/containersource.go
new file mode 100644
index 000000000..61f142c9c
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/containersource.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/sources/v1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// ContainerSourcesGetter has a method to return a ContainerSourceInterface.
+// A group's client should implement this interface.
+type ContainerSourcesGetter interface {
+ ContainerSources(namespace string) ContainerSourceInterface
+}
+
+// ContainerSourceInterface has methods to work with ContainerSource resources.
+type ContainerSourceInterface interface {
+ Create(ctx context.Context, containerSource *v1.ContainerSource, opts metav1.CreateOptions) (*v1.ContainerSource, error)
+ Update(ctx context.Context, containerSource *v1.ContainerSource, opts metav1.UpdateOptions) (*v1.ContainerSource, error)
+ UpdateStatus(ctx context.Context, containerSource *v1.ContainerSource, opts metav1.UpdateOptions) (*v1.ContainerSource, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.ContainerSource, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.ContainerSourceList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ContainerSource, err error)
+ ContainerSourceExpansion
+}
+
+// containerSources implements ContainerSourceInterface
+type containerSources struct {
+ client rest.Interface
+ ns string
+}
+
+// newContainerSources returns a ContainerSources
+func newContainerSources(c *SourcesV1Client, namespace string) *containerSources {
+ return &containerSources{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the containerSource, and returns the corresponding containerSource object, and an error if there is any.
+func (c *containerSources) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.ContainerSource, err error) {
+ result = &v1.ContainerSource{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("containersources").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of ContainerSources that match those selectors.
+func (c *containerSources) List(ctx context.Context, opts metav1.ListOptions) (result *v1.ContainerSourceList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.ContainerSourceList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("containersources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested containerSources.
+func (c *containerSources) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("containersources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a containerSource and creates it. Returns the server's representation of the containerSource, and an error, if there is any.
+func (c *containerSources) Create(ctx context.Context, containerSource *v1.ContainerSource, opts metav1.CreateOptions) (result *v1.ContainerSource, err error) {
+ result = &v1.ContainerSource{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("containersources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(containerSource).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a containerSource and updates it. Returns the server's representation of the containerSource, and an error, if there is any.
+func (c *containerSources) Update(ctx context.Context, containerSource *v1.ContainerSource, opts metav1.UpdateOptions) (result *v1.ContainerSource, err error) {
+ result = &v1.ContainerSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("containersources").
+ Name(containerSource.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(containerSource).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *containerSources) UpdateStatus(ctx context.Context, containerSource *v1.ContainerSource, opts metav1.UpdateOptions) (result *v1.ContainerSource, err error) {
+ result = &v1.ContainerSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("containersources").
+ Name(containerSource.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(containerSource).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the containerSource and deletes it. Returns an error if one occurs.
+func (c *containerSources) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("containersources").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *containerSources) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("containersources").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched containerSource.
+func (c *containerSources) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.ContainerSource, err error) {
+ result = &v1.ContainerSource{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("containersources").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/doc.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/doc.go
new file mode 100644
index 000000000..2ce17146a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/generated_expansion.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/generated_expansion.go
new file mode 100644
index 000000000..6b63704d3
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/generated_expansion.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+type ApiServerSourceExpansion interface{}
+
+type ContainerSourceExpansion interface{}
+
+type PingSourceExpansion interface{}
+
+type SinkBindingExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/pingsource.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/pingsource.go
new file mode 100644
index 000000000..0e3d23f4c
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/pingsource.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/sources/v1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// PingSourcesGetter has a method to return a PingSourceInterface.
+// A group's client should implement this interface.
+type PingSourcesGetter interface {
+ PingSources(namespace string) PingSourceInterface
+}
+
+// PingSourceInterface has methods to work with PingSource resources.
+type PingSourceInterface interface {
+ Create(ctx context.Context, pingSource *v1.PingSource, opts metav1.CreateOptions) (*v1.PingSource, error)
+ Update(ctx context.Context, pingSource *v1.PingSource, opts metav1.UpdateOptions) (*v1.PingSource, error)
+ UpdateStatus(ctx context.Context, pingSource *v1.PingSource, opts metav1.UpdateOptions) (*v1.PingSource, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PingSource, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.PingSourceList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PingSource, err error)
+ PingSourceExpansion
+}
+
+// pingSources implements PingSourceInterface
+type pingSources struct {
+ client rest.Interface
+ ns string
+}
+
+// newPingSources returns a PingSources
+func newPingSources(c *SourcesV1Client, namespace string) *pingSources {
+ return &pingSources{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the pingSource, and returns the corresponding pingSource object, and an error if there is any.
+func (c *pingSources) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.PingSource, err error) {
+ result = &v1.PingSource{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("pingsources").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of PingSources that match those selectors.
+func (c *pingSources) List(ctx context.Context, opts metav1.ListOptions) (result *v1.PingSourceList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.PingSourceList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("pingsources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested pingSources.
+func (c *pingSources) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("pingsources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a pingSource and creates it. Returns the server's representation of the pingSource, and an error, if there is any.
+func (c *pingSources) Create(ctx context.Context, pingSource *v1.PingSource, opts metav1.CreateOptions) (result *v1.PingSource, err error) {
+ result = &v1.PingSource{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("pingsources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(pingSource).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a pingSource and updates it. Returns the server's representation of the pingSource, and an error, if there is any.
+func (c *pingSources) Update(ctx context.Context, pingSource *v1.PingSource, opts metav1.UpdateOptions) (result *v1.PingSource, err error) {
+ result = &v1.PingSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("pingsources").
+ Name(pingSource.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(pingSource).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *pingSources) UpdateStatus(ctx context.Context, pingSource *v1.PingSource, opts metav1.UpdateOptions) (result *v1.PingSource, err error) {
+ result = &v1.PingSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("pingsources").
+ Name(pingSource.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(pingSource).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the pingSource and deletes it. Returns an error if one occurs.
+func (c *pingSources) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("pingsources").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *pingSources) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("pingsources").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched pingSource.
+func (c *pingSources) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.PingSource, err error) {
+ result = &v1.PingSource{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("pingsources").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/sinkbinding.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/sinkbinding.go
new file mode 100644
index 000000000..d3613d31b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/sinkbinding.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/sources/v1"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// SinkBindingsGetter has a method to return a SinkBindingInterface.
+// A group's client should implement this interface.
+type SinkBindingsGetter interface {
+ SinkBindings(namespace string) SinkBindingInterface
+}
+
+// SinkBindingInterface has methods to work with SinkBinding resources.
+type SinkBindingInterface interface {
+ Create(ctx context.Context, sinkBinding *v1.SinkBinding, opts metav1.CreateOptions) (*v1.SinkBinding, error)
+ Update(ctx context.Context, sinkBinding *v1.SinkBinding, opts metav1.UpdateOptions) (*v1.SinkBinding, error)
+ UpdateStatus(ctx context.Context, sinkBinding *v1.SinkBinding, opts metav1.UpdateOptions) (*v1.SinkBinding, error)
+ Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error
+ Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.SinkBinding, error)
+ List(ctx context.Context, opts metav1.ListOptions) (*v1.SinkBindingList, error)
+ Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.SinkBinding, err error)
+ SinkBindingExpansion
+}
+
+// sinkBindings implements SinkBindingInterface
+type sinkBindings struct {
+ client rest.Interface
+ ns string
+}
+
+// newSinkBindings returns a SinkBindings
+func newSinkBindings(c *SourcesV1Client, namespace string) *sinkBindings {
+ return &sinkBindings{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the sinkBinding, and returns the corresponding sinkBinding object, and an error if there is any.
+func (c *sinkBindings) Get(ctx context.Context, name string, options metav1.GetOptions) (result *v1.SinkBinding, err error) {
+ result = &v1.SinkBinding{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("sinkbindings").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of SinkBindings that match those selectors.
+func (c *sinkBindings) List(ctx context.Context, opts metav1.ListOptions) (result *v1.SinkBindingList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1.SinkBindingList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("sinkbindings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested sinkBindings.
+func (c *sinkBindings) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("sinkbindings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a sinkBinding and creates it. Returns the server's representation of the sinkBinding, and an error, if there is any.
+func (c *sinkBindings) Create(ctx context.Context, sinkBinding *v1.SinkBinding, opts metav1.CreateOptions) (result *v1.SinkBinding, err error) {
+ result = &v1.SinkBinding{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("sinkbindings").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(sinkBinding).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a sinkBinding and updates it. Returns the server's representation of the sinkBinding, and an error, if there is any.
+func (c *sinkBindings) Update(ctx context.Context, sinkBinding *v1.SinkBinding, opts metav1.UpdateOptions) (result *v1.SinkBinding, err error) {
+ result = &v1.SinkBinding{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("sinkbindings").
+ Name(sinkBinding.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(sinkBinding).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *sinkBindings) UpdateStatus(ctx context.Context, sinkBinding *v1.SinkBinding, opts metav1.UpdateOptions) (result *v1.SinkBinding, err error) {
+ result = &v1.SinkBinding{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("sinkbindings").
+ Name(sinkBinding.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(sinkBinding).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the sinkBinding and deletes it. Returns an error if one occurs.
+func (c *sinkBindings) Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("sinkbindings").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *sinkBindings) DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("sinkbindings").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched sinkBinding.
+func (c *sinkBindings) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result *v1.SinkBinding, err error) {
+ result = &v1.SinkBinding{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("sinkbindings").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/sources_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/sources_client.go
new file mode 100644
index 000000000..e7bb80092
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1/sources_client.go
@@ -0,0 +1,122 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "net/http"
+
+ rest "k8s.io/client-go/rest"
+ v1 "knative.dev/eventing/pkg/apis/sources/v1"
+ "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+type SourcesV1Interface interface {
+ RESTClient() rest.Interface
+ ApiServerSourcesGetter
+ ContainerSourcesGetter
+ PingSourcesGetter
+ SinkBindingsGetter
+}
+
+// SourcesV1Client is used to interact with features provided by the sources.knative.dev group.
+type SourcesV1Client struct {
+ restClient rest.Interface
+}
+
+func (c *SourcesV1Client) ApiServerSources(namespace string) ApiServerSourceInterface {
+ return newApiServerSources(c, namespace)
+}
+
+func (c *SourcesV1Client) ContainerSources(namespace string) ContainerSourceInterface {
+ return newContainerSources(c, namespace)
+}
+
+func (c *SourcesV1Client) PingSources(namespace string) PingSourceInterface {
+ return newPingSources(c, namespace)
+}
+
+func (c *SourcesV1Client) SinkBindings(namespace string) SinkBindingInterface {
+ return newSinkBindings(c, namespace)
+}
+
+// NewForConfig creates a new SourcesV1Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*SourcesV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new SourcesV1Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SourcesV1Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &SourcesV1Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new SourcesV1Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *SourcesV1Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new SourcesV1Client for the given RESTClient.
+func New(c rest.Interface) *SourcesV1Client {
+ return &SourcesV1Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *SourcesV1Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/doc.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/doc.go
new file mode 100644
index 000000000..07add715e
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/doc.go
@@ -0,0 +1,20 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+// This package has the automatically generated typed clients.
+package v1beta2
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/generated_expansion.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/generated_expansion.go
new file mode 100644
index 000000000..3dcc9777d
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/generated_expansion.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+type PingSourceExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/pingsource.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/pingsource.go
new file mode 100644
index 000000000..f30d025ad
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/pingsource.go
@@ -0,0 +1,195 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "context"
+ "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ types "k8s.io/apimachinery/pkg/types"
+ watch "k8s.io/apimachinery/pkg/watch"
+ rest "k8s.io/client-go/rest"
+ v1beta2 "knative.dev/eventing/pkg/apis/sources/v1beta2"
+ scheme "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+// PingSourcesGetter has a method to return a PingSourceInterface.
+// A group's client should implement this interface.
+type PingSourcesGetter interface {
+ PingSources(namespace string) PingSourceInterface
+}
+
+// PingSourceInterface has methods to work with PingSource resources.
+type PingSourceInterface interface {
+ Create(ctx context.Context, pingSource *v1beta2.PingSource, opts v1.CreateOptions) (*v1beta2.PingSource, error)
+ Update(ctx context.Context, pingSource *v1beta2.PingSource, opts v1.UpdateOptions) (*v1beta2.PingSource, error)
+ UpdateStatus(ctx context.Context, pingSource *v1beta2.PingSource, opts v1.UpdateOptions) (*v1beta2.PingSource, error)
+ Delete(ctx context.Context, name string, opts v1.DeleteOptions) error
+ DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error
+ Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta2.PingSource, error)
+ List(ctx context.Context, opts v1.ListOptions) (*v1beta2.PingSourceList, error)
+ Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error)
+ Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PingSource, err error)
+ PingSourceExpansion
+}
+
+// pingSources implements PingSourceInterface
+type pingSources struct {
+ client rest.Interface
+ ns string
+}
+
+// newPingSources returns a PingSources
+func newPingSources(c *SourcesV1beta2Client, namespace string) *pingSources {
+ return &pingSources{
+ client: c.RESTClient(),
+ ns: namespace,
+ }
+}
+
+// Get takes name of the pingSource, and returns the corresponding pingSource object, and an error if there is any.
+func (c *pingSources) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta2.PingSource, err error) {
+ result = &v1beta2.PingSource{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("pingsources").
+ Name(name).
+ VersionedParams(&options, scheme.ParameterCodec).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// List takes label and field selectors, and returns the list of PingSources that match those selectors.
+func (c *pingSources) List(ctx context.Context, opts v1.ListOptions) (result *v1beta2.PingSourceList, err error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ result = &v1beta2.PingSourceList{}
+ err = c.client.Get().
+ Namespace(c.ns).
+ Resource("pingsources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Watch returns a watch.Interface that watches the requested pingSources.
+func (c *pingSources) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) {
+ var timeout time.Duration
+ if opts.TimeoutSeconds != nil {
+ timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+ }
+ opts.Watch = true
+ return c.client.Get().
+ Namespace(c.ns).
+ Resource("pingsources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Watch(ctx)
+}
+
+// Create takes the representation of a pingSource and creates it. Returns the server's representation of the pingSource, and an error, if there is any.
+func (c *pingSources) Create(ctx context.Context, pingSource *v1beta2.PingSource, opts v1.CreateOptions) (result *v1beta2.PingSource, err error) {
+ result = &v1beta2.PingSource{}
+ err = c.client.Post().
+ Namespace(c.ns).
+ Resource("pingsources").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(pingSource).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Update takes the representation of a pingSource and updates it. Returns the server's representation of the pingSource, and an error, if there is any.
+func (c *pingSources) Update(ctx context.Context, pingSource *v1beta2.PingSource, opts v1.UpdateOptions) (result *v1beta2.PingSource, err error) {
+ result = &v1beta2.PingSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("pingsources").
+ Name(pingSource.Name).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(pingSource).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// UpdateStatus was generated because the type contains a Status member.
+// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
+func (c *pingSources) UpdateStatus(ctx context.Context, pingSource *v1beta2.PingSource, opts v1.UpdateOptions) (result *v1beta2.PingSource, err error) {
+ result = &v1beta2.PingSource{}
+ err = c.client.Put().
+ Namespace(c.ns).
+ Resource("pingsources").
+ Name(pingSource.Name).
+ SubResource("status").
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(pingSource).
+ Do(ctx).
+ Into(result)
+ return
+}
+
+// Delete takes name of the pingSource and deletes it. Returns an error if one occurs.
+func (c *pingSources) Delete(ctx context.Context, name string, opts v1.DeleteOptions) error {
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("pingsources").
+ Name(name).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// DeleteCollection deletes a collection of objects.
+func (c *pingSources) DeleteCollection(ctx context.Context, opts v1.DeleteOptions, listOpts v1.ListOptions) error {
+ var timeout time.Duration
+ if listOpts.TimeoutSeconds != nil {
+ timeout = time.Duration(*listOpts.TimeoutSeconds) * time.Second
+ }
+ return c.client.Delete().
+ Namespace(c.ns).
+ Resource("pingsources").
+ VersionedParams(&listOpts, scheme.ParameterCodec).
+ Timeout(timeout).
+ Body(&opts).
+ Do(ctx).
+ Error()
+}
+
+// Patch applies the patch and returns the patched pingSource.
+func (c *pingSources) Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts v1.PatchOptions, subresources ...string) (result *v1beta2.PingSource, err error) {
+ result = &v1beta2.PingSource{}
+ err = c.client.Patch(pt).
+ Namespace(c.ns).
+ Resource("pingsources").
+ Name(name).
+ SubResource(subresources...).
+ VersionedParams(&opts, scheme.ParameterCodec).
+ Body(data).
+ Do(ctx).
+ Into(result)
+ return
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/sources_client.go b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/sources_client.go
new file mode 100644
index 000000000..d5df4f545
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2/sources_client.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by client-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "net/http"
+
+ rest "k8s.io/client-go/rest"
+ v1beta2 "knative.dev/eventing/pkg/apis/sources/v1beta2"
+ "knative.dev/eventing/pkg/client/clientset/versioned/scheme"
+)
+
+type SourcesV1beta2Interface interface {
+ RESTClient() rest.Interface
+ PingSourcesGetter
+}
+
+// SourcesV1beta2Client is used to interact with features provided by the sources.knative.dev group.
+type SourcesV1beta2Client struct {
+ restClient rest.Interface
+}
+
+func (c *SourcesV1beta2Client) PingSources(namespace string) PingSourceInterface {
+ return newPingSources(c, namespace)
+}
+
+// NewForConfig creates a new SourcesV1beta2Client for the given config.
+// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
+// where httpClient was generated with rest.HTTPClientFor(c).
+func NewForConfig(c *rest.Config) (*SourcesV1beta2Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ httpClient, err := rest.HTTPClientFor(&config)
+ if err != nil {
+ return nil, err
+ }
+ return NewForConfigAndClient(&config, httpClient)
+}
+
+// NewForConfigAndClient creates a new SourcesV1beta2Client for the given config and http client.
+// Note the http client provided takes precedence over the configured transport values.
+func NewForConfigAndClient(c *rest.Config, h *http.Client) (*SourcesV1beta2Client, error) {
+ config := *c
+ if err := setConfigDefaults(&config); err != nil {
+ return nil, err
+ }
+ client, err := rest.RESTClientForConfigAndClient(&config, h)
+ if err != nil {
+ return nil, err
+ }
+ return &SourcesV1beta2Client{client}, nil
+}
+
+// NewForConfigOrDie creates a new SourcesV1beta2Client for the given config and
+// panics if there is an error in the config.
+func NewForConfigOrDie(c *rest.Config) *SourcesV1beta2Client {
+ client, err := NewForConfig(c)
+ if err != nil {
+ panic(err)
+ }
+ return client
+}
+
+// New creates a new SourcesV1beta2Client for the given RESTClient.
+func New(c rest.Interface) *SourcesV1beta2Client {
+ return &SourcesV1beta2Client{c}
+}
+
+func setConfigDefaults(config *rest.Config) error {
+ gv := v1beta2.SchemeGroupVersion
+ config.GroupVersion = &gv
+ config.APIPath = "/apis"
+ config.NegotiatedSerializer = scheme.Codecs.WithoutConversion()
+
+ if config.UserAgent == "" {
+ config.UserAgent = rest.DefaultKubernetesUserAgent()
+ }
+
+ return nil
+}
+
+// RESTClient returns a RESTClient that is used to communicate
+// with API server by this client implementation.
+func (c *SourcesV1beta2Client) RESTClient() rest.Interface {
+ if c == nil {
+ return nil
+ }
+ return c.restClient
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/interface.go
new file mode 100644
index 000000000..ccfc67a42
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/interface.go
@@ -0,0 +1,78 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package eventing
+
+import (
+ v1 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1"
+ v1alpha1 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
+ v1beta1 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta1"
+ v1beta2 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta2"
+ v1beta3 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta3"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to each of this group's versions.
+type Interface interface {
+ // V1alpha1 provides access to shared informers for resources in V1alpha1.
+ V1alpha1() v1alpha1.Interface
+ // V1beta1 provides access to shared informers for resources in V1beta1.
+ V1beta1() v1beta1.Interface
+ // V1beta2 provides access to shared informers for resources in V1beta2.
+ V1beta2() v1beta2.Interface
+ // V1beta3 provides access to shared informers for resources in V1beta3.
+ V1beta3() v1beta3.Interface
+ // V1 provides access to shared informers for resources in V1.
+ V1() v1.Interface
+}
+
+type group struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// V1alpha1 returns a new v1alpha1.Interface.
+func (g *group) V1alpha1() v1alpha1.Interface {
+ return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
+}
+
+// V1beta1 returns a new v1beta1.Interface.
+func (g *group) V1beta1() v1beta1.Interface {
+ return v1beta1.New(g.factory, g.namespace, g.tweakListOptions)
+}
+
+// V1beta2 returns a new v1beta2.Interface.
+func (g *group) V1beta2() v1beta2.Interface {
+ return v1beta2.New(g.factory, g.namespace, g.tweakListOptions)
+}
+
+// V1beta3 returns a new v1beta3.Interface.
+func (g *group) V1beta3() v1beta3.Interface {
+ return v1beta3.New(g.factory, g.namespace, g.tweakListOptions)
+}
+
+// V1 returns a new v1.Interface.
+func (g *group) V1() v1.Interface {
+ return v1.New(g.factory, g.namespace, g.tweakListOptions)
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1/broker.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1/broker.go
new file mode 100644
index 000000000..a57851c82
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1/broker.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1 "knative.dev/eventing/pkg/client/listers/eventing/v1"
+)
+
+// BrokerInformer provides access to a shared informer and lister for
+// Brokers.
+type BrokerInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.BrokerLister
+}
+
+type brokerInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewBrokerInformer constructs a new informer for Broker type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewBrokerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredBrokerInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredBrokerInformer constructs a new informer for Broker type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredBrokerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1().Brokers(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1().Brokers(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &eventingv1.Broker{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *brokerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredBrokerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *brokerInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&eventingv1.Broker{}, f.defaultInformer)
+}
+
+func (f *brokerInformer) Lister() v1.BrokerLister {
+ return v1.NewBrokerLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1/interface.go
new file mode 100644
index 000000000..e5e26d821
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1/interface.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // Brokers returns a BrokerInformer.
+ Brokers() BrokerInformer
+ // Triggers returns a TriggerInformer.
+ Triggers() TriggerInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// Brokers returns a BrokerInformer.
+func (v *version) Brokers() BrokerInformer {
+ return &brokerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// Triggers returns a TriggerInformer.
+func (v *version) Triggers() TriggerInformer {
+ return &triggerInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1/trigger.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1/trigger.go
new file mode 100644
index 000000000..1faa3422a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1/trigger.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ eventingv1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1 "knative.dev/eventing/pkg/client/listers/eventing/v1"
+)
+
+// TriggerInformer provides access to a shared informer and lister for
+// Triggers.
+type TriggerInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.TriggerLister
+}
+
+type triggerInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewTriggerInformer constructs a new informer for Trigger type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewTriggerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredTriggerInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredTriggerInformer constructs a new informer for Trigger type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredTriggerInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1().Triggers(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1().Triggers(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &eventingv1.Trigger{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *triggerInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredTriggerInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *triggerInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&eventingv1.Trigger{}, f.defaultInformer)
+}
+
+func (f *triggerInformer) Lister() v1.TriggerLister {
+ return v1.NewTriggerLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1/eventpolicy.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1/eventpolicy.go
new file mode 100644
index 000000000..df3b47e26
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1/eventpolicy.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ eventingv1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "knative.dev/eventing/pkg/client/listers/eventing/v1alpha1"
+)
+
+// EventPolicyInformer provides access to a shared informer and lister for
+// EventPolicies.
+type EventPolicyInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.EventPolicyLister
+}
+
+type eventPolicyInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewEventPolicyInformer constructs a new informer for EventPolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewEventPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredEventPolicyInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredEventPolicyInformer constructs a new informer for EventPolicy type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredEventPolicyInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1alpha1().EventPolicies(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1alpha1().EventPolicies(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &eventingv1alpha1.EventPolicy{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *eventPolicyInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredEventPolicyInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *eventPolicyInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&eventingv1alpha1.EventPolicy{}, f.defaultInformer)
+}
+
+func (f *eventPolicyInformer) Lister() v1alpha1.EventPolicyLister {
+ return v1alpha1.NewEventPolicyLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go
new file mode 100644
index 000000000..89263c258
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1/interface.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // EventPolicies returns a EventPolicyInformer.
+ EventPolicies() EventPolicyInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// EventPolicies returns a EventPolicyInformer.
+func (v *version) EventPolicies() EventPolicyInformer {
+ return &eventPolicyInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta1/eventtype.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta1/eventtype.go
new file mode 100644
index 000000000..920a7005e
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta1/eventtype.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "context"
+ time "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ eventingv1beta1 "knative.dev/eventing/pkg/apis/eventing/v1beta1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1beta1 "knative.dev/eventing/pkg/client/listers/eventing/v1beta1"
+)
+
+// EventTypeInformer provides access to a shared informer and lister for
+// EventTypes.
+type EventTypeInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1beta1.EventTypeLister
+}
+
+type eventTypeInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewEventTypeInformer constructs a new informer for EventType type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewEventTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredEventTypeInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredEventTypeInformer constructs a new informer for EventType type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredEventTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1beta1().EventTypes(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1beta1().EventTypes(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &eventingv1beta1.EventType{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *eventTypeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredEventTypeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *eventTypeInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&eventingv1beta1.EventType{}, f.defaultInformer)
+}
+
+func (f *eventTypeInformer) Lister() v1beta1.EventTypeLister {
+ return v1beta1.NewEventTypeLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta1/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta1/interface.go
new file mode 100644
index 000000000..a9473f823
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta1/interface.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // EventTypes returns a EventTypeInformer.
+ EventTypes() EventTypeInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// EventTypes returns a EventTypeInformer.
+func (v *version) EventTypes() EventTypeInformer {
+ return &eventTypeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta2/eventtype.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta2/eventtype.go
new file mode 100644
index 000000000..9a4c65255
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta2/eventtype.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "context"
+ time "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ eventingv1beta2 "knative.dev/eventing/pkg/apis/eventing/v1beta2"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1beta2 "knative.dev/eventing/pkg/client/listers/eventing/v1beta2"
+)
+
+// EventTypeInformer provides access to a shared informer and lister for
+// EventTypes.
+type EventTypeInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1beta2.EventTypeLister
+}
+
+type eventTypeInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewEventTypeInformer constructs a new informer for EventType type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewEventTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredEventTypeInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredEventTypeInformer constructs a new informer for EventType type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredEventTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1beta2().EventTypes(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1beta2().EventTypes(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &eventingv1beta2.EventType{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *eventTypeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredEventTypeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *eventTypeInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&eventingv1beta2.EventType{}, f.defaultInformer)
+}
+
+func (f *eventTypeInformer) Lister() v1beta2.EventTypeLister {
+ return v1beta2.NewEventTypeLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta2/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta2/interface.go
new file mode 100644
index 000000000..98f69b73e
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta2/interface.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // EventTypes returns a EventTypeInformer.
+ EventTypes() EventTypeInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// EventTypes returns a EventTypeInformer.
+func (v *version) EventTypes() EventTypeInformer {
+ return &eventTypeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta3/eventtype.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta3/eventtype.go
new file mode 100644
index 000000000..46eb0753f
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta3/eventtype.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta3
+
+import (
+ "context"
+ time "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ eventingv1beta3 "knative.dev/eventing/pkg/apis/eventing/v1beta3"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1beta3 "knative.dev/eventing/pkg/client/listers/eventing/v1beta3"
+)
+
+// EventTypeInformer provides access to a shared informer and lister for
+// EventTypes.
+type EventTypeInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1beta3.EventTypeLister
+}
+
+type eventTypeInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewEventTypeInformer constructs a new informer for EventType type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewEventTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredEventTypeInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredEventTypeInformer constructs a new informer for EventType type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredEventTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1beta3().EventTypes(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.EventingV1beta3().EventTypes(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &eventingv1beta3.EventType{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *eventTypeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredEventTypeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *eventTypeInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&eventingv1beta3.EventType{}, f.defaultInformer)
+}
+
+func (f *eventTypeInformer) Lister() v1beta3.EventTypeLister {
+ return v1beta3.NewEventTypeLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta3/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta3/interface.go
new file mode 100644
index 000000000..791ed817e
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta3/interface.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta3
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // EventTypes returns a EventTypeInformer.
+ EventTypes() EventTypeInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// EventTypes returns a EventTypeInformer.
+func (v *version) EventTypes() EventTypeInformer {
+ return &eventTypeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/factory.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/factory.go
new file mode 100644
index 000000000..634f7f6a3
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/factory.go
@@ -0,0 +1,285 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+ reflect "reflect"
+ sync "sync"
+ time "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ cache "k8s.io/client-go/tools/cache"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ eventing "knative.dev/eventing/pkg/client/informers/externalversions/eventing"
+ flows "knative.dev/eventing/pkg/client/informers/externalversions/flows"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ messaging "knative.dev/eventing/pkg/client/informers/externalversions/messaging"
+ sinks "knative.dev/eventing/pkg/client/informers/externalversions/sinks"
+ sources "knative.dev/eventing/pkg/client/informers/externalversions/sources"
+)
+
+// SharedInformerOption defines the functional option type for SharedInformerFactory.
+type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
+
+type sharedInformerFactory struct {
+ client versioned.Interface
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ lock sync.Mutex
+ defaultResync time.Duration
+ customResync map[reflect.Type]time.Duration
+ transform cache.TransformFunc
+
+ informers map[reflect.Type]cache.SharedIndexInformer
+ // startedInformers is used for tracking which informers have been started.
+ // This allows Start() to be called multiple times safely.
+ startedInformers map[reflect.Type]bool
+ // wg tracks how many goroutines were started.
+ wg sync.WaitGroup
+ // shuttingDown is true when Shutdown has been called. It may still be running
+ // because it needs to wait for goroutines.
+ shuttingDown bool
+}
+
+// WithCustomResyncConfig sets a custom resync period for the specified informer types.
+func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ for k, v := range resyncConfig {
+ factory.customResync[reflect.TypeOf(k)] = v
+ }
+ return factory
+ }
+}
+
+// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
+func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ factory.tweakListOptions = tweakListOptions
+ return factory
+ }
+}
+
+// WithNamespace limits the SharedInformerFactory to the specified namespace.
+func WithNamespace(namespace string) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ factory.namespace = namespace
+ return factory
+ }
+}
+
+// WithTransform sets a transform on all informers.
+func WithTransform(transform cache.TransformFunc) SharedInformerOption {
+ return func(factory *sharedInformerFactory) *sharedInformerFactory {
+ factory.transform = transform
+ return factory
+ }
+}
+
+// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
+func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
+ return NewSharedInformerFactoryWithOptions(client, defaultResync)
+}
+
+// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
+// Listers obtained via this SharedInformerFactory will be subject to the same filters
+// as specified here.
+// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
+func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
+ return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
+}
+
+// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
+func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
+ factory := &sharedInformerFactory{
+ client: client,
+ namespace: v1.NamespaceAll,
+ defaultResync: defaultResync,
+ informers: make(map[reflect.Type]cache.SharedIndexInformer),
+ startedInformers: make(map[reflect.Type]bool),
+ customResync: make(map[reflect.Type]time.Duration),
+ }
+
+ // Apply all options
+ for _, opt := range options {
+ factory = opt(factory)
+ }
+
+ return factory
+}
+
+func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ if f.shuttingDown {
+ return
+ }
+
+ for informerType, informer := range f.informers {
+ if !f.startedInformers[informerType] {
+ f.wg.Add(1)
+ // We need a new variable in each loop iteration,
+ // otherwise the goroutine would use the loop variable
+ // and that keeps changing.
+ informer := informer
+ go func() {
+ defer f.wg.Done()
+ informer.Run(stopCh)
+ }()
+ f.startedInformers[informerType] = true
+ }
+ }
+}
+
+func (f *sharedInformerFactory) Shutdown() {
+ f.lock.Lock()
+ f.shuttingDown = true
+ f.lock.Unlock()
+
+ // Will return immediately if there is nothing to wait for.
+ f.wg.Wait()
+}
+
+func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
+ informers := func() map[reflect.Type]cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informers := map[reflect.Type]cache.SharedIndexInformer{}
+ for informerType, informer := range f.informers {
+ if f.startedInformers[informerType] {
+ informers[informerType] = informer
+ }
+ }
+ return informers
+ }()
+
+ res := map[reflect.Type]bool{}
+ for informType, informer := range informers {
+ res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
+ }
+ return res
+}
+
+// InformerFor returns the SharedIndexInformer for obj using an internal
+// client.
+func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
+ f.lock.Lock()
+ defer f.lock.Unlock()
+
+ informerType := reflect.TypeOf(obj)
+ informer, exists := f.informers[informerType]
+ if exists {
+ return informer
+ }
+
+ resyncPeriod, exists := f.customResync[informerType]
+ if !exists {
+ resyncPeriod = f.defaultResync
+ }
+
+ informer = newFunc(f.client, resyncPeriod)
+ informer.SetTransform(f.transform)
+ f.informers[informerType] = informer
+
+ return informer
+}
+
+// SharedInformerFactory provides shared informers for resources in all known
+// API group versions.
+//
+// It is typically used like this:
+//
+//	ctx, cancel := context.WithCancel(context.Background())
+// defer cancel()
+// factory := NewSharedInformerFactory(client, resyncPeriod)
+//	defer factory.Shutdown() // Returns immediately if nothing was started.
+// genericInformer := factory.ForResource(resource)
+// typedInformer := factory.SomeAPIGroup().V1().SomeType()
+// factory.Start(ctx.Done()) // Start processing these informers.
+// synced := factory.WaitForCacheSync(ctx.Done())
+// for v, ok := range synced {
+// if !ok {
+// fmt.Fprintf(os.Stderr, "caches failed to sync: %v", v)
+// return
+// }
+// }
+//
+//	// Informers can also be created after Start, but then
+// // Start must be called again:
+// anotherGenericInformer := factory.ForResource(resource)
+// factory.Start(ctx.Done())
+type SharedInformerFactory interface {
+ internalinterfaces.SharedInformerFactory
+
+ // Start initializes all requested informers. They are handled in goroutines
+ // which run until the stop channel gets closed.
+ Start(stopCh <-chan struct{})
+
+ // Shutdown marks a factory as shutting down. At that point no new
+ // informers can be started anymore and Start will return without
+ // doing anything.
+ //
+ // In addition, Shutdown blocks until all goroutines have terminated. For that
+ // to happen, the close channel(s) that they were started with must be closed,
+ // either before Shutdown gets called or while it is waiting.
+ //
+ // Shutdown may be called multiple times, even concurrently. All such calls will
+ // block until all goroutines have terminated.
+ Shutdown()
+
+ // WaitForCacheSync blocks until all started informers' caches were synced
+ // or the stop channel gets closed.
+ WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
+
+ // ForResource gives generic access to a shared informer of the matching type.
+ ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
+
+ // InformerFor returns the SharedIndexInformer for obj using an internal
+ // client.
+ InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer
+
+ Eventing() eventing.Interface
+ Flows() flows.Interface
+ Messaging() messaging.Interface
+ Sinks() sinks.Interface
+ Sources() sources.Interface
+}
+
+func (f *sharedInformerFactory) Eventing() eventing.Interface {
+ return eventing.New(f, f.namespace, f.tweakListOptions)
+}
+
+func (f *sharedInformerFactory) Flows() flows.Interface {
+ return flows.New(f, f.namespace, f.tweakListOptions)
+}
+
+func (f *sharedInformerFactory) Messaging() messaging.Interface {
+ return messaging.New(f, f.namespace, f.tweakListOptions)
+}
+
+func (f *sharedInformerFactory) Sinks() sinks.Interface {
+ return sinks.New(f, f.namespace, f.tweakListOptions)
+}
+
+func (f *sharedInformerFactory) Sources() sources.Interface {
+ return sources.New(f, f.namespace, f.tweakListOptions)
+}
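
For orientation, here is a minimal consumer sketch of the generated factory added above. It is not part of the vendored code; the kubeconfig loading via clientcmd and the "default" namespace are illustrative assumptions, while the factory constructors, options, and the Eventing().V1().Triggers() accessor are taken from the generated API in this change.

package main

import (
	"log"
	"time"

	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/clientcmd"

	versioned "knative.dev/eventing/pkg/client/clientset/versioned"
	externalversions "knative.dev/eventing/pkg/client/informers/externalversions"
)

func main() {
	// Illustrative only: load a kubeconfig from the default location.
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		log.Fatal(err)
	}
	cs, err := versioned.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Build a factory scoped to one namespace and ask for a shared Trigger informer.
	factory := externalversions.NewSharedInformerFactoryWithOptions(cs, 10*time.Minute,
		externalversions.WithNamespace("default"))
	triggers := factory.Eventing().V1().Triggers()
	triggers.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) { log.Printf("trigger added: %v", obj) },
	})

	// Start the requested informers and wait for their caches to sync.
	stopCh := make(chan struct{})
	factory.Start(stopCh)
	defer factory.Shutdown()
	defer close(stopCh)
	for typ, ok := range factory.WaitForCacheSync(stopCh) {
		if !ok {
			log.Fatalf("cache for %v failed to sync", typ)
		}
	}
	// From here on, triggers.Lister() serves reads from the shared cache.
}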
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/flows/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/flows/interface.go
new file mode 100644
index 000000000..99a7163ed
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/flows/interface.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package flows
+
+import (
+ v1 "knative.dev/eventing/pkg/client/informers/externalversions/flows/v1"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to each of this group's versions.
+type Interface interface {
+ // V1 provides access to shared informers for resources in V1.
+ V1() v1.Interface
+}
+
+type group struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// V1 returns a new v1.Interface.
+func (g *group) V1() v1.Interface {
+ return v1.New(g.factory, g.namespace, g.tweakListOptions)
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/flows/v1/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/flows/v1/interface.go
new file mode 100644
index 000000000..520cd36c6
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/flows/v1/interface.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // Parallels returns a ParallelInformer.
+ Parallels() ParallelInformer
+ // Sequences returns a SequenceInformer.
+ Sequences() SequenceInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// Parallels returns a ParallelInformer.
+func (v *version) Parallels() ParallelInformer {
+	return &parallelInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// Sequences returns a SequenceInformer.
+func (v *version) Sequences() SequenceInformer {
+ return &sequenceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/flows/v1/parallel.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/flows/v1/parallel.go
new file mode 100644
index 000000000..886563397
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/flows/v1/parallel.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ flowsv1 "knative.dev/eventing/pkg/apis/flows/v1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1 "knative.dev/eventing/pkg/client/listers/flows/v1"
+)
+
+// ParallelInformer provides access to a shared informer and lister for
+// Parallels.
+type ParallelInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ParallelLister
+}
+
+type parallelInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewParallelInformer constructs a new informer for Parallel type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewParallelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredParallelInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredParallelInformer constructs a new informer for Parallel type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredParallelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.FlowsV1().Parallels(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.FlowsV1().Parallels(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &flowsv1.Parallel{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *parallelInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredParallelInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *parallelInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&flowsv1.Parallel{}, f.defaultInformer)
+}
+
+func (f *parallelInformer) Lister() v1.ParallelLister {
+ return v1.NewParallelLister(f.Informer().GetIndexer())
+}
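
The constructors above can also be used outside a factory when a one-off, filtered watch is enough. A small sketch under stated assumptions: the "team=serving" label is hypothetical, and the indexer and resync values mirror the defaults used by defaultInformer above.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"

	versioned "knative.dev/eventing/pkg/client/clientset/versioned"
	flowsinformers "knative.dev/eventing/pkg/client/informers/externalversions/flows/v1"
)

// newLabelledParallelInformer builds a standalone (non-factory) informer that only
// lists and watches Parallels carrying a hypothetical "team=serving" label.
func newLabelledParallelInformer(client versioned.Interface) cache.SharedIndexInformer {
	return flowsinformers.NewFilteredParallelInformer(
		client,
		metav1.NamespaceAll,
		30*time.Minute,
		cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
		func(options *metav1.ListOptions) {
			options.LabelSelector = "team=serving"
		},
	)
}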
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/flows/v1/sequence.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/flows/v1/sequence.go
new file mode 100644
index 000000000..1275dda3b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/flows/v1/sequence.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ flowsv1 "knative.dev/eventing/pkg/apis/flows/v1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1 "knative.dev/eventing/pkg/client/listers/flows/v1"
+)
+
+// SequenceInformer provides access to a shared informer and lister for
+// Sequences.
+type SequenceInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.SequenceLister
+}
+
+type sequenceInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewSequenceInformer constructs a new informer for Sequence type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewSequenceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredSequenceInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredSequenceInformer constructs a new informer for Sequence type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredSequenceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.FlowsV1().Sequences(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.FlowsV1().Sequences(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &flowsv1.Sequence{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *sequenceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredSequenceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *sequenceInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&flowsv1.Sequence{}, f.defaultInformer)
+}
+
+func (f *sequenceInformer) Lister() v1.SequenceLister {
+ return v1.NewSequenceLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/generic.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/generic.go
new file mode 100644
index 000000000..41c490e06
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/generic.go
@@ -0,0 +1,121 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package externalversions
+
+import (
+ "fmt"
+
+ schema "k8s.io/apimachinery/pkg/runtime/schema"
+ cache "k8s.io/client-go/tools/cache"
+ v1 "knative.dev/eventing/pkg/apis/eventing/v1"
+ v1alpha1 "knative.dev/eventing/pkg/apis/eventing/v1alpha1"
+ v1beta1 "knative.dev/eventing/pkg/apis/eventing/v1beta1"
+ v1beta2 "knative.dev/eventing/pkg/apis/eventing/v1beta2"
+ v1beta3 "knative.dev/eventing/pkg/apis/eventing/v1beta3"
+ flowsv1 "knative.dev/eventing/pkg/apis/flows/v1"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
+ sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
+ sourcesv1beta2 "knative.dev/eventing/pkg/apis/sources/v1beta2"
+)
+
+// GenericInformer is a type of SharedIndexInformer that locates and delegates to other
+// sharedInformers based on type.
+type GenericInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() cache.GenericLister
+}
+
+type genericInformer struct {
+ informer cache.SharedIndexInformer
+ resource schema.GroupResource
+}
+
+// Informer returns the SharedIndexInformer.
+func (f *genericInformer) Informer() cache.SharedIndexInformer {
+ return f.informer
+}
+
+// Lister returns the GenericLister.
+func (f *genericInformer) Lister() cache.GenericLister {
+ return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
+}
+
+// ForResource gives generic access to a shared informer of the matching type
+// TODO extend this to unknown resources with a client pool
+func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
+ switch resource {
+ // Group=eventing.knative.dev, Version=v1
+ case v1.SchemeGroupVersion.WithResource("brokers"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1().Brokers().Informer()}, nil
+ case v1.SchemeGroupVersion.WithResource("triggers"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1().Triggers().Informer()}, nil
+
+ // Group=eventing.knative.dev, Version=v1alpha1
+ case v1alpha1.SchemeGroupVersion.WithResource("eventpolicies"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1alpha1().EventPolicies().Informer()}, nil
+
+ // Group=eventing.knative.dev, Version=v1beta1
+ case v1beta1.SchemeGroupVersion.WithResource("eventtypes"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1beta1().EventTypes().Informer()}, nil
+
+ // Group=eventing.knative.dev, Version=v1beta2
+ case v1beta2.SchemeGroupVersion.WithResource("eventtypes"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1beta2().EventTypes().Informer()}, nil
+
+ // Group=eventing.knative.dev, Version=v1beta3
+ case v1beta3.SchemeGroupVersion.WithResource("eventtypes"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Eventing().V1beta3().EventTypes().Informer()}, nil
+
+ // Group=flows.knative.dev, Version=v1
+ case flowsv1.SchemeGroupVersion.WithResource("parallels"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Flows().V1().Parallels().Informer()}, nil
+ case flowsv1.SchemeGroupVersion.WithResource("sequences"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Flows().V1().Sequences().Informer()}, nil
+
+ // Group=messaging.knative.dev, Version=v1
+ case messagingv1.SchemeGroupVersion.WithResource("channels"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Messaging().V1().Channels().Informer()}, nil
+ case messagingv1.SchemeGroupVersion.WithResource("inmemorychannels"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Messaging().V1().InMemoryChannels().Informer()}, nil
+ case messagingv1.SchemeGroupVersion.WithResource("subscriptions"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Messaging().V1().Subscriptions().Informer()}, nil
+
+ // Group=sinks.knative.dev, Version=v1alpha1
+ case sinksv1alpha1.SchemeGroupVersion.WithResource("jobsinks"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Sinks().V1alpha1().JobSinks().Informer()}, nil
+
+ // Group=sources.knative.dev, Version=v1
+ case sourcesv1.SchemeGroupVersion.WithResource("apiserversources"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1().ApiServerSources().Informer()}, nil
+ case sourcesv1.SchemeGroupVersion.WithResource("containersources"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1().ContainerSources().Informer()}, nil
+ case sourcesv1.SchemeGroupVersion.WithResource("pingsources"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1().PingSources().Informer()}, nil
+ case sourcesv1.SchemeGroupVersion.WithResource("sinkbindings"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1().SinkBindings().Informer()}, nil
+
+ // Group=sources.knative.dev, Version=v1beta2
+ case sourcesv1beta2.SchemeGroupVersion.WithResource("pingsources"):
+ return &genericInformer{resource: resource.GroupResource(), informer: f.Sources().V1beta2().PingSources().Informer()}, nil
+
+ }
+
+ return nil, fmt.Errorf("no informer found for %v", resource)
+}
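
ForResource gives dynamic, GroupVersionResource-keyed access to the same shared caches as the typed accessors. A sketch using the flows.knative.dev/v1 "parallels" resource from the switch above; the factory is assumed to have been started and synced before the lister is read.

package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime/schema"

	externalversions "knative.dev/eventing/pkg/client/informers/externalversions"
)

// listParallelsGenerically resolves an informer by GroupVersionResource rather than
// through the typed accessors, mirroring the switch in generic.go.
func listParallelsGenerically(factory externalversions.SharedInformerFactory) error {
	gvr := schema.GroupVersionResource{Group: "flows.knative.dev", Version: "v1", Resource: "parallels"}
	gi, err := factory.ForResource(gvr)
	if err != nil {
		return err
	}
	objs, err := gi.Lister().List(labels.Everything())
	if err != nil {
		return err
	}
	fmt.Printf("found %d parallels in the shared cache\n", len(objs))
	return nil
}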
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
new file mode 100644
index 000000000..e38af6306
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces/factory_interfaces.go
@@ -0,0 +1,40 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package internalinterfaces
+
+import (
+ time "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ cache "k8s.io/client-go/tools/cache"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+)
+
+// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
+type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
+
+// SharedInformerFactory is a small interface that allows adding an informer without an import cycle.
+type SharedInformerFactory interface {
+ Start(stopCh <-chan struct{})
+ InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
+}
+
+// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
+type TweakListOptionsFunc func(*v1.ListOptions)
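
A TweakListOptionsFunc mutates the ListOptions before every List and Watch call an informer makes. A sketch of applying one factory-wide via WithTweakListOptions; the selector value is a hypothetical example, not something this change requires.

package example

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	versioned "knative.dev/eventing/pkg/client/clientset/versioned"
	externalversions "knative.dev/eventing/pkg/client/informers/externalversions"
)

// newScopedFactory applies one TweakListOptionsFunc to every informer the factory
// creates, so all List and Watch calls carry the same (hypothetical) label selector.
func newScopedFactory(client versioned.Interface) externalversions.SharedInformerFactory {
	scope := func(options *metav1.ListOptions) {
		options.LabelSelector = "eventing.knative.dev/release=devel"
	}
	return externalversions.NewSharedInformerFactoryWithOptions(
		client,
		10*time.Minute,
		externalversions.WithTweakListOptions(scope),
	)
}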
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/interface.go
new file mode 100644
index 000000000..8a38f95bf
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/interface.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package messaging
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1 "knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1"
+)
+
+// Interface provides access to each of this group's versions.
+type Interface interface {
+ // V1 provides access to shared informers for resources in V1.
+ V1() v1.Interface
+}
+
+type group struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// V1 returns a new v1.Interface.
+func (g *group) V1() v1.Interface {
+ return v1.New(g.factory, g.namespace, g.tweakListOptions)
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1/channel.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1/channel.go
new file mode 100644
index 000000000..78daf2848
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1/channel.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1 "knative.dev/eventing/pkg/client/listers/messaging/v1"
+)
+
+// ChannelInformer provides access to a shared informer and lister for
+// Channels.
+type ChannelInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ChannelLister
+}
+
+type channelInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewChannelInformer constructs a new informer for Channel type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewChannelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredChannelInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredChannelInformer constructs a new informer for Channel type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredChannelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.MessagingV1().Channels(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.MessagingV1().Channels(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &messagingv1.Channel{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *channelInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredChannelInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *channelInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&messagingv1.Channel{}, f.defaultInformer)
+}
+
+func (f *channelInformer) Lister() v1.ChannelLister {
+ return v1.NewChannelLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1/inmemorychannel.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1/inmemorychannel.go
new file mode 100644
index 000000000..2d19c29ef
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1/inmemorychannel.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1 "knative.dev/eventing/pkg/client/listers/messaging/v1"
+)
+
+// InMemoryChannelInformer provides access to a shared informer and lister for
+// InMemoryChannels.
+type InMemoryChannelInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.InMemoryChannelLister
+}
+
+type inMemoryChannelInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewInMemoryChannelInformer constructs a new informer for InMemoryChannel type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewInMemoryChannelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredInMemoryChannelInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredInMemoryChannelInformer constructs a new informer for InMemoryChannel type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredInMemoryChannelInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.MessagingV1().InMemoryChannels(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.MessagingV1().InMemoryChannels(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &messagingv1.InMemoryChannel{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *inMemoryChannelInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredInMemoryChannelInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *inMemoryChannelInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&messagingv1.InMemoryChannel{}, f.defaultInformer)
+}
+
+func (f *inMemoryChannelInformer) Lister() v1.InMemoryChannelLister {
+ return v1.NewInMemoryChannelLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1/interface.go
new file mode 100644
index 000000000..2165d9cc6
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1/interface.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // Channels returns a ChannelInformer.
+ Channels() ChannelInformer
+	// InMemoryChannels returns an InMemoryChannelInformer.
+ InMemoryChannels() InMemoryChannelInformer
+ // Subscriptions returns a SubscriptionInformer.
+ Subscriptions() SubscriptionInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// Channels returns a ChannelInformer.
+func (v *version) Channels() ChannelInformer {
+ return &channelInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// InMemoryChannels returns an InMemoryChannelInformer.
+func (v *version) InMemoryChannels() InMemoryChannelInformer {
+ return &inMemoryChannelInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// Subscriptions returns a SubscriptionInformer.
+func (v *version) Subscriptions() SubscriptionInformer {
+ return &subscriptionInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1/subscription.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1/subscription.go
new file mode 100644
index 000000000..bedff1663
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1/subscription.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ messagingv1 "knative.dev/eventing/pkg/apis/messaging/v1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1 "knative.dev/eventing/pkg/client/listers/messaging/v1"
+)
+
+// SubscriptionInformer provides access to a shared informer and lister for
+// Subscriptions.
+type SubscriptionInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.SubscriptionLister
+}
+
+type subscriptionInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewSubscriptionInformer constructs a new informer for Subscription type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewSubscriptionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredSubscriptionInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredSubscriptionInformer constructs a new informer for Subscription type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredSubscriptionInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.MessagingV1().Subscriptions(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.MessagingV1().Subscriptions(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &messagingv1.Subscription{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *subscriptionInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredSubscriptionInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *subscriptionInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&messagingv1.Subscription{}, f.defaultInformer)
+}
+
+func (f *subscriptionInformer) Lister() v1.SubscriptionLister {
+ return v1.NewSubscriptionLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/interface.go
new file mode 100644
index 000000000..f09f6846b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/interface.go
@@ -0,0 +1,46 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package sinks
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1"
+)
+
+// Interface provides access to each of this group's versions.
+type Interface interface {
+ // V1alpha1 provides access to shared informers for resources in V1alpha1.
+ V1alpha1() v1alpha1.Interface
+}
+
+type group struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// V1alpha1 returns a new v1alpha1.Interface.
+func (g *group) V1alpha1() v1alpha1.Interface {
+ return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1/interface.go
new file mode 100644
index 000000000..68ed173ac
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1/interface.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // JobSinks returns a JobSinkInformer.
+ JobSinks() JobSinkInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// JobSinks returns a JobSinkInformer.
+func (v *version) JobSinks() JobSinkInformer {
+ return &jobSinkInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1/jobsink.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1/jobsink.go
new file mode 100644
index 000000000..5278c858b
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1/jobsink.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "context"
+ time "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ sinksv1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1alpha1 "knative.dev/eventing/pkg/client/listers/sinks/v1alpha1"
+)
+
+// JobSinkInformer provides access to a shared informer and lister for
+// JobSinks.
+type JobSinkInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1alpha1.JobSinkLister
+}
+
+type jobSinkInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewJobSinkInformer constructs a new informer for JobSink type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewJobSinkInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredJobSinkInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredJobSinkInformer constructs a new informer for JobSink type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredJobSinkInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SinksV1alpha1().JobSinks(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SinksV1alpha1().JobSinks(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &sinksv1alpha1.JobSink{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *jobSinkInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredJobSinkInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *jobSinkInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&sinksv1alpha1.JobSink{}, f.defaultInformer)
+}
+
+func (f *jobSinkInformer) Lister() v1alpha1.JobSinkLister {
+ return v1alpha1.NewJobSinkLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/interface.go
new file mode 100644
index 000000000..eb406b621
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/interface.go
@@ -0,0 +1,54 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package sources
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1 "knative.dev/eventing/pkg/client/informers/externalversions/sources/v1"
+ v1beta2 "knative.dev/eventing/pkg/client/informers/externalversions/sources/v1beta2"
+)
+
+// Interface provides access to each of this group's versions.
+type Interface interface {
+ // V1beta2 provides access to shared informers for resources in V1beta2.
+ V1beta2() v1beta2.Interface
+ // V1 provides access to shared informers for resources in V1.
+ V1() v1.Interface
+}
+
+type group struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// V1beta2 returns a new v1beta2.Interface.
+func (g *group) V1beta2() v1beta2.Interface {
+ return v1beta2.New(g.factory, g.namespace, g.tweakListOptions)
+}
+
+// V1 returns a new v1.Interface.
+func (g *group) V1() v1.Interface {
+ return v1.New(g.factory, g.namespace, g.tweakListOptions)
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/apiserversource.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/apiserversource.go
new file mode 100644
index 000000000..501c2cdf6
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/apiserversource.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1 "knative.dev/eventing/pkg/client/listers/sources/v1"
+)
+
+// ApiServerSourceInformer provides access to a shared informer and lister for
+// ApiServerSources.
+type ApiServerSourceInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ApiServerSourceLister
+}
+
+type apiServerSourceInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewApiServerSourceInformer constructs a new informer for ApiServerSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewApiServerSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredApiServerSourceInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredApiServerSourceInformer constructs a new informer for ApiServerSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredApiServerSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1().ApiServerSources(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1().ApiServerSources(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &sourcesv1.ApiServerSource{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *apiServerSourceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredApiServerSourceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *apiServerSourceInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&sourcesv1.ApiServerSource{}, f.defaultInformer)
+}
+
+func (f *apiServerSourceInformer) Lister() v1.ApiServerSourceLister {
+ return v1.NewApiServerSourceLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/containersource.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/containersource.go
new file mode 100644
index 000000000..67de322d6
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/containersource.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1 "knative.dev/eventing/pkg/client/listers/sources/v1"
+)
+
+// ContainerSourceInformer provides access to a shared informer and lister for
+// ContainerSources.
+type ContainerSourceInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.ContainerSourceLister
+}
+
+type containerSourceInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewContainerSourceInformer constructs a new informer for ContainerSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewContainerSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredContainerSourceInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredContainerSourceInformer constructs a new informer for ContainerSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredContainerSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1().ContainerSources(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1().ContainerSources(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &sourcesv1.ContainerSource{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *containerSourceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredContainerSourceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *containerSourceInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&sourcesv1.ContainerSource{}, f.defaultInformer)
+}
+
+func (f *containerSourceInformer) Lister() v1.ContainerSourceLister {
+ return v1.NewContainerSourceLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/interface.go
new file mode 100644
index 000000000..f5fd4ee59
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/interface.go
@@ -0,0 +1,66 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // ApiServerSources returns a ApiServerSourceInformer.
+ ApiServerSources() ApiServerSourceInformer
+ // ContainerSources returns a ContainerSourceInformer.
+ ContainerSources() ContainerSourceInformer
+ // PingSources returns a PingSourceInformer.
+ PingSources() PingSourceInformer
+ // SinkBindings returns a SinkBindingInformer.
+ SinkBindings() SinkBindingInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// ApiServerSources returns a ApiServerSourceInformer.
+func (v *version) ApiServerSources() ApiServerSourceInformer {
+ return &apiServerSourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// ContainerSources returns a ContainerSourceInformer.
+func (v *version) ContainerSources() ContainerSourceInformer {
+ return &containerSourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// PingSources returns a PingSourceInformer.
+func (v *version) PingSources() PingSourceInformer {
+ return &pingSourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
+
+// SinkBindings returns a SinkBindingInformer.
+func (v *version) SinkBindings() SinkBindingInformer {
+ return &sinkBindingInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/pingsource.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/pingsource.go
new file mode 100644
index 000000000..f4a274ad1
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/pingsource.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1 "knative.dev/eventing/pkg/client/listers/sources/v1"
+)
+
+// PingSourceInformer provides access to a shared informer and lister for
+// PingSources.
+type PingSourceInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.PingSourceLister
+}
+
+type pingSourceInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewPingSourceInformer constructs a new informer for PingSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewPingSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredPingSourceInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredPingSourceInformer constructs a new informer for PingSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredPingSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1().PingSources(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1().PingSources(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &sourcesv1.PingSource{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *pingSourceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredPingSourceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *pingSourceInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&sourcesv1.PingSource{}, f.defaultInformer)
+}
+
+func (f *pingSourceInformer) Lister() v1.PingSourceLister {
+ return v1.NewPingSourceLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/sinkbinding.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/sinkbinding.go
new file mode 100644
index 000000000..035805262
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1/sinkbinding.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "context"
+ time "time"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ sourcesv1 "knative.dev/eventing/pkg/apis/sources/v1"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1 "knative.dev/eventing/pkg/client/listers/sources/v1"
+)
+
+// SinkBindingInformer provides access to a shared informer and lister for
+// SinkBindings.
+type SinkBindingInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1.SinkBindingLister
+}
+
+type sinkBindingInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewSinkBindingInformer constructs a new informer for SinkBinding type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewSinkBindingInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredSinkBindingInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredSinkBindingInformer constructs a new informer for SinkBinding type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredSinkBindingInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1().SinkBindings(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1().SinkBindings(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &sourcesv1.SinkBinding{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *sinkBindingInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredSinkBindingInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *sinkBindingInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&sourcesv1.SinkBinding{}, f.defaultInformer)
+}
+
+func (f *sinkBindingInformer) Lister() v1.SinkBindingLister {
+ return v1.NewSinkBindingLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1beta2/interface.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1beta2/interface.go
new file mode 100644
index 000000000..14c34076e
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1beta2/interface.go
@@ -0,0 +1,45 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+)
+
+// Interface provides access to all the informers in this group version.
+type Interface interface {
+ // PingSources returns a PingSourceInformer.
+ PingSources() PingSourceInformer
+}
+
+type version struct {
+ factory internalinterfaces.SharedInformerFactory
+ namespace string
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+}
+
+// New returns a new Interface.
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
+ return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
+}
+
+// PingSources returns a PingSourceInformer.
+func (v *version) PingSources() PingSourceInformer {
+ return &pingSourceInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1beta2/pingsource.go b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1beta2/pingsource.go
new file mode 100644
index 000000000..41a264784
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/informers/externalversions/sources/v1beta2/pingsource.go
@@ -0,0 +1,90 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by informer-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "context"
+ time "time"
+
+ v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ runtime "k8s.io/apimachinery/pkg/runtime"
+ watch "k8s.io/apimachinery/pkg/watch"
+ cache "k8s.io/client-go/tools/cache"
+ sourcesv1beta2 "knative.dev/eventing/pkg/apis/sources/v1beta2"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ internalinterfaces "knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces"
+ v1beta2 "knative.dev/eventing/pkg/client/listers/sources/v1beta2"
+)
+
+// PingSourceInformer provides access to a shared informer and lister for
+// PingSources.
+type PingSourceInformer interface {
+ Informer() cache.SharedIndexInformer
+ Lister() v1beta2.PingSourceLister
+}
+
+type pingSourceInformer struct {
+ factory internalinterfaces.SharedInformerFactory
+ tweakListOptions internalinterfaces.TweakListOptionsFunc
+ namespace string
+}
+
+// NewPingSourceInformer constructs a new informer for PingSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewPingSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
+ return NewFilteredPingSourceInformer(client, namespace, resyncPeriod, indexers, nil)
+}
+
+// NewFilteredPingSourceInformer constructs a new informer for PingSource type.
+// Always prefer using an informer factory to get a shared informer instead of getting an independent
+// one. This reduces memory footprint and number of connections to the server.
+func NewFilteredPingSourceInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
+ return cache.NewSharedIndexInformer(
+ &cache.ListWatch{
+ ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1beta2().PingSources(namespace).List(context.TODO(), options)
+ },
+ WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
+ if tweakListOptions != nil {
+ tweakListOptions(&options)
+ }
+ return client.SourcesV1beta2().PingSources(namespace).Watch(context.TODO(), options)
+ },
+ },
+ &sourcesv1beta2.PingSource{},
+ resyncPeriod,
+ indexers,
+ )
+}
+
+func (f *pingSourceInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
+ return NewFilteredPingSourceInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
+}
+
+func (f *pingSourceInformer) Informer() cache.SharedIndexInformer {
+ return f.factory.InformerFor(&sourcesv1beta2.PingSource{}, f.defaultInformer)
+}
+
+func (f *pingSourceInformer) Lister() v1beta2.PingSourceLister {
+ return v1beta2.NewPingSourceLister(f.Informer().GetIndexer())
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/injection/client/client.go b/vendor/knative.dev/eventing/pkg/client/injection/client/client.go
new file mode 100644
index 000000000..8637eb0f4
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/injection/client/client.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package client
+
+import (
+ context "context"
+
+ rest "k8s.io/client-go/rest"
+ versioned "knative.dev/eventing/pkg/client/clientset/versioned"
+ injection "knative.dev/pkg/injection"
+ logging "knative.dev/pkg/logging"
+)
+
+func init() {
+ injection.Default.RegisterClient(withClientFromConfig)
+ injection.Default.RegisterClientFetcher(func(ctx context.Context) interface{} {
+ return Get(ctx)
+ })
+}
+
+// Key is used as the key for associating information with a context.Context.
+type Key struct{}
+
+func withClientFromConfig(ctx context.Context, cfg *rest.Config) context.Context {
+ return context.WithValue(ctx, Key{}, versioned.NewForConfigOrDie(cfg))
+}
+
+// Get extracts the versioned.Interface client from the context.
+func Get(ctx context.Context) versioned.Interface {
+ untyped := ctx.Value(Key{})
+ if untyped == nil {
+ if injection.GetConfig(ctx) == nil {
+ logging.FromContext(ctx).Panic(
+ "Unable to fetch knative.dev/eventing/pkg/client/clientset/versioned.Interface from context. This context is not the application context (which is typically given to constructors via sharedmain).")
+ } else {
+ logging.FromContext(ctx).Panic(
+ "Unable to fetch knative.dev/eventing/pkg/client/clientset/versioned.Interface from context.")
+ }
+ }
+ return untyped.(versioned.Interface)
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/eventpolicy.go b/vendor/knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/eventpolicy.go
new file mode 100644
index 000000000..c6da95f0a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy/eventpolicy.go
@@ -0,0 +1,52 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package eventpolicy
+
+import (
+ context "context"
+
+ v1alpha1 "knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1"
+ factory "knative.dev/eventing/pkg/client/injection/informers/factory"
+ controller "knative.dev/pkg/controller"
+ injection "knative.dev/pkg/injection"
+ logging "knative.dev/pkg/logging"
+)
+
+func init() {
+ injection.Default.RegisterInformer(withInformer)
+}
+
+// Key is used for associating the Informer inside the context.Context.
+type Key struct{}
+
+func withInformer(ctx context.Context) (context.Context, controller.Informer) {
+ f := factory.Get(ctx)
+ inf := f.Eventing().V1alpha1().EventPolicies()
+ return context.WithValue(ctx, Key{}, inf), inf.Informer()
+}
+
+// Get extracts the typed informer from the context.
+func Get(ctx context.Context) v1alpha1.EventPolicyInformer {
+ untyped := ctx.Value(Key{})
+ if untyped == nil {
+ logging.FromContext(ctx).Panic(
+ "Unable to fetch knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1.EventPolicyInformer from context.")
+ }
+ return untyped.(v1alpha1.EventPolicyInformer)
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/injection/informers/factory/factory.go b/vendor/knative.dev/eventing/pkg/client/injection/informers/factory/factory.go
new file mode 100644
index 000000000..66413ebac
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/injection/informers/factory/factory.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by injection-gen. DO NOT EDIT.
+
+package factory
+
+import (
+ context "context"
+
+ externalversions "knative.dev/eventing/pkg/client/informers/externalversions"
+ client "knative.dev/eventing/pkg/client/injection/client"
+ controller "knative.dev/pkg/controller"
+ injection "knative.dev/pkg/injection"
+ logging "knative.dev/pkg/logging"
+)
+
+func init() {
+ injection.Default.RegisterInformerFactory(withInformerFactory)
+}
+
+// Key is used as the key for associating information with a context.Context.
+type Key struct{}
+
+func withInformerFactory(ctx context.Context) context.Context {
+ c := client.Get(ctx)
+ opts := make([]externalversions.SharedInformerOption, 0, 1)
+ if injection.HasNamespaceScope(ctx) {
+ opts = append(opts, externalversions.WithNamespace(injection.GetNamespaceScope(ctx)))
+ }
+ return context.WithValue(ctx, Key{},
+ externalversions.NewSharedInformerFactoryWithOptions(c, controller.GetResyncPeriod(ctx), opts...))
+}
+
+// Get extracts the InformerFactory from the context.
+func Get(ctx context.Context) externalversions.SharedInformerFactory {
+ untyped := ctx.Value(Key{})
+ if untyped == nil {
+ logging.FromContext(ctx).Panic(
+ "Unable to fetch knative.dev/eventing/pkg/client/informers/externalversions.SharedInformerFactory from context.")
+ }
+ return untyped.(externalversions.SharedInformerFactory)
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1/broker.go b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1/broker.go
new file mode 100644
index 000000000..a34e9a01f
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1/broker.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1 "knative.dev/eventing/pkg/apis/eventing/v1"
+)
+
+// BrokerLister helps list Brokers.
+// All objects returned here must be treated as read-only.
+type BrokerLister interface {
+ // List lists all Brokers in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Broker, err error)
+ // Brokers returns an object that can list and get Brokers.
+ Brokers(namespace string) BrokerNamespaceLister
+ BrokerListerExpansion
+}
+
+// brokerLister implements the BrokerLister interface.
+type brokerLister struct {
+ indexer cache.Indexer
+}
+
+// NewBrokerLister returns a new BrokerLister.
+func NewBrokerLister(indexer cache.Indexer) BrokerLister {
+ return &brokerLister{indexer: indexer}
+}
+
+// List lists all Brokers in the indexer.
+func (s *brokerLister) List(selector labels.Selector) (ret []*v1.Broker, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Broker))
+ })
+ return ret, err
+}
+
+// Brokers returns an object that can list and get Brokers.
+func (s *brokerLister) Brokers(namespace string) BrokerNamespaceLister {
+ return brokerNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// BrokerNamespaceLister helps list and get Brokers.
+// All objects returned here must be treated as read-only.
+type BrokerNamespaceLister interface {
+ // List lists all Brokers in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Broker, err error)
+ // Get retrieves the Broker from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Broker, error)
+ BrokerNamespaceListerExpansion
+}
+
+// brokerNamespaceLister implements the BrokerNamespaceLister
+// interface.
+type brokerNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all Brokers in the indexer for a given namespace.
+func (s brokerNamespaceLister) List(selector labels.Selector) (ret []*v1.Broker, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Broker))
+ })
+ return ret, err
+}
+
+// Get retrieves the Broker from the indexer for a given namespace and name.
+func (s brokerNamespaceLister) Get(name string) (*v1.Broker, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("broker"), name)
+ }
+ return obj.(*v1.Broker), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1/expansion_generated.go b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1/expansion_generated.go
new file mode 100644
index 000000000..249909354
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1/expansion_generated.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+// BrokerListerExpansion allows custom methods to be added to
+// BrokerLister.
+type BrokerListerExpansion interface{}
+
+// BrokerNamespaceListerExpansion allows custom methods to be added to
+// BrokerNamespaceLister.
+type BrokerNamespaceListerExpansion interface{}
+
+// TriggerListerExpansion allows custom methods to be added to
+// TriggerLister.
+type TriggerListerExpansion interface{}
+
+// TriggerNamespaceListerExpansion allows custom methods to be added to
+// TriggerNamespaceLister.
+type TriggerNamespaceListerExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1/trigger.go b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1/trigger.go
new file mode 100644
index 000000000..28aef21d2
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1/trigger.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1 "knative.dev/eventing/pkg/apis/eventing/v1"
+)
+
+// TriggerLister helps list Triggers.
+// All objects returned here must be treated as read-only.
+type TriggerLister interface {
+ // List lists all Triggers in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Trigger, err error)
+ // Triggers returns an object that can list and get Triggers.
+ Triggers(namespace string) TriggerNamespaceLister
+ TriggerListerExpansion
+}
+
+// triggerLister implements the TriggerLister interface.
+type triggerLister struct {
+ indexer cache.Indexer
+}
+
+// NewTriggerLister returns a new TriggerLister.
+func NewTriggerLister(indexer cache.Indexer) TriggerLister {
+ return &triggerLister{indexer: indexer}
+}
+
+// List lists all Triggers in the indexer.
+func (s *triggerLister) List(selector labels.Selector) (ret []*v1.Trigger, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Trigger))
+ })
+ return ret, err
+}
+
+// Triggers returns an object that can list and get Triggers.
+func (s *triggerLister) Triggers(namespace string) TriggerNamespaceLister {
+ return triggerNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// TriggerNamespaceLister helps list and get Triggers.
+// All objects returned here must be treated as read-only.
+type TriggerNamespaceLister interface {
+ // List lists all Triggers in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Trigger, err error)
+ // Get retrieves the Trigger from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Trigger, error)
+ TriggerNamespaceListerExpansion
+}
+
+// triggerNamespaceLister implements the TriggerNamespaceLister
+// interface.
+type triggerNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all Triggers in the indexer for a given namespace.
+func (s triggerNamespaceLister) List(selector labels.Selector) (ret []*v1.Trigger, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Trigger))
+ })
+ return ret, err
+}
+
+// Get retrieves the Trigger from the indexer for a given namespace and name.
+func (s triggerNamespaceLister) Get(name string) (*v1.Trigger, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("trigger"), name)
+ }
+ return obj.(*v1.Trigger), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta1/eventtype.go b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta1/eventtype.go
new file mode 100644
index 000000000..3dbf79b88
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta1/eventtype.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1beta1 "knative.dev/eventing/pkg/apis/eventing/v1beta1"
+)
+
+// EventTypeLister helps list EventTypes.
+// All objects returned here must be treated as read-only.
+type EventTypeLister interface {
+ // List lists all EventTypes in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.EventType, err error)
+ // EventTypes returns an object that can list and get EventTypes.
+ EventTypes(namespace string) EventTypeNamespaceLister
+ EventTypeListerExpansion
+}
+
+// eventTypeLister implements the EventTypeLister interface.
+type eventTypeLister struct {
+ indexer cache.Indexer
+}
+
+// NewEventTypeLister returns a new EventTypeLister.
+func NewEventTypeLister(indexer cache.Indexer) EventTypeLister {
+ return &eventTypeLister{indexer: indexer}
+}
+
+// List lists all EventTypes in the indexer.
+func (s *eventTypeLister) List(selector labels.Selector) (ret []*v1beta1.EventType, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.EventType))
+ })
+ return ret, err
+}
+
+// EventTypes returns an object that can list and get EventTypes.
+func (s *eventTypeLister) EventTypes(namespace string) EventTypeNamespaceLister {
+ return eventTypeNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// EventTypeNamespaceLister helps list and get EventTypes.
+// All objects returned here must be treated as read-only.
+type EventTypeNamespaceLister interface {
+ // List lists all EventTypes in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta1.EventType, err error)
+ // Get retrieves the EventType from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta1.EventType, error)
+ EventTypeNamespaceListerExpansion
+}
+
+// eventTypeNamespaceLister implements the EventTypeNamespaceLister
+// interface.
+type eventTypeNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all EventTypes in the indexer for a given namespace.
+func (s eventTypeNamespaceLister) List(selector labels.Selector) (ret []*v1beta1.EventType, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta1.EventType))
+ })
+ return ret, err
+}
+
+// Get retrieves the EventType from the indexer for a given namespace and name.
+func (s eventTypeNamespaceLister) Get(name string) (*v1beta1.EventType, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta1.Resource("eventtype"), name)
+ }
+ return obj.(*v1beta1.EventType), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta1/expansion_generated.go b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta1/expansion_generated.go
new file mode 100644
index 000000000..8048b1f4c
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta1/expansion_generated.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta1
+
+// EventTypeListerExpansion allows custom methods to be added to
+// EventTypeLister.
+type EventTypeListerExpansion interface{}
+
+// EventTypeNamespaceListerExpansion allows custom methods to be added to
+// EventTypeNamespaceLister.
+type EventTypeNamespaceListerExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta2/eventtype.go b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta2/eventtype.go
new file mode 100644
index 000000000..4f84cd924
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta2/eventtype.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1beta2 "knative.dev/eventing/pkg/apis/eventing/v1beta2"
+)
+
+// EventTypeLister helps list EventTypes.
+// All objects returned here must be treated as read-only.
+type EventTypeLister interface {
+ // List lists all EventTypes in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta2.EventType, err error)
+ // EventTypes returns an object that can list and get EventTypes.
+ EventTypes(namespace string) EventTypeNamespaceLister
+ EventTypeListerExpansion
+}
+
+// eventTypeLister implements the EventTypeLister interface.
+type eventTypeLister struct {
+ indexer cache.Indexer
+}
+
+// NewEventTypeLister returns a new EventTypeLister.
+func NewEventTypeLister(indexer cache.Indexer) EventTypeLister {
+ return &eventTypeLister{indexer: indexer}
+}
+
+// List lists all EventTypes in the indexer.
+func (s *eventTypeLister) List(selector labels.Selector) (ret []*v1beta2.EventType, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta2.EventType))
+ })
+ return ret, err
+}
+
+// EventTypes returns an object that can list and get EventTypes.
+func (s *eventTypeLister) EventTypes(namespace string) EventTypeNamespaceLister {
+ return eventTypeNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// EventTypeNamespaceLister helps list and get EventTypes.
+// All objects returned here must be treated as read-only.
+type EventTypeNamespaceLister interface {
+ // List lists all EventTypes in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta2.EventType, err error)
+ // Get retrieves the EventType from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta2.EventType, error)
+ EventTypeNamespaceListerExpansion
+}
+
+// eventTypeNamespaceLister implements the EventTypeNamespaceLister
+// interface.
+type eventTypeNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all EventTypes in the indexer for a given namespace.
+func (s eventTypeNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.EventType, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta2.EventType))
+ })
+ return ret, err
+}
+
+// Get retrieves the EventType from the indexer for a given namespace and name.
+func (s eventTypeNamespaceLister) Get(name string) (*v1beta2.EventType, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta2.Resource("eventtype"), name)
+ }
+ return obj.(*v1beta2.EventType), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta2/expansion_generated.go b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta2/expansion_generated.go
new file mode 100644
index 000000000..4363c0140
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta2/expansion_generated.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta2
+
+// EventTypeListerExpansion allows custom methods to be added to
+// EventTypeLister.
+type EventTypeListerExpansion interface{}
+
+// EventTypeNamespaceListerExpansion allows custom methods to be added to
+// EventTypeNamespaceLister.
+type EventTypeNamespaceListerExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta3/eventtype.go b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta3/eventtype.go
new file mode 100644
index 000000000..76d1efb30
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta3/eventtype.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta3
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1beta3 "knative.dev/eventing/pkg/apis/eventing/v1beta3"
+)
+
+// EventTypeLister helps list EventTypes.
+// All objects returned here must be treated as read-only.
+type EventTypeLister interface {
+ // List lists all EventTypes in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta3.EventType, err error)
+ // EventTypes returns an object that can list and get EventTypes.
+ EventTypes(namespace string) EventTypeNamespaceLister
+ EventTypeListerExpansion
+}
+
+// eventTypeLister implements the EventTypeLister interface.
+type eventTypeLister struct {
+ indexer cache.Indexer
+}
+
+// NewEventTypeLister returns a new EventTypeLister.
+func NewEventTypeLister(indexer cache.Indexer) EventTypeLister {
+ return &eventTypeLister{indexer: indexer}
+}
+
+// List lists all EventTypes in the indexer.
+func (s *eventTypeLister) List(selector labels.Selector) (ret []*v1beta3.EventType, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta3.EventType))
+ })
+ return ret, err
+}
+
+// EventTypes returns an object that can list and get EventTypes.
+func (s *eventTypeLister) EventTypes(namespace string) EventTypeNamespaceLister {
+ return eventTypeNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// EventTypeNamespaceLister helps list and get EventTypes.
+// All objects returned here must be treated as read-only.
+type EventTypeNamespaceLister interface {
+ // List lists all EventTypes in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta3.EventType, err error)
+ // Get retrieves the EventType from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta3.EventType, error)
+ EventTypeNamespaceListerExpansion
+}
+
+// eventTypeNamespaceLister implements the EventTypeNamespaceLister
+// interface.
+type eventTypeNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all EventTypes in the indexer for a given namespace.
+func (s eventTypeNamespaceLister) List(selector labels.Selector) (ret []*v1beta3.EventType, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta3.EventType))
+ })
+ return ret, err
+}
+
+// Get retrieves the EventType from the indexer for a given namespace and name.
+func (s eventTypeNamespaceLister) Get(name string) (*v1beta3.EventType, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta3.Resource("eventtype"), name)
+ }
+ return obj.(*v1beta3.EventType), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta3/expansion_generated.go b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta3/expansion_generated.go
new file mode 100644
index 000000000..465a79ef9
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/eventing/v1beta3/expansion_generated.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta3
+
+// EventTypeListerExpansion allows custom methods to be added to
+// EventTypeLister.
+type EventTypeListerExpansion interface{}
+
+// EventTypeNamespaceListerExpansion allows custom methods to be added to
+// EventTypeNamespaceLister.
+type EventTypeNamespaceListerExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/flows/v1/expansion_generated.go b/vendor/knative.dev/eventing/pkg/client/listers/flows/v1/expansion_generated.go
new file mode 100644
index 000000000..3bbe4519a
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/flows/v1/expansion_generated.go
@@ -0,0 +1,35 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+// ParallelListerExpansion allows custom methods to be added to
+// ParallelLister.
+type ParallelListerExpansion interface{}
+
+// ParallelNamespaceListerExpansion allows custom methods to be added to
+// ParallelNamespaceLister.
+type ParallelNamespaceListerExpansion interface{}
+
+// SequenceListerExpansion allows custom methods to be added to
+// SequenceLister.
+type SequenceListerExpansion interface{}
+
+// SequenceNamespaceListerExpansion allows custom methods to be added to
+// SequenceNamespaceLister.
+type SequenceNamespaceListerExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/flows/v1/parallel.go b/vendor/knative.dev/eventing/pkg/client/listers/flows/v1/parallel.go
new file mode 100644
index 000000000..8ef7f1af9
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/flows/v1/parallel.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1 "knative.dev/eventing/pkg/apis/flows/v1"
+)
+
+// ParallelLister helps list Parallels.
+// All objects returned here must be treated as read-only.
+type ParallelLister interface {
+ // List lists all Parallels in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Parallel, err error)
+ // Parallels returns an object that can list and get Parallels.
+ Parallels(namespace string) ParallelNamespaceLister
+ ParallelListerExpansion
+}
+
+// parallelLister implements the ParallelLister interface.
+type parallelLister struct {
+ indexer cache.Indexer
+}
+
+// NewParallelLister returns a new ParallelLister.
+func NewParallelLister(indexer cache.Indexer) ParallelLister {
+	return &parallelLister{indexer: indexer}
+}
+
+// List lists all Parallels in the indexer.
+func (s *parallelLister) List(selector labels.Selector) (ret []*v1.Parallel, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Parallel))
+ })
+ return ret, err
+}
+
+// Parallels returns an object that can list and get Parallels.
+func (s *parallelLister) Parallels(namespace string) ParallelNamespaceLister {
+ return parallelNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// ParallelNamespaceLister helps list and get Parallels.
+// All objects returned here must be treated as read-only.
+type ParallelNamespaceLister interface {
+ // List lists all Parallels in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Parallel, err error)
+ // Get retrieves the Parallel from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Parallel, error)
+ ParallelNamespaceListerExpansion
+}
+
+// parallelNamespaceLister implements the ParallelNamespaceLister
+// interface.
+type parallelNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all Parallels in the indexer for a given namespace.
+func (s parallelNamespaceLister) List(selector labels.Selector) (ret []*v1.Parallel, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Parallel))
+ })
+ return ret, err
+}
+
+// Get retrieves the Parallel from the indexer for a given namespace and name.
+func (s parallelNamespaceLister) Get(name string) (*v1.Parallel, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("parallel"), name)
+ }
+ return obj.(*v1.Parallel), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/flows/v1/sequence.go b/vendor/knative.dev/eventing/pkg/client/listers/flows/v1/sequence.go
new file mode 100644
index 000000000..9645a7f13
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/flows/v1/sequence.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1 "knative.dev/eventing/pkg/apis/flows/v1"
+)
+
+// SequenceLister helps list Sequences.
+// All objects returned here must be treated as read-only.
+type SequenceLister interface {
+ // List lists all Sequences in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Sequence, err error)
+ // Sequences returns an object that can list and get Sequences.
+ Sequences(namespace string) SequenceNamespaceLister
+ SequenceListerExpansion
+}
+
+// sequenceLister implements the SequenceLister interface.
+type sequenceLister struct {
+ indexer cache.Indexer
+}
+
+// NewSequenceLister returns a new SequenceLister.
+func NewSequenceLister(indexer cache.Indexer) SequenceLister {
+ return &sequenceLister{indexer: indexer}
+}
+
+// List lists all Sequences in the indexer.
+func (s *sequenceLister) List(selector labels.Selector) (ret []*v1.Sequence, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Sequence))
+ })
+ return ret, err
+}
+
+// Sequences returns an object that can list and get Sequences.
+func (s *sequenceLister) Sequences(namespace string) SequenceNamespaceLister {
+ return sequenceNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// SequenceNamespaceLister helps list and get Sequences.
+// All objects returned here must be treated as read-only.
+type SequenceNamespaceLister interface {
+ // List lists all Sequences in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Sequence, err error)
+ // Get retrieves the Sequence from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Sequence, error)
+ SequenceNamespaceListerExpansion
+}
+
+// sequenceNamespaceLister implements the SequenceNamespaceLister
+// interface.
+type sequenceNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all Sequences in the indexer for a given namespace.
+func (s sequenceNamespaceLister) List(selector labels.Selector) (ret []*v1.Sequence, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Sequence))
+ })
+ return ret, err
+}
+
+// Get retrieves the Sequence from the indexer for a given namespace and name.
+func (s sequenceNamespaceLister) Get(name string) (*v1.Sequence, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("sequence"), name)
+ }
+ return obj.(*v1.Sequence), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/messaging/v1/channel.go b/vendor/knative.dev/eventing/pkg/client/listers/messaging/v1/channel.go
new file mode 100644
index 000000000..541b1bb75
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/messaging/v1/channel.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1 "knative.dev/eventing/pkg/apis/messaging/v1"
+)
+
+// ChannelLister helps list Channels.
+// All objects returned here must be treated as read-only.
+type ChannelLister interface {
+ // List lists all Channels in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Channel, err error)
+ // Channels returns an object that can list and get Channels.
+ Channels(namespace string) ChannelNamespaceLister
+ ChannelListerExpansion
+}
+
+// channelLister implements the ChannelLister interface.
+type channelLister struct {
+ indexer cache.Indexer
+}
+
+// NewChannelLister returns a new ChannelLister.
+func NewChannelLister(indexer cache.Indexer) ChannelLister {
+ return &channelLister{indexer: indexer}
+}
+
+// List lists all Channels in the indexer.
+func (s *channelLister) List(selector labels.Selector) (ret []*v1.Channel, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Channel))
+ })
+ return ret, err
+}
+
+// Channels returns an object that can list and get Channels.
+func (s *channelLister) Channels(namespace string) ChannelNamespaceLister {
+ return channelNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// ChannelNamespaceLister helps list and get Channels.
+// All objects returned here must be treated as read-only.
+type ChannelNamespaceLister interface {
+ // List lists all Channels in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Channel, err error)
+ // Get retrieves the Channel from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Channel, error)
+ ChannelNamespaceListerExpansion
+}
+
+// channelNamespaceLister implements the ChannelNamespaceLister
+// interface.
+type channelNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all Channels in the indexer for a given namespace.
+func (s channelNamespaceLister) List(selector labels.Selector) (ret []*v1.Channel, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Channel))
+ })
+ return ret, err
+}
+
+// Get retrieves the Channel from the indexer for a given namespace and name.
+func (s channelNamespaceLister) Get(name string) (*v1.Channel, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("channel"), name)
+ }
+ return obj.(*v1.Channel), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/messaging/v1/expansion_generated.go b/vendor/knative.dev/eventing/pkg/client/listers/messaging/v1/expansion_generated.go
new file mode 100644
index 000000000..26d579bde
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/messaging/v1/expansion_generated.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+// ChannelListerExpansion allows custom methods to be added to
+// ChannelLister.
+type ChannelListerExpansion interface{}
+
+// ChannelNamespaceListerExpansion allows custom methods to be added to
+// ChannelNamespaceLister.
+type ChannelNamespaceListerExpansion interface{}
+
+// InMemoryChannelListerExpansion allows custom methods to be added to
+// InMemoryChannelLister.
+type InMemoryChannelListerExpansion interface{}
+
+// InMemoryChannelNamespaceListerExpansion allows custom methods to be added to
+// InMemoryChannelNamespaceLister.
+type InMemoryChannelNamespaceListerExpansion interface{}
+
+// SubscriptionListerExpansion allows custom methods to be added to
+// SubscriptionLister.
+type SubscriptionListerExpansion interface{}
+
+// SubscriptionNamespaceListerExpansion allows custom methods to be added to
+// SubscriptionNamespaceLister.
+type SubscriptionNamespaceListerExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/messaging/v1/inmemorychannel.go b/vendor/knative.dev/eventing/pkg/client/listers/messaging/v1/inmemorychannel.go
new file mode 100644
index 000000000..e6b86840c
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/messaging/v1/inmemorychannel.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1 "knative.dev/eventing/pkg/apis/messaging/v1"
+)
+
+// InMemoryChannelLister helps list InMemoryChannels.
+// All objects returned here must be treated as read-only.
+type InMemoryChannelLister interface {
+ // List lists all InMemoryChannels in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.InMemoryChannel, err error)
+ // InMemoryChannels returns an object that can list and get InMemoryChannels.
+ InMemoryChannels(namespace string) InMemoryChannelNamespaceLister
+ InMemoryChannelListerExpansion
+}
+
+// inMemoryChannelLister implements the InMemoryChannelLister interface.
+type inMemoryChannelLister struct {
+ indexer cache.Indexer
+}
+
+// NewInMemoryChannelLister returns a new InMemoryChannelLister.
+func NewInMemoryChannelLister(indexer cache.Indexer) InMemoryChannelLister {
+ return &inMemoryChannelLister{indexer: indexer}
+}
+
+// List lists all InMemoryChannels in the indexer.
+func (s *inMemoryChannelLister) List(selector labels.Selector) (ret []*v1.InMemoryChannel, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.InMemoryChannel))
+ })
+ return ret, err
+}
+
+// InMemoryChannels returns an object that can list and get InMemoryChannels.
+func (s *inMemoryChannelLister) InMemoryChannels(namespace string) InMemoryChannelNamespaceLister {
+ return inMemoryChannelNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// InMemoryChannelNamespaceLister helps list and get InMemoryChannels.
+// All objects returned here must be treated as read-only.
+type InMemoryChannelNamespaceLister interface {
+ // List lists all InMemoryChannels in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.InMemoryChannel, err error)
+ // Get retrieves the InMemoryChannel from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.InMemoryChannel, error)
+ InMemoryChannelNamespaceListerExpansion
+}
+
+// inMemoryChannelNamespaceLister implements the InMemoryChannelNamespaceLister
+// interface.
+type inMemoryChannelNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all InMemoryChannels in the indexer for a given namespace.
+func (s inMemoryChannelNamespaceLister) List(selector labels.Selector) (ret []*v1.InMemoryChannel, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.InMemoryChannel))
+ })
+ return ret, err
+}
+
+// Get retrieves the InMemoryChannel from the indexer for a given namespace and name.
+func (s inMemoryChannelNamespaceLister) Get(name string) (*v1.InMemoryChannel, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("inmemorychannel"), name)
+ }
+ return obj.(*v1.InMemoryChannel), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/messaging/v1/subscription.go b/vendor/knative.dev/eventing/pkg/client/listers/messaging/v1/subscription.go
new file mode 100644
index 000000000..d50d620db
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/messaging/v1/subscription.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1 "knative.dev/eventing/pkg/apis/messaging/v1"
+)
+
+// SubscriptionLister helps list Subscriptions.
+// All objects returned here must be treated as read-only.
+type SubscriptionLister interface {
+ // List lists all Subscriptions in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Subscription, err error)
+ // Subscriptions returns an object that can list and get Subscriptions.
+ Subscriptions(namespace string) SubscriptionNamespaceLister
+ SubscriptionListerExpansion
+}
+
+// subscriptionLister implements the SubscriptionLister interface.
+type subscriptionLister struct {
+ indexer cache.Indexer
+}
+
+// NewSubscriptionLister returns a new SubscriptionLister.
+func NewSubscriptionLister(indexer cache.Indexer) SubscriptionLister {
+ return &subscriptionLister{indexer: indexer}
+}
+
+// List lists all Subscriptions in the indexer.
+func (s *subscriptionLister) List(selector labels.Selector) (ret []*v1.Subscription, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Subscription))
+ })
+ return ret, err
+}
+
+// Subscriptions returns an object that can list and get Subscriptions.
+func (s *subscriptionLister) Subscriptions(namespace string) SubscriptionNamespaceLister {
+ return subscriptionNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// SubscriptionNamespaceLister helps list and get Subscriptions.
+// All objects returned here must be treated as read-only.
+type SubscriptionNamespaceLister interface {
+ // List lists all Subscriptions in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.Subscription, err error)
+ // Get retrieves the Subscription from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.Subscription, error)
+ SubscriptionNamespaceListerExpansion
+}
+
+// subscriptionNamespaceLister implements the SubscriptionNamespaceLister
+// interface.
+type subscriptionNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all Subscriptions in the indexer for a given namespace.
+func (s subscriptionNamespaceLister) List(selector labels.Selector) (ret []*v1.Subscription, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.Subscription))
+ })
+ return ret, err
+}
+
+// Get retrieves the Subscription from the indexer for a given namespace and name.
+func (s subscriptionNamespaceLister) Get(name string) (*v1.Subscription, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("subscription"), name)
+ }
+ return obj.(*v1.Subscription), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/sinks/v1alpha1/expansion_generated.go b/vendor/knative.dev/eventing/pkg/client/listers/sinks/v1alpha1/expansion_generated.go
new file mode 100644
index 000000000..48dbd0326
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/sinks/v1alpha1/expansion_generated.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+// JobSinkListerExpansion allows custom methods to be added to
+// JobSinkLister.
+type JobSinkListerExpansion interface{}
+
+// JobSinkNamespaceListerExpansion allows custom methods to be added to
+// JobSinkNamespaceLister.
+type JobSinkNamespaceListerExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/sinks/v1alpha1/jobsink.go b/vendor/knative.dev/eventing/pkg/client/listers/sinks/v1alpha1/jobsink.go
new file mode 100644
index 000000000..4dcd9c82c
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/sinks/v1alpha1/jobsink.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1alpha1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1alpha1 "knative.dev/eventing/pkg/apis/sinks/v1alpha1"
+)
+
+// JobSinkLister helps list JobSinks.
+// All objects returned here must be treated as read-only.
+type JobSinkLister interface {
+ // List lists all JobSinks in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.JobSink, err error)
+ // JobSinks returns an object that can list and get JobSinks.
+ JobSinks(namespace string) JobSinkNamespaceLister
+ JobSinkListerExpansion
+}
+
+// jobSinkLister implements the JobSinkLister interface.
+type jobSinkLister struct {
+ indexer cache.Indexer
+}
+
+// NewJobSinkLister returns a new JobSinkLister.
+func NewJobSinkLister(indexer cache.Indexer) JobSinkLister {
+ return &jobSinkLister{indexer: indexer}
+}
+
+// List lists all JobSinks in the indexer.
+func (s *jobSinkLister) List(selector labels.Selector) (ret []*v1alpha1.JobSink, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.JobSink))
+ })
+ return ret, err
+}
+
+// JobSinks returns an object that can list and get JobSinks.
+func (s *jobSinkLister) JobSinks(namespace string) JobSinkNamespaceLister {
+ return jobSinkNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// JobSinkNamespaceLister helps list and get JobSinks.
+// All objects returned here must be treated as read-only.
+type JobSinkNamespaceLister interface {
+ // List lists all JobSinks in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1alpha1.JobSink, err error)
+ // Get retrieves the JobSink from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1alpha1.JobSink, error)
+ JobSinkNamespaceListerExpansion
+}
+
+// jobSinkNamespaceLister implements the JobSinkNamespaceLister
+// interface.
+type jobSinkNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all JobSinks in the indexer for a given namespace.
+func (s jobSinkNamespaceLister) List(selector labels.Selector) (ret []*v1alpha1.JobSink, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1alpha1.JobSink))
+ })
+ return ret, err
+}
+
+// Get retrieves the JobSink from the indexer for a given namespace and name.
+func (s jobSinkNamespaceLister) Get(name string) (*v1alpha1.JobSink, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1alpha1.Resource("jobsink"), name)
+ }
+ return obj.(*v1alpha1.JobSink), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/apiserversource.go b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/apiserversource.go
new file mode 100644
index 000000000..4c8fb52f1
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/apiserversource.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1 "knative.dev/eventing/pkg/apis/sources/v1"
+)
+
+// ApiServerSourceLister helps list ApiServerSources.
+// All objects returned here must be treated as read-only.
+type ApiServerSourceLister interface {
+ // List lists all ApiServerSources in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.ApiServerSource, err error)
+ // ApiServerSources returns an object that can list and get ApiServerSources.
+ ApiServerSources(namespace string) ApiServerSourceNamespaceLister
+ ApiServerSourceListerExpansion
+}
+
+// apiServerSourceLister implements the ApiServerSourceLister interface.
+type apiServerSourceLister struct {
+ indexer cache.Indexer
+}
+
+// NewApiServerSourceLister returns a new ApiServerSourceLister.
+func NewApiServerSourceLister(indexer cache.Indexer) ApiServerSourceLister {
+ return &apiServerSourceLister{indexer: indexer}
+}
+
+// List lists all ApiServerSources in the indexer.
+func (s *apiServerSourceLister) List(selector labels.Selector) (ret []*v1.ApiServerSource, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.ApiServerSource))
+ })
+ return ret, err
+}
+
+// ApiServerSources returns an object that can list and get ApiServerSources.
+func (s *apiServerSourceLister) ApiServerSources(namespace string) ApiServerSourceNamespaceLister {
+ return apiServerSourceNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// ApiServerSourceNamespaceLister helps list and get ApiServerSources.
+// All objects returned here must be treated as read-only.
+type ApiServerSourceNamespaceLister interface {
+ // List lists all ApiServerSources in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.ApiServerSource, err error)
+ // Get retrieves the ApiServerSource from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.ApiServerSource, error)
+ ApiServerSourceNamespaceListerExpansion
+}
+
+// apiServerSourceNamespaceLister implements the ApiServerSourceNamespaceLister
+// interface.
+type apiServerSourceNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all ApiServerSources in the indexer for a given namespace.
+func (s apiServerSourceNamespaceLister) List(selector labels.Selector) (ret []*v1.ApiServerSource, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.ApiServerSource))
+ })
+ return ret, err
+}
+
+// Get retrieves the ApiServerSource from the indexer for a given namespace and name.
+func (s apiServerSourceNamespaceLister) Get(name string) (*v1.ApiServerSource, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("apiserversource"), name)
+ }
+ return obj.(*v1.ApiServerSource), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/containersource.go b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/containersource.go
new file mode 100644
index 000000000..12d1e51cd
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/containersource.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1 "knative.dev/eventing/pkg/apis/sources/v1"
+)
+
+// ContainerSourceLister helps list ContainerSources.
+// All objects returned here must be treated as read-only.
+type ContainerSourceLister interface {
+ // List lists all ContainerSources in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.ContainerSource, err error)
+ // ContainerSources returns an object that can list and get ContainerSources.
+ ContainerSources(namespace string) ContainerSourceNamespaceLister
+ ContainerSourceListerExpansion
+}
+
+// containerSourceLister implements the ContainerSourceLister interface.
+type containerSourceLister struct {
+ indexer cache.Indexer
+}
+
+// NewContainerSourceLister returns a new ContainerSourceLister.
+func NewContainerSourceLister(indexer cache.Indexer) ContainerSourceLister {
+ return &containerSourceLister{indexer: indexer}
+}
+
+// List lists all ContainerSources in the indexer.
+func (s *containerSourceLister) List(selector labels.Selector) (ret []*v1.ContainerSource, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.ContainerSource))
+ })
+ return ret, err
+}
+
+// ContainerSources returns an object that can list and get ContainerSources.
+func (s *containerSourceLister) ContainerSources(namespace string) ContainerSourceNamespaceLister {
+ return containerSourceNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// ContainerSourceNamespaceLister helps list and get ContainerSources.
+// All objects returned here must be treated as read-only.
+type ContainerSourceNamespaceLister interface {
+ // List lists all ContainerSources in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.ContainerSource, err error)
+ // Get retrieves the ContainerSource from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.ContainerSource, error)
+ ContainerSourceNamespaceListerExpansion
+}
+
+// containerSourceNamespaceLister implements the ContainerSourceNamespaceLister
+// interface.
+type containerSourceNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all ContainerSources in the indexer for a given namespace.
+func (s containerSourceNamespaceLister) List(selector labels.Selector) (ret []*v1.ContainerSource, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.ContainerSource))
+ })
+ return ret, err
+}
+
+// Get retrieves the ContainerSource from the indexer for a given namespace and name.
+func (s containerSourceNamespaceLister) Get(name string) (*v1.ContainerSource, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("containersource"), name)
+ }
+ return obj.(*v1.ContainerSource), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/expansion_generated.go b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/expansion_generated.go
new file mode 100644
index 000000000..7bc0e2952
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/expansion_generated.go
@@ -0,0 +1,51 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+// ApiServerSourceListerExpansion allows custom methods to be added to
+// ApiServerSourceLister.
+type ApiServerSourceListerExpansion interface{}
+
+// ApiServerSourceNamespaceListerExpansion allows custom methods to be added to
+// ApiServerSourceNamespaceLister.
+type ApiServerSourceNamespaceListerExpansion interface{}
+
+// ContainerSourceListerExpansion allows custom methods to be added to
+// ContainerSourceLister.
+type ContainerSourceListerExpansion interface{}
+
+// ContainerSourceNamespaceListerExpansion allows custom methods to be added to
+// ContainerSourceNamespaceLister.
+type ContainerSourceNamespaceListerExpansion interface{}
+
+// PingSourceListerExpansion allows custom methods to be added to
+// PingSourceLister.
+type PingSourceListerExpansion interface{}
+
+// PingSourceNamespaceListerExpansion allows custom methods to be added to
+// PingSourceNamespaceLister.
+type PingSourceNamespaceListerExpansion interface{}
+
+// SinkBindingListerExpansion allows custom methods to be added to
+// SinkBindingLister.
+type SinkBindingListerExpansion interface{}
+
+// SinkBindingNamespaceListerExpansion allows custom methods to be added to
+// SinkBindingNamespaceLister.
+type SinkBindingNamespaceListerExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/pingsource.go b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/pingsource.go
new file mode 100644
index 000000000..54918c9d5
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/pingsource.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1 "knative.dev/eventing/pkg/apis/sources/v1"
+)
+
+// PingSourceLister helps list PingSources.
+// All objects returned here must be treated as read-only.
+type PingSourceLister interface {
+ // List lists all PingSources in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.PingSource, err error)
+ // PingSources returns an object that can list and get PingSources.
+ PingSources(namespace string) PingSourceNamespaceLister
+ PingSourceListerExpansion
+}
+
+// pingSourceLister implements the PingSourceLister interface.
+type pingSourceLister struct {
+ indexer cache.Indexer
+}
+
+// NewPingSourceLister returns a new PingSourceLister.
+func NewPingSourceLister(indexer cache.Indexer) PingSourceLister {
+ return &pingSourceLister{indexer: indexer}
+}
+
+// List lists all PingSources in the indexer.
+func (s *pingSourceLister) List(selector labels.Selector) (ret []*v1.PingSource, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.PingSource))
+ })
+ return ret, err
+}
+
+// PingSources returns an object that can list and get PingSources.
+func (s *pingSourceLister) PingSources(namespace string) PingSourceNamespaceLister {
+ return pingSourceNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// PingSourceNamespaceLister helps list and get PingSources.
+// All objects returned here must be treated as read-only.
+type PingSourceNamespaceLister interface {
+ // List lists all PingSources in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.PingSource, err error)
+ // Get retrieves the PingSource from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.PingSource, error)
+ PingSourceNamespaceListerExpansion
+}
+
+// pingSourceNamespaceLister implements the PingSourceNamespaceLister
+// interface.
+type pingSourceNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all PingSources in the indexer for a given namespace.
+func (s pingSourceNamespaceLister) List(selector labels.Selector) (ret []*v1.PingSource, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.PingSource))
+ })
+ return ret, err
+}
+
+// Get retrieves the PingSource from the indexer for a given namespace and name.
+func (s pingSourceNamespaceLister) Get(name string) (*v1.PingSource, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("pingsource"), name)
+ }
+ return obj.(*v1.PingSource), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/sinkbinding.go b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/sinkbinding.go
new file mode 100644
index 000000000..e0441b874
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1/sinkbinding.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1 "knative.dev/eventing/pkg/apis/sources/v1"
+)
+
+// SinkBindingLister helps list SinkBindings.
+// All objects returned here must be treated as read-only.
+type SinkBindingLister interface {
+ // List lists all SinkBindings in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.SinkBinding, err error)
+ // SinkBindings returns an object that can list and get SinkBindings.
+ SinkBindings(namespace string) SinkBindingNamespaceLister
+ SinkBindingListerExpansion
+}
+
+// sinkBindingLister implements the SinkBindingLister interface.
+type sinkBindingLister struct {
+ indexer cache.Indexer
+}
+
+// NewSinkBindingLister returns a new SinkBindingLister.
+func NewSinkBindingLister(indexer cache.Indexer) SinkBindingLister {
+ return &sinkBindingLister{indexer: indexer}
+}
+
+// List lists all SinkBindings in the indexer.
+func (s *sinkBindingLister) List(selector labels.Selector) (ret []*v1.SinkBinding, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.SinkBinding))
+ })
+ return ret, err
+}
+
+// SinkBindings returns an object that can list and get SinkBindings.
+func (s *sinkBindingLister) SinkBindings(namespace string) SinkBindingNamespaceLister {
+ return sinkBindingNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// SinkBindingNamespaceLister helps list and get SinkBindings.
+// All objects returned here must be treated as read-only.
+type SinkBindingNamespaceLister interface {
+ // List lists all SinkBindings in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1.SinkBinding, err error)
+ // Get retrieves the SinkBinding from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1.SinkBinding, error)
+ SinkBindingNamespaceListerExpansion
+}
+
+// sinkBindingNamespaceLister implements the SinkBindingNamespaceLister
+// interface.
+type sinkBindingNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all SinkBindings in the indexer for a given namespace.
+func (s sinkBindingNamespaceLister) List(selector labels.Selector) (ret []*v1.SinkBinding, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1.SinkBinding))
+ })
+ return ret, err
+}
+
+// Get retrieves the SinkBinding from the indexer for a given namespace and name.
+func (s sinkBindingNamespaceLister) Get(name string) (*v1.SinkBinding, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1.Resource("sinkbinding"), name)
+ }
+ return obj.(*v1.SinkBinding), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/sources/v1beta2/expansion_generated.go b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1beta2/expansion_generated.go
new file mode 100644
index 000000000..83a507e82
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1beta2/expansion_generated.go
@@ -0,0 +1,27 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta2
+
+// PingSourceListerExpansion allows custom methods to be added to
+// PingSourceLister.
+type PingSourceListerExpansion interface{}
+
+// PingSourceNamespaceListerExpansion allows custom methods to be added to
+// PingSourceNamespaceLister.
+type PingSourceNamespaceListerExpansion interface{}
diff --git a/vendor/knative.dev/eventing/pkg/client/listers/sources/v1beta2/pingsource.go b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1beta2/pingsource.go
new file mode 100644
index 000000000..14c549b23
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/client/listers/sources/v1beta2/pingsource.go
@@ -0,0 +1,99 @@
+/*
+Copyright 2021 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Code generated by lister-gen. DO NOT EDIT.
+
+package v1beta2
+
+import (
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/labels"
+ "k8s.io/client-go/tools/cache"
+ v1beta2 "knative.dev/eventing/pkg/apis/sources/v1beta2"
+)
+
+// PingSourceLister helps list PingSources.
+// All objects returned here must be treated as read-only.
+type PingSourceLister interface {
+ // List lists all PingSources in the indexer.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta2.PingSource, err error)
+ // PingSources returns an object that can list and get PingSources.
+ PingSources(namespace string) PingSourceNamespaceLister
+ PingSourceListerExpansion
+}
+
+// pingSourceLister implements the PingSourceLister interface.
+type pingSourceLister struct {
+ indexer cache.Indexer
+}
+
+// NewPingSourceLister returns a new PingSourceLister.
+func NewPingSourceLister(indexer cache.Indexer) PingSourceLister {
+ return &pingSourceLister{indexer: indexer}
+}
+
+// List lists all PingSources in the indexer.
+func (s *pingSourceLister) List(selector labels.Selector) (ret []*v1beta2.PingSource, err error) {
+ err = cache.ListAll(s.indexer, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta2.PingSource))
+ })
+ return ret, err
+}
+
+// PingSources returns an object that can list and get PingSources.
+func (s *pingSourceLister) PingSources(namespace string) PingSourceNamespaceLister {
+ return pingSourceNamespaceLister{indexer: s.indexer, namespace: namespace}
+}
+
+// PingSourceNamespaceLister helps list and get PingSources.
+// All objects returned here must be treated as read-only.
+type PingSourceNamespaceLister interface {
+ // List lists all PingSources in the indexer for a given namespace.
+ // Objects returned here must be treated as read-only.
+ List(selector labels.Selector) (ret []*v1beta2.PingSource, err error)
+ // Get retrieves the PingSource from the indexer for a given namespace and name.
+ // Objects returned here must be treated as read-only.
+ Get(name string) (*v1beta2.PingSource, error)
+ PingSourceNamespaceListerExpansion
+}
+
+// pingSourceNamespaceLister implements the PingSourceNamespaceLister
+// interface.
+type pingSourceNamespaceLister struct {
+ indexer cache.Indexer
+ namespace string
+}
+
+// List lists all PingSources in the indexer for a given namespace.
+func (s pingSourceNamespaceLister) List(selector labels.Selector) (ret []*v1beta2.PingSource, err error) {
+ err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
+ ret = append(ret, m.(*v1beta2.PingSource))
+ })
+ return ret, err
+}
+
+// Get retrieves the PingSource from the indexer for a given namespace and name.
+func (s pingSourceNamespaceLister) Get(name string) (*v1beta2.PingSource, error) {
+ obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
+ if err != nil {
+ return nil, err
+ }
+ if !exists {
+ return nil, errors.NewNotFound(v1beta2.Resource("pingsource"), name)
+ }
+ return obj.(*v1beta2.PingSource), nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/crossnamespace/validation.go b/vendor/knative.dev/eventing/pkg/crossnamespace/validation.go
new file mode 100644
index 000000000..c7be092e1
--- /dev/null
+++ b/vendor/knative.dev/eventing/pkg/crossnamespace/validation.go
@@ -0,0 +1,101 @@
+/*
+Copyright 2024 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package crossnamespace
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ authv1 "k8s.io/api/authorization/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "knative.dev/pkg/apis"
+ duckv1 "knative.dev/pkg/apis/duck/v1"
+ kubeclient "knative.dev/pkg/client/injection/kube/client"
+)
+
+type ResourceInfo interface {
+ duckv1.KRShaped
+ GetCrossNamespaceRef() duckv1.KReference
+}
+
+func CheckNamespace(ctx context.Context, r ResourceInfo) *apis.FieldError {
+ targetKind := r.GetCrossNamespaceRef().Kind
+ targetGroup := r.GetCrossNamespaceRef().Group
+ targetName := r.GetCrossNamespaceRef().Name
+ targetNamespace := r.GetCrossNamespaceRef().Namespace
+ targetFieldName := fmt.Sprintf("spec.%sNamespace", targetKind)
+
+ if targetGroup == "" {
+ targetGroup = strings.Split(r.GetCrossNamespaceRef().APIVersion, "/")[0]
+ }
+
+ // If the target namespace is empty or the same as the object namespace, this function is skipped
+ if targetNamespace == "" || targetNamespace == r.GetNamespace() {
+ return nil
+ }
+
+ // convert the kind (Broker or Channel) into a resource (brokers or channels)
+ targetResource := strings.ToLower(targetKind) + "s"
+
+ // GetUserInfo accesses the UserInfo attached to the webhook context.
+ userInfo := apis.GetUserInfo(ctx)
+ if userInfo == nil {
+ return &apis.FieldError{
+ Paths: []string{targetFieldName},
+ Message: "failed to get userInfo, which is needed to validate access to the target namespace",
+ }
+ }
+
+ client := kubeclient.Get(ctx)
+
+ // SubjectAccessReview checks if the user is authorized to perform an action.
+ action := authv1.ResourceAttributes{
+ Name: targetName,
+ Namespace: targetNamespace,
+ Verb: "knsubscribe",
+ Group: targetGroup,
+ Resource: targetResource,
+ }
+
+ // Create the SubjectAccessReview
+ check := authv1.SubjectAccessReview{
+ Spec: authv1.SubjectAccessReviewSpec{
+ ResourceAttributes: &action,
+ User: userInfo.Username,
+ Groups: userInfo.Groups,
+ },
+ }
+
+ resp, err := client.AuthorizationV1().SubjectAccessReviews().Create(ctx, &check, metav1.CreateOptions{})
+
+ if err != nil {
+ return &apis.FieldError{
+ Paths: []string{targetFieldName},
+ Message: fmt.Sprintf("failed to make authorization request to see if user can subscribe to resources in namespace: %s", err.Error()),
+ }
+ }
+
+ if !resp.Status.Allowed {
+ return &apis.FieldError{
+ Paths: []string{targetFieldName},
+ Message: fmt.Sprintf("user %s is not authorized to get target resource in namespace: %s", userInfo.Username, targetNamespace),
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/knative.dev/eventing/pkg/metrics/metrics.go b/vendor/knative.dev/eventing/pkg/metrics/metrics.go
index 363673b10..beedaaa6b 100644
--- a/vendor/knative.dev/eventing/pkg/metrics/metrics.go
+++ b/vendor/knative.dev/eventing/pkg/metrics/metrics.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/metrics/source/stats_reporter.go b/vendor/knative.dev/eventing/pkg/metrics/source/stats_reporter.go
index 3c39c5823..88883768b 100644
--- a/vendor/knative.dev/eventing/pkg/metrics/source/stats_reporter.go
+++ b/vendor/knative.dev/eventing/pkg/metrics/source/stats_reporter.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/observability/attributes.go b/vendor/knative.dev/eventing/pkg/observability/attributes.go
index 72703f4f0..519f824d2 100644
--- a/vendor/knative.dev/eventing/pkg/observability/attributes.go
+++ b/vendor/knative.dev/eventing/pkg/observability/attributes.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/observability/client/observability_service.go b/vendor/knative.dev/eventing/pkg/observability/client/observability_service.go
index bbd6bd1a4..222e1cee9 100644
--- a/vendor/knative.dev/eventing/pkg/observability/client/observability_service.go
+++ b/vendor/knative.dev/eventing/pkg/observability/client/observability_service.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/observability/client/observable.go b/vendor/knative.dev/eventing/pkg/observability/client/observable.go
index 8804671e5..3ef02d99c 100644
--- a/vendor/knative.dev/eventing/pkg/observability/client/observable.go
+++ b/vendor/knative.dev/eventing/pkg/observability/client/observable.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/observability/context.go b/vendor/knative.dev/eventing/pkg/observability/context.go
index 4f3a5c9bd..49e7b4449 100644
--- a/vendor/knative.dev/eventing/pkg/observability/context.go
+++ b/vendor/knative.dev/eventing/pkg/observability/context.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/eventing/pkg/reconciler/source/config_watcher.go b/vendor/knative.dev/eventing/pkg/reconciler/source/config_watcher.go
index a89271072..ed33a0df1 100644
--- a/vendor/knative.dev/eventing/pkg/reconciler/source/config_watcher.go
+++ b/vendor/knative.dev/eventing/pkg/reconciler/source/config_watcher.go
@@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
- http://www.apache.org/licenses/LICENSE-2.0
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
diff --git a/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go b/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go
index 769370d76..b9b3876bb 100644
--- a/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go
+++ b/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go
@@ -305,6 +305,7 @@ func ContainerMask(in *corev1.Container) *corev1.Container {
out.ReadinessProbe = in.ReadinessProbe
out.Resources = in.Resources
out.SecurityContext = in.SecurityContext
+ out.StartupProbe = in.StartupProbe
out.TerminationMessagePath = in.TerminationMessagePath
out.TerminationMessagePolicy = in.TerminationMessagePolicy
out.VolumeMounts = in.VolumeMounts
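
Not part of the patch itself, but for context on the fieldmask.go hunk above: a minimal, illustrative Go sketch of what the added StartupProbe line changes, namely that a startup probe configured on a container now survives ContainerMask instead of being dropped by the copy. The ContainerMask signature and import path are taken from the hunk and vendor path above; the probe values and the main-package usage are made up for illustration.

// startupprobe_mask_example.go (illustrative sketch, not part of this patch)
package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	"knative.dev/serving/pkg/apis/serving"
)

func main() {
	// A container with a startup probe, as a user might set on a Knative Service.
	in := &corev1.Container{
		Name:  "user-container",
		Image: "example.com/app:latest", // hypothetical image
		StartupProbe: &corev1.Probe{
			ProbeHandler: corev1.ProbeHandler{
				HTTPGet: &corev1.HTTPGetAction{Path: "/healthz", Port: intstr.FromInt32(8080)},
			},
			FailureThreshold: 30,
			PeriodSeconds:    2,
		},
	}

	// ContainerMask copies only the allowed fields; with this patch the masked
	// copy now includes StartupProbe, so the check below prints true.
	out := serving.ContainerMask(in)
	fmt.Println("startupProbe preserved:", out.StartupProbe != nil)
}
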
diff --git a/vendor/modules.txt b/vendor/modules.txt
index e27a9ad8a..12c722b62 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -7,6 +7,9 @@ contrib.go.opencensus.io/exporter/prometheus
# contrib.go.opencensus.io/exporter/zipkin v0.1.2
## explicit
contrib.go.opencensus.io/exporter/zipkin
+# github.com/antlr/antlr4/runtime/Go/antlr v1.4.10
+## explicit; go 1.16
+github.com/antlr/antlr4/runtime/Go/antlr
# github.com/beorn7/perks v1.0.1
## explicit; go 1.11
github.com/beorn7/perks/quantile
@@ -24,13 +27,23 @@ github.com/census-instrumentation/opencensus-proto/gen-go/agent/trace/v1
github.com/census-instrumentation/opencensus-proto/gen-go/metrics/v1
github.com/census-instrumentation/opencensus-proto/gen-go/resource/v1
github.com/census-instrumentation/opencensus-proto/gen-go/trace/v1
-# github.com/cespare/xxhash/v2 v2.2.0
+# github.com/cespare/xxhash/v2 v2.3.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
# github.com/cloudevents/sdk-go/observability/opencensus/v2 v2.15.2
## explicit; go 1.18
github.com/cloudevents/sdk-go/observability/opencensus/v2/client
github.com/cloudevents/sdk-go/observability/opencensus/v2/http
+# github.com/cloudevents/sdk-go/sql/v2 v2.0.0-20240712172937-3ce6b2f1f011
+## explicit; go 1.18
+github.com/cloudevents/sdk-go/sql/v2
+github.com/cloudevents/sdk-go/sql/v2/errors
+github.com/cloudevents/sdk-go/sql/v2/expression
+github.com/cloudevents/sdk-go/sql/v2/function
+github.com/cloudevents/sdk-go/sql/v2/gen
+github.com/cloudevents/sdk-go/sql/v2/parser
+github.com/cloudevents/sdk-go/sql/v2/runtime
+github.com/cloudevents/sdk-go/sql/v2/utils
# github.com/cloudevents/sdk-go/v2 v2.15.2
## explicit; go 1.18
github.com/cloudevents/sdk-go/v2
@@ -223,6 +236,9 @@ github.com/rickb777/date/period
# github.com/rickb777/plural v1.2.1
## explicit; go 1.14
github.com/rickb777/plural
+# github.com/robfig/cron/v3 v3.0.1
+## explicit; go 1.12
+github.com/robfig/cron/v3
# github.com/spf13/pflag v1.0.5
## explicit; go 1.12
github.com/spf13/pflag
@@ -275,15 +291,15 @@ go.uber.org/zap/internal/stacktrace
go.uber.org/zap/internal/ztest
go.uber.org/zap/zapcore
go.uber.org/zap/zaptest
-# golang.org/x/crypto v0.24.0
-## explicit; go 1.18
+# golang.org/x/crypto v0.25.0
+## explicit; go 1.20
golang.org/x/crypto/pbkdf2
-# golang.org/x/mod v0.18.0
+# golang.org/x/mod v0.19.0
## explicit; go 1.18
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.26.0
+# golang.org/x/net v0.27.0
## explicit; go 1.18
golang.org/x/net/http/httpguts
golang.org/x/net/http2
@@ -300,12 +316,12 @@ golang.org/x/oauth2/internal
## explicit; go 1.18
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.21.0
+# golang.org/x/sys v0.22.0
## explicit; go 1.18
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
-# golang.org/x/term v0.21.0
+# golang.org/x/term v0.22.0
## explicit; go 1.18
golang.org/x/term
# golang.org/x/text v0.16.0
@@ -317,7 +333,7 @@ golang.org/x/text/unicode/norm
# golang.org/x/time v0.5.0
## explicit; go 1.18
golang.org/x/time/rate
-# golang.org/x/tools v0.22.0
+# golang.org/x/tools v0.23.0
## explicit; go 1.19
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/imports
@@ -341,14 +357,15 @@ google.golang.org/genproto/googleapis/api/httpbody
# google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157
## explicit; go 1.20
google.golang.org/genproto/googleapis/rpc/status
-# google.golang.org/grpc v1.64.0
-## explicit; go 1.19
+# google.golang.org/grpc v1.65.0
+## explicit; go 1.21
google.golang.org/grpc
google.golang.org/grpc/attributes
google.golang.org/grpc/backoff
google.golang.org/grpc/balancer
google.golang.org/grpc/balancer/base
google.golang.org/grpc/balancer/grpclb/state
+google.golang.org/grpc/balancer/pickfirst
google.golang.org/grpc/balancer/roundrobin
google.golang.org/grpc/binarylog/grpc_binarylog_v1
google.golang.org/grpc/channelz
@@ -370,7 +387,6 @@ google.golang.org/grpc/internal/channelz
google.golang.org/grpc/internal/credentials
google.golang.org/grpc/internal/envconfig
google.golang.org/grpc/internal/grpclog
-google.golang.org/grpc/internal/grpcrand
google.golang.org/grpc/internal/grpcsync
google.golang.org/grpc/internal/grpcutil
google.golang.org/grpc/internal/idle
@@ -546,6 +562,7 @@ k8s.io/apimachinery/pkg/util/managedfields/internal
k8s.io/apimachinery/pkg/util/mergepatch
k8s.io/apimachinery/pkg/util/naming
k8s.io/apimachinery/pkg/util/net
+k8s.io/apimachinery/pkg/util/rand
k8s.io/apimachinery/pkg/util/runtime
k8s.io/apimachinery/pkg/util/sets
k8s.io/apimachinery/pkg/util/strategicpatch
@@ -558,6 +575,9 @@ k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
k8s.io/apimachinery/third_party/forked/golang/reflect
+# k8s.io/apiserver v0.29.2
+## explicit; go 1.21
+k8s.io/apiserver/pkg/storage/names
# k8s.io/client-go v0.29.2
## explicit; go 1.21
k8s.io/client-go/applyconfigurations/admissionregistration/v1
@@ -951,34 +971,92 @@ k8s.io/utils/pointer
k8s.io/utils/ptr
k8s.io/utils/strings/slices
k8s.io/utils/trace
-# knative.dev/eventing v0.41.1-0.20240627060150-a6ac8111e82f
+# knative.dev/eventing v0.42.0
## explicit; go 1.22
knative.dev/eventing/pkg/adapter/v2
knative.dev/eventing/pkg/adapter/v2/test
knative.dev/eventing/pkg/adapter/v2/util/crstatusevent
knative.dev/eventing/pkg/apis
knative.dev/eventing/pkg/apis/config
+knative.dev/eventing/pkg/apis/duck
knative.dev/eventing/pkg/apis/duck/v1
knative.dev/eventing/pkg/apis/eventing
+knative.dev/eventing/pkg/apis/eventing/v1
knative.dev/eventing/pkg/apis/eventing/v1alpha1
+knative.dev/eventing/pkg/apis/eventing/v1beta1
+knative.dev/eventing/pkg/apis/eventing/v1beta2
+knative.dev/eventing/pkg/apis/eventing/v1beta3
knative.dev/eventing/pkg/apis/feature
+knative.dev/eventing/pkg/apis/flows
+knative.dev/eventing/pkg/apis/flows/v1
+knative.dev/eventing/pkg/apis/messaging
+knative.dev/eventing/pkg/apis/messaging/config
+knative.dev/eventing/pkg/apis/messaging/v1
+knative.dev/eventing/pkg/apis/sinks
+knative.dev/eventing/pkg/apis/sinks/v1alpha1
+knative.dev/eventing/pkg/apis/sources
+knative.dev/eventing/pkg/apis/sources/config
+knative.dev/eventing/pkg/apis/sources/v1
+knative.dev/eventing/pkg/apis/sources/v1beta2
knative.dev/eventing/pkg/auth
+knative.dev/eventing/pkg/client/clientset/versioned
+knative.dev/eventing/pkg/client/clientset/versioned/scheme
+knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1
+knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1alpha1
+knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta1
+knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta2
+knative.dev/eventing/pkg/client/clientset/versioned/typed/eventing/v1beta3
+knative.dev/eventing/pkg/client/clientset/versioned/typed/flows/v1
+knative.dev/eventing/pkg/client/clientset/versioned/typed/messaging/v1
+knative.dev/eventing/pkg/client/clientset/versioned/typed/sinks/v1alpha1
+knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1
+knative.dev/eventing/pkg/client/clientset/versioned/typed/sources/v1beta2
+knative.dev/eventing/pkg/client/informers/externalversions
+knative.dev/eventing/pkg/client/informers/externalversions/eventing
+knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1
+knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1alpha1
+knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta1
+knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta2
+knative.dev/eventing/pkg/client/informers/externalversions/eventing/v1beta3
+knative.dev/eventing/pkg/client/informers/externalversions/flows
+knative.dev/eventing/pkg/client/informers/externalversions/flows/v1
+knative.dev/eventing/pkg/client/informers/externalversions/internalinterfaces
+knative.dev/eventing/pkg/client/informers/externalversions/messaging
+knative.dev/eventing/pkg/client/informers/externalversions/messaging/v1
+knative.dev/eventing/pkg/client/informers/externalversions/sinks
+knative.dev/eventing/pkg/client/informers/externalversions/sinks/v1alpha1
+knative.dev/eventing/pkg/client/informers/externalversions/sources
+knative.dev/eventing/pkg/client/informers/externalversions/sources/v1
+knative.dev/eventing/pkg/client/informers/externalversions/sources/v1beta2
+knative.dev/eventing/pkg/client/injection/client
+knative.dev/eventing/pkg/client/injection/informers/eventing/v1alpha1/eventpolicy
+knative.dev/eventing/pkg/client/injection/informers/factory
+knative.dev/eventing/pkg/client/listers/eventing/v1
knative.dev/eventing/pkg/client/listers/eventing/v1alpha1
+knative.dev/eventing/pkg/client/listers/eventing/v1beta1
+knative.dev/eventing/pkg/client/listers/eventing/v1beta2
+knative.dev/eventing/pkg/client/listers/eventing/v1beta3
+knative.dev/eventing/pkg/client/listers/flows/v1
+knative.dev/eventing/pkg/client/listers/messaging/v1
+knative.dev/eventing/pkg/client/listers/sinks/v1alpha1
+knative.dev/eventing/pkg/client/listers/sources/v1
+knative.dev/eventing/pkg/client/listers/sources/v1beta2
+knative.dev/eventing/pkg/crossnamespace
knative.dev/eventing/pkg/eventingtls
knative.dev/eventing/pkg/metrics
knative.dev/eventing/pkg/metrics/source
knative.dev/eventing/pkg/observability
knative.dev/eventing/pkg/observability/client
knative.dev/eventing/pkg/reconciler/source
-# knative.dev/hack v0.0.0-20240607132042-09143140a254
+# knative.dev/hack v0.0.0-20240704013904-b9799599afcf
## explicit; go 1.18
knative.dev/hack
-# knative.dev/networking v0.0.0-20240611072033-3b8764c0bb4c
-## explicit; go 1.21
+# knative.dev/networking v0.0.0-20240716111826-bab7f2a3e556
+## explicit; go 1.22
knative.dev/networking/pkg/apis/networking
knative.dev/networking/pkg/apis/networking/v1alpha1
knative.dev/networking/pkg/config
-# knative.dev/pkg v0.0.0-20240626134149-3f6a546ac3a4
+# knative.dev/pkg v0.0.0-20240716082220-4355f0c73608
## explicit; go 1.22
knative.dev/pkg/apis
knative.dev/pkg/apis/duck
@@ -1042,7 +1120,7 @@ knative.dev/pkg/webhook/psbinding
knative.dev/pkg/webhook/resourcesemantics
knative.dev/pkg/webhook/resourcesemantics/defaulting
knative.dev/pkg/webhook/resourcesemantics/validation
-# knative.dev/serving v0.41.1-0.20240626185720-a043ddf2770a
+# knative.dev/serving v0.42.0
## explicit; go 1.22
knative.dev/serving/pkg/apis/autoscaling
knative.dev/serving/pkg/apis/autoscaling/v1alpha1