diff --git a/go.mod b/go.mod index 57ccf19..f04b989 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,11 @@ module github.com/openfaas/cron-connector -go 1.13 +go 1.15 require ( - github.com/ewilde/faas-federation v0.0.0-20200206161705-23d8f6d639f1 // indirect - github.com/openfaas-incubator/connector-sdk v0.0.0-20190125151851-d722c9f72ad0 - github.com/openfaas/faas v0.0.0-20190104165101-a65df4795bc6 - github.com/openfaas/faas-provider v0.0.0-20181216160432-220324e98f5d // indirect - github.com/pkg/errors v0.8.1 + github.com/openfaas-incubator/connector-sdk v0.0.0-20200902074656-7f648543d4aa + github.com/openfaas/faas-cli v0.0.0-20201119120128-c9d284d0c5bd + github.com/openfaas/faas-provider v0.15.1 + github.com/pkg/errors v0.9.1 github.com/robfig/cron/v3 v3.0.0 - gopkg.in/robfig/cron.v2 v2.0.0-20150107220207-be2e0b0deed5 // indirect ) diff --git a/go.sum b/go.sum index 13c97e7..553ef25 100644 --- a/go.sum +++ b/go.sum @@ -1,14 +1,172 @@ -github.com/ewilde/faas-federation v0.0.0-20200206161705-23d8f6d639f1/go.mod h1:KUZSQ/D7+BBykfBCx2cPn8LZdvRLPHwBLhKxF94jRWw= -github.com/openfaas-incubator/connector-sdk v0.0.0-20190125151851-d722c9f72ad0 h1:K7UydMie+pPYUtyA/Ele4xMst5q2D8yYPNg5NU2cs3U= -github.com/openfaas-incubator/connector-sdk v0.0.0-20190125151851-d722c9f72ad0/go.mod h1:jHCtd1HCZwhuwdPy4OB8CQU7ZqzxBdWNhIhH/khCJqQ= -github.com/openfaas/cron-connector v0.2.2/go.mod h1:G+g/i/KXbkd3A/W0FogoRnGxG0uOOhOkshPhuWWdUnQ= -github.com/openfaas/faas v0.0.0-20190104165101-a65df4795bc6 h1:eCf/7VwjpIOfLecWQoO0W9UMu1g5g9Ocwp+Sg7jkGzg= -github.com/openfaas/faas v0.0.0-20190104165101-a65df4795bc6/go.mod h1:E0m2rLup0Vvxg53BKxGgaYAGcZa3Xl+vvL7vSi5yQ14= -github.com/openfaas/faas-provider v0.0.0-20181216160432-220324e98f5d h1:mPAUhyVGlBfGVQQUngtyGEH9uidFUzq9R5QkrpqGJug= -github.com/openfaas/faas-provider v0.0.0-20181216160432-220324e98f5d/go.mod h1:W4OIp33RUOpR7wW+omJB/7GhIydRmYXvKf/VqUKI4yM= -github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alexellis/go-execute v0.0.0-20191207085904-961405ea7544/go.mod h1:zfRbgnPVxXCSpiKrg1CE72hNUWInqxExiaz2D9ppTts= +github.com/alexellis/hmac v0.0.0-20180624210714-d5d71edd7bc7/go.mod h1:uAbpy8G7sjNB4qYdY6ymf5OIQ+TLDPApBYiR0Vc3lhk= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= 
+github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/docker/docker v17.12.0-ce-rc1.0.20200618181300-9dc6525e6118+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y= +github.com/drone/envsubst v1.0.2 h1:dpYLMAspQHW0a8dZpLRKe9jCNvIGZPhCPrycZzIHdqo= +github.com/drone/envsubst v1.0.2/go.mod h1:bkZbnc/2vh1M12Ecn7EYScpI4YGYU0etwLJICOWi8Z0= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/gotestyourself/gotestyourself v1.4.0/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/hcl v1.0.0/go.mod 
h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/openfaas-incubator/connector-sdk v0.0.0-20200902074656-7f648543d4aa h1:PHIdq6gnfkLLwdDnG3MWlDs2kNaAh61C0xom3qoL4d4= +github.com/openfaas-incubator/connector-sdk v0.0.0-20200902074656-7f648543d4aa/go.mod h1:xQK4clyPbZ1MfdIk3cU212TiS8VUwuX8RZvwtFBtacA= +github.com/openfaas/faas v0.0.0-20200422142642-18f6c720b50d h1:mlI37Hd4Htedk7iCdyp/J95RDCOLLXiRoO6JuXzUP6A= +github.com/openfaas/faas v0.0.0-20200422142642-18f6c720b50d/go.mod h1:E0m2rLup0Vvxg53BKxGgaYAGcZa3Xl+vvL7vSi5yQ14= +github.com/openfaas/faas-cli v0.0.0-20201119120128-c9d284d0c5bd h1:OfeOHjiVdwc16wNjdIU8Dg0eVeeItSKOi9KjNH1l3Aw= +github.com/openfaas/faas-cli v0.0.0-20201119120128-c9d284d0c5bd/go.mod h1:FpYBV5h19PQYfAX8m5+6sYNlUQs7SAEBPVc8NNulHFs= +github.com/openfaas/faas-provider v0.0.0-20191014125556-8699aa7d3999/go.mod h1:W4OIp33RUOpR7wW+omJB/7GhIydRmYXvKf/VqUKI4yM= +github.com/openfaas/faas-provider v0.15.1 h1:5M+DrGbuKlZxhN3/otFgLgBUnPQcYm6XosQDSbKXh30= +github.com/openfaas/faas-provider v0.15.1/go.mod h1:ChVioeB3snmfYxBTu7SrWGYLoYN9fZkQlnuEQ86a6l0= +github.com/openfaas/templates-sdk v0.0.0-20200723110415-a699ec277c12/go.mod h1:JWcVHdzlHcR7nLuaDL88Mz68wOqDvOn0CLO6t27OMhk= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= 
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/robfig/cron/v3 v3.0.0 h1:kQ6Cb7aHOHTSzNVNEhmp8EcWKLb4CbiMW9h9VyIhO4E= github.com/robfig/cron/v3 v3.0.0/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= -gopkg.in/robfig/cron.v2 v2.0.0-20150107220207-be2e0b0deed5 h1:E846t8CnR+lv5nE+VuiKTDG/v1U2stad0QzddfJC7kY= -gopkg.in/robfig/cron.v2 v2.0.0-20150107220207-be2e0b0deed5/go.mod h1:hiOFpYm0ZJbusNj2ywpbrXowU3G8U6GIQzqn2mw1UIE= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/ryanuber/go-glob v1.0.0 h1:iQh3xXAumdQ+4Ufa5b25cRpC5TYKlno6hsv6Cb3pkBk= +github.com/ryanuber/go-glob v1.0.0/go.mod h1:807d1WSdnB0XRJzKNil9Om6lcp/3a0v4qIHxIXzX/Yc= +github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.7/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4= +github.com/stretchr/testify v1.5.1/go.mod 
h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/goleak v0.10.0 h1:G3eWbSNIskeRqtsN/1uI5B+eP73y3JUuBsv9AZjehb4= +go.uber.org/goleak v0.10.0/go.mod h1:VCZuO8V8mFPlL0F5J5GK1rtHV3DrFcQ1R8ryq7FK0aI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gotest.tools v1.4.0/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/main.go b/main.go index 2de23ff..4cbb0b6 100644 --- a/main.go +++ b/main.go @@ -4,22 +4,22 @@ package main import ( - "errors" + "context" "fmt" "log" + "net/http" "os" "time" "github.com/openfaas-incubator/connector-sdk/types" cfunction "github.com/openfaas/cron-connector/types" "github.com/openfaas/cron-connector/version" - "github.com/openfaas/faas/gateway/requests" + sdk "github.com/openfaas/faas-cli/proxy" + ptypes "github.com/openfaas/faas-provider/types" ) func main() { - creds := types.GetCredentials() config, err := getControllerConfig() - if err != nil { panic(err) } @@ -27,13 +27,13 @@ func main() { sha, ver := version.GetReleaseInfo() log.Printf("Version: %s\tCommit: %s", sha, ver) - controller := types.NewController(creds, config) + invoker := types.NewInvoker(config.GatewayURL, types.MakeClient(config.UpstreamTimeout), config.PrintResponse) cronScheduler := cfunction.NewScheduler() topic := "cron-function" interval := time.Second * 10 cronScheduler.Start() - err = startFunctionProbe(interval, topic, controller, cronScheduler, controller.Invoker) + err = startFunctionProbe(interval, topic, config, cronScheduler, invoker) if err != nil { panic(err) @@ -44,7 +44,7 @@ func getControllerConfig() (*types.ControllerConfig, error) { gURL, ok := os.LookupEnv("gateway_url") if !ok { - return nil, errors.New("Gateway URL not set") + return nil, fmt.Errorf("Gateway URL not set") } return &types.ControllerConfig{ @@ -54,54 +54,79 @@ func getControllerConfig() (*types.ControllerConfig, error) { }, nil } -func startFunctionProbe(interval time.Duration, topic string, c *types.Controller, cronScheduler *cfunction.Scheduler, invoker *types.Invoker) error { +// BasicAuth provides basic authentication for the gateway +type BasicAuth struct { + Username string + Password string +} + +// Set sets the Authorization header on the request +func (auth *BasicAuth) Set(req *http.Request) error { + req.SetBasicAuth(auth.Username, auth.Password) + return nil +} + +func startFunctionProbe(interval 
time.Duration, topic string, c *types.ControllerConfig, cronScheduler *cfunction.Scheduler, invoker *types.Invoker) error { runningFuncs := make(cfunction.ScheduledFunctions, 0) - lookupBuilder := cfunction.FunctionLookupBuilder{ - GatewayURL: c.Config.GatewayURL, - Client: types.MakeClient(c.Config.UpstreamTimeout), - Credentials: c.Credentials, + timeout := 3 * time.Second + auth := &BasicAuth{} + auth.Username = types.GetCredentials().User + auth.Password = types.GetCredentials().Password + + sdkClient, err := sdk.NewClient(auth, c.GatewayURL, nil, &timeout) + if err != nil { + panic(err) } + ctx := context.Background() + ticker := time.NewTicker(interval) defer ticker.Stop() for { <-ticker.C - functions, err := lookupBuilder.GetFunctions() + namespaces, err := sdkClient.ListNamespaces(ctx) if err != nil { - return errors.New(fmt.Sprint("Couldn't fetch Functions due to: ", err)) + return fmt.Errorf("Couldn't fetch Namespaces due to: %s", err) } - newCronFunctions := RequestsToCronFunctions(functions, topic) - addFuncs, deleteFuncs := GetNewAndDeleteFuncs(newCronFunctions, runningFuncs) + for _, namespace := range namespaces { + functions, err := sdkClient.ListFunctions(ctx, namespace) + if err != nil { + return fmt.Errorf("Couldn't fetch Functions due to: %s", err) + } - for _, function := range deleteFuncs { - cronScheduler.Remove(function) - log.Print("deleted function ", function.Function.Name) - } + newCronFunctions := RequestsToCronFunctions(functions, namespace, topic) + addFuncs, deleteFuncs := GetNewAndDeleteFuncs(newCronFunctions, runningFuncs, namespace) + + for _, function := range deleteFuncs { + cronScheduler.Remove(function) + log.Print("deleted function ", function.Function.Name, " in ", function.Function.Namespace) + } - newScheduledFuncs := make(cfunction.ScheduledFunctions, 0) + newScheduledFuncs := make(cfunction.ScheduledFunctions, 0) - for _, function := range addFuncs { - f, err := cronScheduler.AddCronFunction(function, invoker) - if err != nil { - log.Fatal("could not add function ", function.Name) + for _, function := range addFuncs { + f, err := cronScheduler.AddCronFunction(function, invoker) + if err != nil { + log.Fatal("could not add function ", function.Name, " in ", function.Namespace) + } + + newScheduledFuncs = append(newScheduledFuncs, f) + log.Print("added function ", function.Name, " in ", function.Namespace) } - newScheduledFuncs = append(newScheduledFuncs, f) - log.Print("added function ", function.Name) + runningFuncs = UpdateScheduledFunctions(runningFuncs, newScheduledFuncs, deleteFuncs) } - - runningFuncs = UpdateScheduledFunctions(runningFuncs, newScheduledFuncs, deleteFuncs) } } -// RequestsToCronFunctions converts an array of requests.Function object to CronFunction, ignoring those that cannot be converted -func RequestsToCronFunctions(functions []requests.Function, topic string) cfunction.CronFunctions { +// RequestsToCronFunctions converts a slice of ptypes.FunctionStatus objects to CronFunctions, ignoring those that cannot be converted +func RequestsToCronFunctions(functions []ptypes.FunctionStatus, namespace string, topic string) cfunction.CronFunctions { newCronFuncs := make(cfunction.CronFunctions, 0) for _, function := range functions { - cF, err := cfunction.ToCronFunction(function, topic) + cF, err := cfunction.ToCronFunction(function, namespace, topic) if err != nil { continue } @@ -111,7 +136,7 @@ func RequestsToCronFunctions(functions []requests.Function, topic string) cfunct } // GetNewAndDeleteFuncs takes new functions and 
running cron functions and returns functions that need to be added and that need to be deleted -func GetNewAndDeleteFuncs(newFuncs cfunction.CronFunctions, oldFuncs cfunction.ScheduledFunctions) (cfunction.CronFunctions, cfunction.ScheduledFunctions) { +func GetNewAndDeleteFuncs(newFuncs cfunction.CronFunctions, oldFuncs cfunction.ScheduledFunctions, namespace string) (cfunction.CronFunctions, cfunction.ScheduledFunctions) { addFuncs := make(cfunction.CronFunctions, 0) deleteFuncs := make(cfunction.ScheduledFunctions, 0) @@ -122,7 +147,7 @@ func GetNewAndDeleteFuncs(newFuncs cfunction.CronFunctions, oldFuncs cfunction.S } for _, function := range oldFuncs { - if !newFuncs.Contains(&function.Function) { + if !newFuncs.Contains(&function.Function) && function.Function.Namespace == namespace { deleteFuncs = append(deleteFuncs, function) } } diff --git a/main_test.go b/main_test.go index a5fb5cb..6a808a7 100644 --- a/main_test.go +++ b/main_test.go @@ -6,22 +6,22 @@ import ( "testing" cfunction "github.com/openfaas/cron-connector/types" - "github.com/openfaas/faas/gateway/requests" + ptypes "github.com/openfaas/faas-provider/types" ) func TestGetNewAndDeleteFuncs(t *testing.T) { newCronFunctions := make(cfunction.CronFunctions, 3) - defaultReq := requests.Function{} - newCronFunctions[0] = cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_unchanged", Schedule: "* * * * *"} - newCronFunctions[1] = cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_to_add", Schedule: "* * * * *"} - newCronFunctions[1] = cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_to_update", Schedule: "*/5 * * * *"} + defaultReq := ptypes.FunctionStatus{} + newCronFunctions[0] = cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_unchanged", Namespace: "openfaas-fn", Schedule: "* * * * *"} + newCronFunctions[1] = cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_to_add", Namespace: "openfaas-fn", Schedule: "* * * * *"} + newCronFunctions[2] = cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_to_update", Namespace: "openfaas-fn", Schedule: "*/5 * * * *"} oldFuncs := make(cfunction.ScheduledFunctions, 3) - oldFuncs[0] = cfunction.ScheduledFunction{Function: cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_unchanged", Schedule: "* * * * *"}, ID: 0} - oldFuncs[1] = cfunction.ScheduledFunction{Function: cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_to_delete", Schedule: "* * * * *"}, ID: 0} - oldFuncs[2] = cfunction.ScheduledFunction{Function: cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_to_update", Schedule: "* * * * *"}, ID: 0} + oldFuncs[0] = cfunction.ScheduledFunction{Function: cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_unchanged", Namespace: "openfaas-fn", Schedule: "* * * * *"}, ID: 0} + oldFuncs[1] = cfunction.ScheduledFunction{Function: cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_to_delete", Namespace: "openfaas-fn", Schedule: "* * * * *"}, ID: 0} + oldFuncs[2] = cfunction.ScheduledFunction{Function: cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_to_update", Namespace: "openfaas-fn", Schedule: "* * * * *"}, ID: 0} - addFuncs, deleteFuncs := GetNewAndDeleteFuncs(newCronFunctions, oldFuncs) + addFuncs, deleteFuncs := GetNewAndDeleteFuncs(newCronFunctions, oldFuncs, "openfaas-fn") if !deleteFuncs.Contains(&oldFuncs[1].Function) { t.Error("function was not deleted") } @@ -30,7 +30,37 @@ func 
TestGetNewAndDeleteFuncs(t *testing.T) { t.Error("function was not added") } - if !deleteFuncs.Contains(&oldFuncs[2].Function) && !addFuncs.Contains(&newCronFunctions[1]) { + if !deleteFuncs.Contains(&oldFuncs[2].Function) && !addFuncs.Contains(&newCronFunctions[2]) { t.Error("function will not be updated") } + + if addFuncs.Contains(&newCronFunctions[0]) || deleteFuncs.Contains(&newCronFunctions[0]) { + t.Error("function should be left as it is") + } +} + +func TestNamespaceFuncs(t *testing.T) { + newCronFunctions := make(cfunction.CronFunctions, 3) + defaultReq := ptypes.FunctionStatus{} + newCronFunctions[0] = cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_one", Namespace: "openfaas-fn", Schedule: "* * * * *"} + newCronFunctions[1] = cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_one", Namespace: "custom", Schedule: "* * * * *"} + newCronFunctions[2] = cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_to_update", Namespace: "openfaas-fn", Schedule: "*/5 * * * *"} + + oldFuncs := make(cfunction.ScheduledFunctions, 3) + oldFuncs[0] = cfunction.ScheduledFunction{Function: cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_one", Namespace: "openfaas-fn", Schedule: "* * * * *"}, ID: 0} + oldFuncs[1] = cfunction.ScheduledFunction{Function: cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_to_delete", Namespace: "openfaas-fn", Schedule: "* * * * *"}, ID: 0} + oldFuncs[2] = cfunction.ScheduledFunction{Function: cfunction.CronFunction{FuncData: defaultReq, Name: "test_function_to_update", Namespace: "openfaas-fn", Schedule: "* * * * *"}, ID: 0} + + addFuncs, deleteFuncs := GetNewAndDeleteFuncs(newCronFunctions, oldFuncs, "openfaas-fn") + if !deleteFuncs.Contains(&oldFuncs[1].Function) { + t.Error("function was not deleted") + } + + if !addFuncs.Contains(&newCronFunctions[1]) { + t.Error("function was not added") + } + + if !deleteFuncs.Contains(&oldFuncs[2].Function) && !addFuncs.Contains(&newCronFunctions[2]) { t.Error("function will not be updated") } diff --git a/types/cron_function.go b/types/cron_function.go index bf2bcf6..dd974ac 100644 --- a/types/cron_function.go +++ b/types/cron_function.go @@ -11,15 +11,16 @@ import ( "net/http" "github.com/openfaas-incubator/connector-sdk/types" - "github.com/openfaas/faas/gateway/requests" + ptypes "github.com/openfaas/faas-provider/types" "github.com/pkg/errors" ) // CronFunction depicts an OpenFaaS function which is invoked by cron type CronFunction struct { - FuncData requests.Function - Name string - Schedule string + FuncData ptypes.FunctionStatus + Name string + Namespace string + Schedule string } // CronFunctions a list of CronFunction @@ -30,7 +31,7 @@ func (c *CronFunctions) Contains(cF *CronFunction) bool { for _, f := range *c { - if f.Name == cF.Name && f.Schedule == cF.Schedule { + if f.Name == cF.Name && f.Namespace == cF.Namespace && f.Schedule == cF.Schedule { return true } @@ -39,8 +40,8 @@ func (c *CronFunctions) Contains(cF *CronFunction) bool { return false } -// ToCronFunction converts a requests.Function object to the CronFunction and returns error if it is not possible -func ToCronFunction(f requests.Function, topic string) (CronFunction, error) { +// ToCronFunction converts a ptypes.FunctionStatus object to a CronFunction, returning an error if the conversion is not possible +func ToCronFunction(f ptypes.FunctionStatus, namespace string, topic string) (CronFunction, error) { if f.Annotations == nil { return CronFunction{}, 
errors.New(fmt.Sprint(f.Name, " has no annotations.")) } @@ -58,13 +59,14 @@ func ToCronFunction(f requests.Function, topic string) (CronFunction, error) { var c CronFunction c.FuncData = f c.Name = f.Name + c.Namespace = namespace c.Schedule = fSchedule return c, nil } // InvokeFunction Invokes the cron function func (c CronFunction) InvokeFunction(i *types.Invoker) (*[]byte, error) { - gwURL := fmt.Sprintf("%s/function/%s", i.GatewayURL, c.Name) + gwURL := fmt.Sprintf("%s/function/%s.%s", i.GatewayURL, c.Name, c.Namespace) reader := bytes.NewReader(make([]byte, 0)) httpReq, _ := http.NewRequest(http.MethodPost, gwURL, reader) @@ -77,7 +79,7 @@ func (c CronFunction) InvokeFunction(i *types.Invoker) (*[]byte, error) { if doErr != nil { i.Responses <- types.InvokerResponse{ - Error: errors.Wrap(doErr, fmt.Sprint("unable to invoke ", c.Name)), + Error: errors.Wrap(doErr, fmt.Sprint("unable to invoke ", c.Name, " in ", c.Namespace)), } return nil, doErr } @@ -89,7 +91,7 @@ func (c CronFunction) InvokeFunction(i *types.Invoker) (*[]byte, error) { if readErr != nil { log.Printf("Error reading body") i.Responses <- types.InvokerResponse{ - Error: errors.Wrap(readErr, fmt.Sprint("unable to invoke ", c.Name)), + Error: errors.Wrap(readErr, fmt.Sprint("unable to invoke ", c.Name, " in ", c.Namespace)), } return nil, doErr } diff --git a/types/lookup_builder.go b/types/lookup_builder.go deleted file mode 100644 index bfe6728..0000000 --- a/types/lookup_builder.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) OpenFaaS Author(s) 2020. All rights reserved. -// Licensed under the MIT license. See LICENSE file in the project root for full license information. - -package types - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - - "github.com/openfaas-incubator/connector-sdk/types" - "github.com/openfaas/faas/gateway/requests" -) - -// FunctionLookupBuilder alias for types.FunctionLookupBuilder -type FunctionLookupBuilder types.FunctionLookupBuilder - -// GetFunctions requests the OpenFaaS gteway to return a list of all functions -func (lookup *FunctionLookupBuilder) GetFunctions() ([]requests.Function, error) { - req, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/system/functions", lookup.GatewayURL), nil) - - if lookup.Credentials != nil { - req.SetBasicAuth(lookup.Credentials.User, lookup.Credentials.Password) - } - - res, reqErr := lookup.Client.Do(req) - - if reqErr != nil { - return nil, reqErr - } - - if res.Body != nil { - defer res.Body.Close() - } - - bytesOut, _ := ioutil.ReadAll(res.Body) - - functions := []requests.Function{} - marshalErr := json.Unmarshal(bytesOut, &functions) - - if marshalErr != nil { - return nil, marshalErr - } - - return functions, nil -} diff --git a/types/scheduler.go b/types/scheduler.go index 5adcf9c..4e4df1a 100644 --- a/types/scheduler.go +++ b/types/scheduler.go @@ -4,7 +4,6 @@ package types import ( - "fmt" "log" "github.com/openfaas-incubator/connector-sdk/types" @@ -39,7 +38,7 @@ type ScheduledFunctions []ScheduledFunction // AddCronFunction adds a function to cron func (s *Scheduler) AddCronFunction(c CronFunction, invoker *types.Invoker) (ScheduledFunction, error) { eID, err := s.main.AddFunc(c.Schedule, func() { - log.Print(fmt.Sprint("Executed function: ", c.Name)) + log.Printf("Executed function: %s (ns=%s)", c.Name, c.Namespace) c.InvokeFunction(invoker) }) return ScheduledFunction{c, EntryID(eID)}, err @@ -72,7 +71,7 @@ func CheckSchedule(schedule string) bool { func (functions *ScheduledFunctions) Contains(cronFunc 
*CronFunction) bool { for _, f := range *functions { - if f.Function.Name == cronFunc.Name && f.Function.Schedule == cronFunc.Schedule { + if f.Function.Name == cronFunc.Name && f.Function.Namespace == cronFunc.Namespace && f.Function.Schedule == cronFunc.Schedule { return true } diff --git a/vendor/github.com/drone/envsubst/.drone.yml b/vendor/github.com/drone/envsubst/.drone.yml new file mode 100644 index 0000000..2b06213 --- /dev/null +++ b/vendor/github.com/drone/envsubst/.drone.yml @@ -0,0 +1,8 @@ +kind: pipeline +name: default + +steps: +- name: build + image: golang:1.11 + commands: + - go test -v ./... diff --git a/vendor/github.com/drone/envsubst/.gitignore b/vendor/github.com/drone/envsubst/.gitignore new file mode 100644 index 0000000..2d83068 --- /dev/null +++ b/vendor/github.com/drone/envsubst/.gitignore @@ -0,0 +1 @@ +coverage.out diff --git a/vendor/github.com/drone/envsubst/LICENSE b/vendor/github.com/drone/envsubst/LICENSE new file mode 100644 index 0000000..1de55b7 --- /dev/null +++ b/vendor/github.com/drone/envsubst/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 drone.io + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/drone/envsubst/README b/vendor/github.com/drone/envsubst/README new file mode 100644 index 0000000..31c2ac5 --- /dev/null +++ b/vendor/github.com/drone/envsubst/README @@ -0,0 +1,33 @@ +Go package for expanding variables in a string using ${var} syntax. Includes support for bash string replacement functions. + +Documentation: + + http://godoc.org/github.com/drone/envsubst + +Supported Functions: + + ${var^} + ${var^^} + ${var,} + ${var,,} + ${var:position} + ${var:position:length} + ${var#substring} + ${var##substring} + ${var%substring} + ${var%%substring} + ${var/substring/replacement} + ${var//substring/replacement} + ${var/#substring/replacement} + ${var/%substring/replacement} + ${#var} + ${var=default} + ${var:=default} + ${var:-default} + +Unsupported Functions: + + ${var-default} + ${var+default} + ${var:?default} + ${var:+default} diff --git a/vendor/github.com/drone/envsubst/eval.go b/vendor/github.com/drone/envsubst/eval.go new file mode 100644 index 0000000..375ca4c --- /dev/null +++ b/vendor/github.com/drone/envsubst/eval.go @@ -0,0 +1,19 @@ +package envsubst + +import "os" + +// Eval replaces ${var} in the string based on the mapping function. 
+func Eval(s string, mapping func(string) string) (string, error) { + t, err := Parse(s) + if err != nil { + return s, err + } + return t.Execute(mapping) +} + +// EvalEnv replaces ${var} in the string according to the values of the +// current environment variables. References to undefined variables are +// replaced by the empty string. +func EvalEnv(s string) (string, error) { + return Eval(s, os.Getenv) +} diff --git a/vendor/github.com/drone/envsubst/funcs.go b/vendor/github.com/drone/envsubst/funcs.go new file mode 100644 index 0000000..94a3be1 --- /dev/null +++ b/vendor/github.com/drone/envsubst/funcs.go @@ -0,0 +1,228 @@ +package envsubst + +import ( + "strconv" + "strings" + "unicode" + "unicode/utf8" + + "github.com/drone/envsubst/path" +) + +// defines a parameter substitution function. +type substituteFunc func(string, ...string) string + +// toLen returns the length of string s. +func toLen(s string, args ...string) string { + return strconv.Itoa(len(s)) +} + +// toLower returns a copy of the string s with all characters +// mapped to their lower case. +func toLower(s string, args ...string) string { + return strings.ToLower(s) +} + +// toUpper returns a copy of the string s with all characters +// mapped to their upper case. +func toUpper(s string, args ...string) string { + return strings.ToUpper(s) +} + +// toLowerFirst returns a copy of the string s with the first +// character mapped to its lower case. +func toLowerFirst(s string, args ...string) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(unicode.ToLower(r)) + s[n:] +} + +// toUpperFirst returns a copy of the string s with the first +// character mapped to its upper case. +func toUpperFirst(s string, args ...string) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(unicode.ToUpper(r)) + s[n:] +} + +// toDefault returns a copy of the string s if not empty, else +// returns a copy of the first string arugment. +func toDefault(s string, args ...string) string { + if len(s) == 0 && len(args) == 1 { + s = args[0] + } + return s +} + +// toSubstr returns a slice of the string s at the specified +// length and position. +func toSubstr(s string, args ...string) string { + if len(args) == 0 { + return s // should never happen + } + + pos, err := strconv.Atoi(args[0]) + if err != nil { + // bash returns the string if the position + // cannot be parsed. + return s + } + + if len(args) == 1 { + if pos < len(s) { + return s[pos:] + } + // if the position exceeds the length of the + // string an empty string is returned + return "" + } + + length, err := strconv.Atoi(args[1]) + if err != nil { + // bash returns the string if the length + // cannot be parsed. + return s + } + + if pos+length >= len(s) { + // if the position exceeds the length of the + // string just return the rest of it like bash + return s[pos:] + } + + return s[pos : pos+length] +} + +// replaceAll returns a copy of the string s with all instances +// of the substring replaced with the replacement string. +func replaceAll(s string, args ...string) string { + switch len(args) { + case 0: + return s + case 1: + return strings.Replace(s, args[0], "", -1) + default: + return strings.Replace(s, args[0], args[1], -1) + } +} + +// replaceFirst returns a copy of the string s with the first +// instance of the substring replaced with the replacement string. 
+func replaceFirst(s string, args ...string) string { + switch len(args) { + case 0: + return s + case 1: + return strings.Replace(s, args[0], "", 1) + default: + return strings.Replace(s, args[0], args[1], 1) + } +} + +// replacePrefix returns a copy of the string s with the matching +// prefix replaced with the replacement string. +func replacePrefix(s string, args ...string) string { + if len(args) != 2 { + return s + } + if strings.HasPrefix(s, args[0]) { + return strings.Replace(s, args[0], args[1], 1) + } + return s +} + +// replaceSuffix returns a copy of the string s with the matching +// suffix replaced with the replacement string. +func replaceSuffix(s string, args ...string) string { + if len(args) != 2 { + return s + } + if strings.HasSuffix(s, args[0]) { + s = strings.TrimSuffix(s, args[0]) + s = s + args[1] + } + return s +} + +// TODO + +func trimShortestPrefix(s string, args ...string) string { + if len(args) != 0 { + s = trimShortest(s, args[0]) + } + return s +} + +func trimShortestSuffix(s string, args ...string) string { + if len(args) != 0 { + r := reverse(s) + rarg := reverse(args[0]) + s = reverse(trimShortest(r, rarg)) + } + return s +} + +func trimLongestPrefix(s string, args ...string) string { + if len(args) != 0 { + s = trimLongest(s, args[0]) + } + return s +} + +func trimLongestSuffix(s string, args ...string) string { + if len(args) != 0 { + r := reverse(s) + rarg := reverse(args[0]) + s = reverse(trimLongest(r, rarg)) + } + return s +} + +func trimShortest(s, arg string) string { + var shortestMatch string + for i := 0; i < len(s); i++ { + match, err := path.Match(arg, s[0:len(s)-i]) + + if err != nil { + return s + } + + if match { + shortestMatch = s[0 : len(s)-i] + } + } + + if shortestMatch != "" { + return strings.TrimPrefix(s, shortestMatch) + } + + return s +} + +func trimLongest(s, arg string) string { + for i := 0; i < len(s); i++ { + match, err := path.Match(arg, s[0:len(s)-i]) + + if err != nil { + return s + } + + if match { + return strings.TrimPrefix(s, s[0:len(s)-i]) + } + } + + return s +} + +func reverse(s string) string { + r := []rune(s) + for i, j := 0, len(r)-1; i < len(r)/2; i, j = i+1, j-1 { + r[i], r[j] = r[j], r[i] + } + return string(r) +} diff --git a/vendor/github.com/drone/envsubst/go.mod b/vendor/github.com/drone/envsubst/go.mod new file mode 100644 index 0000000..7b2ba94 --- /dev/null +++ b/vendor/github.com/drone/envsubst/go.mod @@ -0,0 +1,3 @@ +module github.com/drone/envsubst + +require github.com/google/go-cmp v0.2.0 diff --git a/vendor/github.com/drone/envsubst/go.sum b/vendor/github.com/drone/envsubst/go.sum new file mode 100644 index 0000000..5f4f636 --- /dev/null +++ b/vendor/github.com/drone/envsubst/go.sum @@ -0,0 +1,2 @@ +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= diff --git a/vendor/github.com/drone/envsubst/parse/node.go b/vendor/github.com/drone/envsubst/parse/node.go new file mode 100644 index 0000000..09787eb --- /dev/null +++ b/vendor/github.com/drone/envsubst/parse/node.go @@ -0,0 +1,86 @@ +package parse + +// Node is an element in the parse tree. +type Node interface { + node() +} + +// empty string node +var empty = new(TextNode) + +// a template is represented by a tree consisting of one +// or more of the following nodes. +type ( + // TextNode represents a string of text. + TextNode struct { + Value string + } + + // FuncNode represents a string function. 
+ FuncNode struct { + Param string + Name string + Args []Node + } + + // ListNode represents a list of nodes. + ListNode struct { + Nodes []Node + } + + // ParamNode struct{ + // Name string + // } + // + // CaseNode struct { + // Name string + // First bool + // } + // + // LowerNode struct { + // Name string + // First bool + // } + // + // SubstrNode struct { + // Name string + // Pos Node + // Len Node + // } + // + // ReplaceNode struct { + // Name string + // Substring Node + // Replacement Node + // } + // + // TrimNode struct{ + // + // } + // + // DefaultNode struct { + // Name string + // Default Node + // } +) + +// newTextNode returns a new TextNode. +func newTextNode(text string) *TextNode { + return &TextNode{Value: text} +} + +// newListNode returns a new ListNode. +func newListNode(nodes ...Node) *ListNode { + return &ListNode{Nodes: nodes} +} + +// newFuncNode returns a new FuncNode. +func newFuncNode(name string) *FuncNode { + return &FuncNode{Param: name} +} + +// node() defines the node in a parse tree + +func (*TextNode) node() {} +func (*ListNode) node() {} +func (*FuncNode) node() {} diff --git a/vendor/github.com/drone/envsubst/parse/parse.go b/vendor/github.com/drone/envsubst/parse/parse.go new file mode 100644 index 0000000..683b44d --- /dev/null +++ b/vendor/github.com/drone/envsubst/parse/parse.go @@ -0,0 +1,374 @@ +package parse + +import "errors" + +// ErrBadSubstitution represents a substitution parsing error. +var ErrBadSubstitution = errors.New("bad substitution") + +// Tree is the representation of a single parsed SQL statement. +type Tree struct { + Root Node + + // Parsing only; cleared after parse. + scanner *scanner +} + +// Parse parses the string and returns a Tree. +func Parse(buf string) (*Tree, error) { + t := new(Tree) + t.scanner = new(scanner) + return t.Parse(buf) +} + +// Parse parses the string buffer to construct an ast +// representation for expansion. 
+func (t *Tree) Parse(buf string) (tree *Tree, err error) { + t.scanner.init(buf) + t.Root, err = t.parseAny() + return t, err +} + +func (t *Tree) parseAny() (Node, error) { + t.scanner.accept = acceptRune + t.scanner.mode = scanIdent | scanLbrack | scanEscape + + switch t.scanner.scan() { + case tokenIdent: + left := newTextNode( + t.scanner.string(), + ) + right, err := t.parseAny() + switch { + case err != nil: + return nil, err + case right == empty: + return left, nil + } + return newListNode(left, right), nil + case tokenEOF: + return empty, nil + case tokenLbrack: + left, err := t.parseFunc() + if err != nil { + return nil, err + } + + right, err := t.parseAny() + switch { + case err != nil: + return nil, err + case right == empty: + return left, nil + } + return newListNode(left, right), nil + } + + return nil, ErrBadSubstitution +} + +func (t *Tree) parseFunc() (Node, error) { + switch t.scanner.peek() { + case '#': + return t.parseLenFunc() + } + + var name string + t.scanner.accept = acceptIdent + t.scanner.mode = scanIdent + + switch t.scanner.scan() { + case tokenIdent: + name = t.scanner.string() + default: + return nil, ErrBadSubstitution + } + + switch t.scanner.peek() { + case ':': + return t.parseDefaultOrSubstr(name) + case '=': + return t.parseDefaultFunc(name) + case ',', '^': + return t.parseCasingFunc(name) + case '/': + return t.parseReplaceFunc(name) + case '#': + return t.parseRemoveFunc(name, acceptHashFunc) + case '%': + return t.parseRemoveFunc(name, acceptPercentFunc) + } + + t.scanner.accept = acceptIdent + t.scanner.mode = scanRbrack + switch t.scanner.scan() { + case tokenRbrack: + return newFuncNode(name), nil + default: + return nil, ErrBadSubstitution + } +} + +// parse a substitution function parameter. +func (t *Tree) parseParam(accept acceptFunc, mode byte) (Node, error) { + t.scanner.accept = accept + t.scanner.mode = mode | scanLbrack + switch t.scanner.scan() { + case tokenLbrack: + return t.parseFunc() + case tokenIdent: + return newTextNode( + t.scanner.string(), + ), nil + default: + return nil, ErrBadSubstitution + } +} + +// parse either a default or substring substitution function. 
+func (t *Tree) parseDefaultOrSubstr(name string) (Node, error) { + t.scanner.read() + r := t.scanner.peek() + t.scanner.unread() + switch r { + case '=', '-', '?', '+': + return t.parseDefaultFunc(name) + default: + return t.parseSubstrFunc(name) + } +} + +// parses the ${param:offset} string function +// parses the ${param:offset:length} string function +func (t *Tree) parseSubstrFunc(name string) (Node, error) { + node := new(FuncNode) + node.Param = name + + t.scanner.accept = acceptOneColon + t.scanner.mode = scanIdent + switch t.scanner.scan() { + case tokenIdent: + node.Name = t.scanner.string() + default: + return nil, ErrBadSubstitution + } + + // scan arg[1] + { + param, err := t.parseParam(rejectColonClose, scanIdent) + if err != nil { + return nil, err + } + + // param.Value = t.scanner.string() + node.Args = append(node.Args, param) + } + + // expect delimiter or close + t.scanner.accept = acceptColon + t.scanner.mode = scanIdent | scanRbrack + switch t.scanner.scan() { + case tokenRbrack: + return node, nil + case tokenIdent: + // no-op + default: + return nil, ErrBadSubstitution + } + + // scan arg[2] + { + param, err := t.parseParam(acceptNotClosing, scanIdent) + if err != nil { + return nil, err + } + node.Args = append(node.Args, param) + } + + return node, t.consumeRbrack() +} + +// parses the ${param%word} string function +// parses the ${param%%word} string function +// parses the ${param#word} string function +// parses the ${param##word} string function +func (t *Tree) parseRemoveFunc(name string, accept acceptFunc) (Node, error) { + node := new(FuncNode) + node.Param = name + + t.scanner.accept = accept + t.scanner.mode = scanIdent + switch t.scanner.scan() { + case tokenIdent: + node.Name = t.scanner.string() + default: + return nil, ErrBadSubstitution + } + + // scan arg[1] + { + param, err := t.parseParam(acceptNotClosing, scanIdent) + if err != nil { + return nil, err + } + + // param.Value = t.scanner.string() + node.Args = append(node.Args, param) + } + + return node, t.consumeRbrack() +} + +// parses the ${param/pattern/string} string function +// parses the ${param//pattern/string} string function +// parses the ${param/#pattern/string} string function +// parses the ${param/%pattern/string} string function +func (t *Tree) parseReplaceFunc(name string) (Node, error) { + node := new(FuncNode) + node.Param = name + + t.scanner.accept = acceptReplaceFunc + t.scanner.mode = scanIdent + switch t.scanner.scan() { + case tokenIdent: + node.Name = t.scanner.string() + default: + return nil, ErrBadSubstitution + } + + // scan arg[1] + { + param, err := t.parseParam(acceptNotSlash, scanIdent|scanEscape) + if err != nil { + return nil, err + } + node.Args = append(node.Args, param) + } + + // expect delimiter + t.scanner.accept = acceptSlash + t.scanner.mode = scanIdent + switch t.scanner.scan() { + case tokenIdent: + // no-op + default: + return nil, ErrBadSubstitution + } + + // check for blank string + switch t.scanner.peek() { + case '}': + return node, t.consumeRbrack() + } + + // scan arg[2] + { + param, err := t.parseParam(acceptNotClosing, scanIdent|scanEscape) + if err != nil { + return nil, err + } + node.Args = append(node.Args, param) + } + + return node, t.consumeRbrack() +} + +// parses the ${parameter=word} string function +// parses the ${parameter:=word} string function +// parses the ${parameter:-word} string function +// parses the ${parameter:?word} string function +// parses the ${parameter:+word} string function +func (t *Tree) 
parseDefaultFunc(name string) (Node, error) { + node := new(FuncNode) + node.Param = name + + t.scanner.accept = acceptDefaultFunc + if t.scanner.peek() == '=' { + t.scanner.accept = acceptOneEqual + } + t.scanner.mode = scanIdent + switch t.scanner.scan() { + case tokenIdent: + node.Name = t.scanner.string() + default: + return nil, ErrBadSubstitution + } + + // scan arg[1] + { + param, err := t.parseParam(acceptNotClosing, scanIdent) + if err != nil { + return nil, err + } + + // param.Value = t.scanner.string() + node.Args = append(node.Args, param) + } + + return node, t.consumeRbrack() +} + +// parses the ${param,} string function +// parses the ${param,,} string function +// parses the ${param^} string function +// parses the ${param^^} string function +func (t *Tree) parseCasingFunc(name string) (Node, error) { + node := new(FuncNode) + node.Param = name + + t.scanner.accept = acceptCasingFunc + t.scanner.mode = scanIdent + switch t.scanner.scan() { + case tokenIdent: + node.Name = t.scanner.string() + default: + return nil, ErrBadSubstitution + } + + return node, t.consumeRbrack() +} + +// parses the ${#param} string function +func (t *Tree) parseLenFunc() (Node, error) { + node := new(FuncNode) + + t.scanner.accept = acceptOneHash + t.scanner.mode = scanIdent + switch t.scanner.scan() { + case tokenIdent: + node.Name = t.scanner.string() + default: + return nil, ErrBadSubstitution + } + + t.scanner.accept = acceptIdent + t.scanner.mode = scanIdent + switch t.scanner.scan() { + case tokenIdent: + node.Param = t.scanner.string() + default: + return nil, ErrBadSubstitution + } + + return node, t.consumeRbrack() +} + +// consumeRbrack consumes a right closing bracket. If a closing +// bracket token is not consumed an ErrBadSubstitution is returned. +func (t *Tree) consumeRbrack() error { + t.scanner.mode = scanRbrack + if t.scanner.scan() != tokenRbrack { + return ErrBadSubstitution + } + return nil +} + +// consumeDelimiter consumes a function argument delimiter. If a +// delimiter is not consumed an ErrBadSubstitution is returned. +// func (t *Tree) consumeDelimiter(accept acceptFunc, mode uint) error { +// t.scanner.accept = accept +// t.scanner.mode = mode +// if t.scanner.scan() != tokenRbrack { +// return ErrBadSubstitution +// } +// return nil +// } diff --git a/vendor/github.com/drone/envsubst/parse/scan.go b/vendor/github.com/drone/envsubst/parse/scan.go new file mode 100644 index 0000000..94812c1 --- /dev/null +++ b/vendor/github.com/drone/envsubst/parse/scan.go @@ -0,0 +1,280 @@ +package parse + +import ( + "unicode" + "unicode/utf8" +) + +// eof rune sent when end of file is reached +var eof = rune(0) + +// token is a lexical token. +type token uint + +// list of lexical tokens. +const ( + // special tokens + tokenIllegal token = iota + tokenEOF + + // identifiers and literals + tokenIdent + + // operators and delimiters + tokenLbrack + tokenRbrack + tokenQuote +) + +// predefined mode bits to control recognition of tokens. +const ( + scanIdent byte = 1 << iota + scanLbrack + scanRbrack + scanEscape +) + +// returns true if rune is accepted. +type acceptFunc func(r rune, i int) bool + +// scanner implements a lexical scanner that reads unicode +// characters and tokens from a string buffer. +type scanner struct { + buf string + pos int + start int + width int + mode byte + + accept acceptFunc +} + +// init initializes a scanner with a new buffer. 
+func (s *scanner) init(buf string) { + s.buf = buf + s.pos = 0 + s.start = 0 + s.width = 0 + s.accept = nil +} + +// read returns the next unicode character. It returns eof at +// the end of the string buffer. +func (s *scanner) read() rune { + if s.pos >= len(s.buf) { + s.width = 0 + return eof + } + r, w := utf8.DecodeRuneInString(s.buf[s.pos:]) + s.width = w + s.pos += s.width + return r +} + +func (s *scanner) unread() { + s.pos -= s.width +} + +// skip skips over the curring unicode character in the buffer +// by slicing and removing from the buffer. +func (s *scanner) skip() { + l := s.buf[:s.pos-1] + r := s.buf[s.pos:] + s.buf = l + r +} + +// peek returns the next unicode character in the buffer without +// advancing the scanner. It returns eof if the scanner's position +// is at the last character of the source. +func (s *scanner) peek() rune { + r := s.read() + s.unread() + return r +} + +// string returns the string corresponding to the most recently +// scanned token. Valid after calling scan(). +func (s *scanner) string() string { + return s.buf[s.start:s.pos] +} + +// scan reads the next token or Unicode character from source and +// returns it. It returns EOF at the end of the source. +func (s *scanner) scan() token { + s.start = s.pos + r := s.read() + switch { + case r == eof: + return tokenEOF + case s.scanLbrack(r): + return tokenLbrack + case s.scanRbrack(r): + return tokenRbrack + case s.scanIdent(r): + return tokenIdent + } + return tokenIllegal +} + +// scanIdent reads the next token or Unicode character from source +// and returns true if the Ident character is accepted. +func (s *scanner) scanIdent(r rune) bool { + if s.mode&scanIdent == 0 { + return false + } + if s.scanEscaped(r) { + s.skip() + } else if !s.accept(r, s.pos-s.start) { + return false + } +loop: + for { + r := s.read() + switch { + case r == eof: + s.unread() + break loop + case s.scanLbrack(r): + s.unread() + s.unread() + break loop + } + if s.scanEscaped(r) { + s.skip() + continue + } + if !s.accept(r, s.pos-s.start) { + s.unread() + break loop + } + } + return true +} + +// scanLbrack reads the next token or Unicode character from source +// and returns true if the open bracket is encountered. +func (s *scanner) scanLbrack(r rune) bool { + if s.mode&scanLbrack == 0 { + return false + } + if r == '$' { + if s.read() == '{' { + return true + } + s.unread() + } + return false +} + +// scanRbrack reads the next token or Unicode character from source +// and returns true if the closing bracket is encountered. +func (s *scanner) scanRbrack(r rune) bool { + if s.mode&scanRbrack == 0 { + return false + } + return r == '}' +} + +// scanEscaped reads the next token or Unicode character from source +// and returns true if it being escaped and should be sipped. +func (s *scanner) scanEscaped(r rune) bool { + if s.mode&scanEscape == 0 { + return false + } + if r == '$' { + if s.peek() == '$' { + return true + } + } + if r != '\\' { + return false + } + switch s.peek() { + case '/', '\\': + return true + default: + return false + } +} + +// +// scanner functions accept or reject runes. 
+// + +func acceptRune(r rune, i int) bool { + return true +} + +func acceptIdent(r rune, i int) bool { + return unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' +} + +func acceptColon(r rune, i int) bool { + return r == ':' +} + +func acceptOneHash(r rune, i int) bool { + return r == '#' && i == 1 +} + +func acceptNone(r rune, i int) bool { + return false +} + +func acceptNotClosing(r rune, i int) bool { + return r != '}' +} + +func acceptHashFunc(r rune, i int) bool { + return r == '#' && i < 3 +} + +func acceptPercentFunc(r rune, i int) bool { + return r == '%' && i < 3 +} + +func acceptDefaultFunc(r rune, i int) bool { + switch { + case i == 1 && r == ':': + return true + case i == 2 && (r == '=' || r == '-' || r == '?' || r == '+'): + return true + default: + return false + } +} + +func acceptReplaceFunc(r rune, i int) bool { + switch { + case i == 1 && r == '/': + return true + case i == 2 && (r == '/' || r == '#' || r == '%'): + return true + default: + return false + } +} + +func acceptOneEqual(r rune, i int) bool { + return i == 1 && r == '=' +} + +func acceptOneColon(r rune, i int) bool { + return i == 1 && r == ':' +} + +func rejectColonClose(r rune, i int) bool { + return r != ':' && r != '}' +} + +func acceptSlash(r rune, i int) bool { + return r == '/' +} + +func acceptNotSlash(r rune, i int) bool { + return r != '/' +} + +func acceptCasingFunc(r rune, i int) bool { + return (r == ',' || r == '^') && i < 3 +} diff --git a/vendor/github.com/drone/envsubst/path/match.go b/vendor/github.com/drone/envsubst/path/match.go new file mode 100644 index 0000000..9306b0c --- /dev/null +++ b/vendor/github.com/drone/envsubst/path/match.go @@ -0,0 +1,207 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package path + +import ( + "errors" + "unicode/utf8" +) + +// ErrBadPattern indicates a globbing pattern was malformed. +var ErrBadPattern = errors.New("syntax error in pattern") + +// Match reports whether name matches the shell file name pattern. +// The pattern syntax is: +// +// pattern: +// { term } +// term: +// '*' matches any sequence of non-/ characters +// '?' matches any single non-/ character +// '[' [ '^' ] { character-range } ']' +// character class (must be non-empty) +// c matches character c (c != '*', '?', '\\', '[') +// '\\' c matches character c +// +// character-range: +// c matches character c (c != '\\', '-', ']') +// '\\' c matches character c +// lo '-' hi matches character c for lo <= c <= hi +// +// Match requires pattern to match all of name, not just a substring. +// The only possible returned error is ErrBadPattern, when pattern +// is malformed. +// +func Match(pattern, name string) (matched bool, err error) { +Pattern: + for len(pattern) > 0 { + var star bool + var chunk string + star, chunk, pattern = scanChunk(pattern) + if star && chunk == "" { + // Trailing * matches rest of string unless it has a /. + // return !strings.Contains(name, "/"), nil + + // Return rest of string + return true, nil + } + // Look for match at current position. + t, ok, err := matchChunk(chunk, name) + // if we're the last chunk, make sure we've exhausted the name + // otherwise we'll give a false result even if we could still match + // using the star + if ok && (len(t) == 0 || len(pattern) > 0) { + name = t + continue + } + if err != nil { + return false, err + } + if star { + // Look for match skipping i+1 bytes. 
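+			// A chunk that follows a star may match anywhere, so retry it
+			// against each successively shorter suffix of name.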
+ for i := 0; i < len(name); i++ { + t, ok, err := matchChunk(chunk, name[i+1:]) + if ok { + // if we're the last chunk, make sure we exhausted the name + if len(pattern) == 0 && len(t) > 0 { + continue + } + name = t + continue Pattern + } + if err != nil { + return false, err + } + } + } + return false, nil + } + return len(name) == 0, nil +} + +// scanChunk gets the next segment of pattern, which is a non-star string +// possibly preceded by a star. +func scanChunk(pattern string) (star bool, chunk, rest string) { + for len(pattern) > 0 && pattern[0] == '*' { + pattern = pattern[1:] + star = true + } + inrange := false + var i int +Scan: + for i = 0; i < len(pattern); i++ { + switch pattern[i] { + case '\\': + // error check handled in matchChunk: bad pattern. + if i+1 < len(pattern) { + i++ + } + case '[': + inrange = true + case ']': + inrange = false + case '*': + if !inrange { + break Scan + } + } + } + return star, pattern[0:i], pattern[i:] +} + +// matchChunk checks whether chunk matches the beginning of s. +// If so, it returns the remainder of s (after the match). +// Chunk is all single-character operators: literals, char classes, and ?. +func matchChunk(chunk, s string) (rest string, ok bool, err error) { + for len(chunk) > 0 { + if len(s) == 0 { + return + } + switch chunk[0] { + case '[': + // character class + r, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + // possibly negated + notNegated := true + if len(chunk) > 0 && chunk[0] == '^' { + notNegated = false + chunk = chunk[1:] + } + // parse all ranges + match := false + nrange := 0 + for { + if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 { + chunk = chunk[1:] + break + } + var lo, hi rune + if lo, chunk, err = getEsc(chunk); err != nil { + return + } + hi = lo + if chunk[0] == '-' { + if hi, chunk, err = getEsc(chunk[1:]); err != nil { + return + } + } + if lo <= r && r <= hi { + match = true + } + nrange++ + } + if match != notNegated { + return + } + + case '?': + _, n := utf8.DecodeRuneInString(s) + s = s[n:] + chunk = chunk[1:] + + case '\\': + chunk = chunk[1:] + if len(chunk) == 0 { + err = ErrBadPattern + return + } + fallthrough + + default: + if chunk[0] != s[0] { + return + } + s = s[1:] + chunk = chunk[1:] + } + } + return s, true, nil +} + +// getEsc gets a possibly-escaped character from chunk, for a character class. +func getEsc(chunk string) (r rune, nchunk string, err error) { + if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' { + err = ErrBadPattern + return + } + if chunk[0] == '\\' { + chunk = chunk[1:] + if len(chunk) == 0 { + err = ErrBadPattern + return + } + } + r, n := utf8.DecodeRuneInString(chunk) + if r == utf8.RuneError && n == 1 { + err = ErrBadPattern + } + nchunk = chunk[n:] + if len(nchunk) == 0 { + err = ErrBadPattern + } + return +} diff --git a/vendor/github.com/drone/envsubst/template.go b/vendor/github.com/drone/envsubst/template.go new file mode 100644 index 0000000..b725d4f --- /dev/null +++ b/vendor/github.com/drone/envsubst/template.go @@ -0,0 +1,157 @@ +package envsubst + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/drone/envsubst/parse" +) + +// state represents the state of template execution. It is not part of the +// template so that multiple executions can run in parallel. +type state struct { + template *Template + writer io.Writer + node parse.Node // current node + + // maps variable names to values + mapper func(string) string +} + +// Template is the representation of a parsed shell format string. 
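+// Execution state lives in the separate state type above, so a single
+// parsed Template can be executed by multiple goroutines in parallel.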
+type Template struct {
+	tree *parse.Tree
+}
+
+// Parse creates a new shell format template and parses the template
+// definition from string s.
+func Parse(s string) (t *Template, err error) {
+	t = new(Template)
+	t.tree, err = parse.Parse(s)
+	if err != nil {
+		return nil, err
+	}
+	return t, nil
+}
+
+// ParseFile creates a new shell format template and parses the template
+// definition from the named file.
+func ParseFile(path string) (*Template, error) {
+	b, err := ioutil.ReadFile(path)
+	if err != nil {
+		return nil, err
+	}
+	return Parse(string(b))
+}
+
+// Execute applies a parsed template to the specified data mapping.
+func (t *Template) Execute(mapping func(string) string) (str string, err error) {
+	b := new(bytes.Buffer)
+	s := new(state)
+	s.node = t.tree.Root
+	s.mapper = mapping
+	s.writer = b
+	err = t.eval(s)
+	if err != nil {
+		return
+	}
+	return b.String(), nil
+}
+
+func (t *Template) eval(s *state) (err error) {
+	switch node := s.node.(type) {
+	case *parse.TextNode:
+		err = t.evalText(s, node)
+	case *parse.FuncNode:
+		err = t.evalFunc(s, node)
+	case *parse.ListNode:
+		err = t.evalList(s, node)
+	}
+	return err
+}
+
+func (t *Template) evalText(s *state, node *parse.TextNode) error {
+	_, err := io.WriteString(s.writer, node.Value)
+	return err
+}
+
+func (t *Template) evalList(s *state, node *parse.ListNode) (err error) {
+	for _, n := range node.Nodes {
+		s.node = n
+		err = t.eval(s)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (t *Template) evalFunc(s *state, node *parse.FuncNode) error {
+	var w = s.writer
+	var buf bytes.Buffer
+	var args []string
+	for _, n := range node.Args {
+		buf.Reset()
+		s.writer = &buf
+		s.node = n
+		err := t.eval(s)
+		if err != nil {
+			return err
+		}
+		args = append(args, buf.String())
+	}
+
+	// restore the original writer
+	s.writer = w
+	s.node = node
+
+	v := s.mapper(node.Param)
+
+	fn := lookupFunc(node.Name, len(args))
+
+	_, err := io.WriteString(s.writer, fn(v, args...))
+	return err
+}
+
+// lookupFunc returns the parameter substitution function by name. If the
+// named function does not exist, a default function is returned.
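+// For example, ${HOME^^} parses to the Name "^^", which maps to toUpper
+// below, while any unrecognized name falls through to toDefault.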
+func lookupFunc(name string, args int) substituteFunc { + switch name { + case ",": + return toLowerFirst + case ",,": + return toLower + case "^": + return toUpperFirst + case "^^": + return toUpper + case "#": + if args == 0 { + return toLen + } + return trimShortestPrefix + case "##": + return trimLongestPrefix + case "%": + return trimShortestSuffix + case "%%": + return trimLongestSuffix + case ":": + return toSubstr + case "/#": + return replacePrefix + case "/%": + return replaceSuffix + case "/": + return replaceFirst + case "//": + return replaceAll + case "=", ":=", ":-": + return toDefault + case ":?", ":+", "-", "+": + return toDefault + default: + return toDefault + } +} diff --git a/vendor/github.com/mitchellh/go-homedir/LICENSE b/vendor/github.com/mitchellh/go-homedir/LICENSE new file mode 100644 index 0000000..f9c841a --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/go-homedir/README.md b/vendor/github.com/mitchellh/go-homedir/README.md new file mode 100644 index 0000000..d70706d --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/README.md @@ -0,0 +1,14 @@ +# go-homedir + +This is a Go library for detecting the user's home directory without +the use of cgo, so the library can be used in cross-compilation environments. + +Usage is incredibly simple, just call `homedir.Dir()` to get the home directory +for a user, and `homedir.Expand()` to expand the `~` in a path to the home +directory. + +**Why not just use `os/user`?** The built-in `os/user` package requires +cgo on Darwin systems. This means that any Go code that uses that package +cannot cross compile. But 99% of the time the use for `os/user` is just to +retrieve the home directory, which we can do for the current user without +cgo. This library does that, enabling cross-compilation. 
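+
+A minimal usage sketch (illustrative only; not part of the upstream README):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	homedir "github.com/mitchellh/go-homedir"
+)
+
+func main() {
+	// Dir caches its result unless homedir.DisableCache is set.
+	home, err := homedir.Dir()
+	if err != nil {
+		panic(err)
+	}
+
+	// Expand only rewrites paths that begin with "~".
+	cfg, err := homedir.Expand("~/.openfaas/config.yml")
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(home, cfg)
+}
+```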
diff --git a/vendor/github.com/mitchellh/go-homedir/go.mod b/vendor/github.com/mitchellh/go-homedir/go.mod new file mode 100644 index 0000000..7efa09a --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/go-homedir diff --git a/vendor/github.com/mitchellh/go-homedir/homedir.go b/vendor/github.com/mitchellh/go-homedir/homedir.go new file mode 100644 index 0000000..2537853 --- /dev/null +++ b/vendor/github.com/mitchellh/go-homedir/homedir.go @@ -0,0 +1,167 @@ +package homedir + +import ( + "bytes" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" +) + +// DisableCache will disable caching of the home directory. Caching is enabled +// by default. +var DisableCache bool + +var homedirCache string +var cacheLock sync.RWMutex + +// Dir returns the home directory for the executing user. +// +// This uses an OS-specific method for discovering the home directory. +// An error is returned if a home directory cannot be detected. +func Dir() (string, error) { + if !DisableCache { + cacheLock.RLock() + cached := homedirCache + cacheLock.RUnlock() + if cached != "" { + return cached, nil + } + } + + cacheLock.Lock() + defer cacheLock.Unlock() + + var result string + var err error + if runtime.GOOS == "windows" { + result, err = dirWindows() + } else { + // Unix-like system, so just assume Unix + result, err = dirUnix() + } + + if err != nil { + return "", err + } + homedirCache = result + return result, nil +} + +// Expand expands the path to include the home directory if the path +// is prefixed with `~`. If it isn't prefixed with `~`, the path is +// returned as-is. +func Expand(path string) (string, error) { + if len(path) == 0 { + return path, nil + } + + if path[0] != '~' { + return path, nil + } + + if len(path) > 1 && path[1] != '/' && path[1] != '\\' { + return "", errors.New("cannot expand user-specific home dir") + } + + dir, err := Dir() + if err != nil { + return "", err + } + + return filepath.Join(dir, path[1:]), nil +} + +// Reset clears the cache, forcing the next call to Dir to re-detect +// the home directory. This generally never has to be called, but can be +// useful in tests if you're modifying the home directory via the HOME +// env var or something. +func Reset() { + cacheLock.Lock() + defer cacheLock.Unlock() + homedirCache = "" +} + +func dirUnix() (string, error) { + homeEnv := "HOME" + if runtime.GOOS == "plan9" { + // On plan9, env vars are lowercase. + homeEnv = "home" + } + + // First prefer the HOME environmental variable + if home := os.Getenv(homeEnv); home != "" { + return home, nil + } + + var stdout bytes.Buffer + + // If that fails, try OS specific commands + if runtime.GOOS == "darwin" { + cmd := exec.Command("sh", "-c", `dscl -q . -read /Users/"$(whoami)" NFSHomeDirectory | sed 's/^[^ ]*: //'`) + cmd.Stdout = &stdout + if err := cmd.Run(); err == nil { + result := strings.TrimSpace(stdout.String()) + if result != "" { + return result, nil + } + } + } else { + cmd := exec.Command("getent", "passwd", strconv.Itoa(os.Getuid())) + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + // If the error is ErrNotFound, we ignore it. Otherwise, return it. 
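+			// getent may be missing on minimal systems; in that case the
+			// "cd && pwd" shell fallback below is still attempted.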
+ if err != exec.ErrNotFound { + return "", err + } + } else { + if passwd := strings.TrimSpace(stdout.String()); passwd != "" { + // username:password:uid:gid:gecos:home:shell + passwdParts := strings.SplitN(passwd, ":", 7) + if len(passwdParts) > 5 { + return passwdParts[5], nil + } + } + } + } + + // If all else fails, try the shell + stdout.Reset() + cmd := exec.Command("sh", "-c", "cd && pwd") + cmd.Stdout = &stdout + if err := cmd.Run(); err != nil { + return "", err + } + + result := strings.TrimSpace(stdout.String()) + if result == "" { + return "", errors.New("blank output when reading home directory") + } + + return result, nil +} + +func dirWindows() (string, error) { + // First prefer the HOME environmental variable + if home := os.Getenv("HOME"); home != "" { + return home, nil + } + + // Prefer standard environment variable USERPROFILE + if home := os.Getenv("USERPROFILE"); home != "" { + return home, nil + } + + drive := os.Getenv("HOMEDRIVE") + path := os.Getenv("HOMEPATH") + home := drive + path + if drive == "" || path == "" { + return "", errors.New("HOMEDRIVE, HOMEPATH, or USERPROFILE are blank") + } + + return home, nil +} diff --git a/vendor/github.com/openfaas-incubator/connector-sdk/types/controller.go b/vendor/github.com/openfaas-incubator/connector-sdk/types/controller.go index 4bd8533..ddd7e76 100644 --- a/vendor/github.com/openfaas-incubator/connector-sdk/types/controller.go +++ b/vendor/github.com/openfaas-incubator/connector-sdk/types/controller.go @@ -1,6 +1,11 @@ +// Copyright (c) OpenFaaS Author(s) 2019. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. + package types import ( + "context" + "fmt" "log" "sync" "time" @@ -24,11 +29,29 @@ type ControllerConfig struct { // RebuildInterval the interval at which the topic map is rebuilt RebuildInterval time.Duration + + // TopicAnnotationDelimiter defines the character upon which to split the Topic annotation value + TopicAnnotationDelimiter string + + // AsyncFunctionInvocation if true points to the asynchronous function route + AsyncFunctionInvocation bool + + // PrintSync indicates whether the sync should be logged. + PrintSync bool +} + +// Controller is used to invoke functions on a per-topic basis and to subscribe to responses returned by said functions. +type Controller interface { + Subscribe(subscriber ResponseSubscriber) + Invoke(topic string, message *[]byte) + InvokeWithContext(ctx context.Context, topic string, message *[]byte) + BeginMapBuilder() + Topics() []string } -// Controller for the connector SDK -type Controller struct { - // Config for the Controller +// controller is the default implementation of the Controller interface. 
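+// It is unexported so that callers construct it via NewController and
+// program against the Controller interface rather than the concrete type.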
+type controller struct { + // Config for the controller Config *ControllerConfig // Invoker to invoke functions via HTTP(s) @@ -50,9 +73,11 @@ type Controller struct { } // NewController create a new connector SDK controller -func NewController(credentials *auth.BasicAuthCredentials, config *ControllerConfig) *Controller { +func NewController(credentials *auth.BasicAuthCredentials, config *ControllerConfig) Controller { - invoker := NewInvoker(config.GatewayURL, + gatewayFunctionPath := gatewayRoute(config) + + invoker := NewInvoker(gatewayFunctionPath, MakeClient(config.UpstreamTimeout), config.PrintResponse) @@ -60,7 +85,7 @@ func NewController(credentials *auth.BasicAuthCredentials, config *ControllerCon topicMap := NewTopicMap() - controller := Controller{ + c := controller{ Config: config, Invoker: invoker, TopicMap: &topicMap, @@ -71,10 +96,10 @@ func NewController(credentials *auth.BasicAuthCredentials, config *ControllerCon if config.PrintResponse { // printer := &{} - controller.Subscribe(&ResponsePrinter{config.PrintResponseBody}) + c.Subscribe(&ResponsePrinter{config.PrintResponseBody}) } - go func(ch *chan InvokerResponse, controller *Controller) { + go func(ch *chan InvokerResponse, controller *controller) { for { res := <-*ch @@ -84,16 +109,16 @@ func NewController(credentials *auth.BasicAuthCredentials, config *ControllerCon } controller.Lock.RUnlock() } - }(&invoker.Responses, &controller) + }(&invoker.Responses, &c) - return &controller + return &c } // Subscribe adds a ResponseSubscriber to the list of subscribers // which receive messages upon function invocation or error // Note: it is not possible to Unsubscribe at this point using -// the API of the Controller -func (c *Controller) Subscribe(subscriber ResponseSubscriber) { +// the API of the controller +func (c *controller) Subscribe(subscriber ResponseSubscriber) { c.Lock.Lock() defer c.Lock.Unlock() c.Subscribers = append(c.Subscribers, subscriber) @@ -101,36 +126,64 @@ func (c *Controller) Subscribe(subscriber ResponseSubscriber) { // Invoke attempts to invoke any functions which match the // topic the incoming message was published on. -func (c *Controller) Invoke(topic string, message *[]byte) { - c.Invoker.Invoke(c.TopicMap, topic, message) +func (c *controller) Invoke(topic string, message *[]byte) { + c.InvokeWithContext(context.Background(), topic, message) +} + +// InvokeWithContext attempts to invoke any functions which match the topic +// the incoming message was published on while propagating context. +func (c *controller) InvokeWithContext(ctx context.Context, topic string, message *[]byte) { + c.Invoker.InvokeWithContext(ctx, c.TopicMap, topic, message) } // BeginMapBuilder begins to build a map of function->topic by // querying the API gateway. 
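+// The lookup is built once immediately and then rebuilt on every tick of
+// Config.RebuildInterval.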
-func (c *Controller) BeginMapBuilder() { +func (c *controller) BeginMapBuilder() { lookupBuilder := FunctionLookupBuilder{ - GatewayURL: c.Config.GatewayURL, - Client: MakeClient(c.Config.UpstreamTimeout), - Credentials: c.Credentials, + GatewayURL: c.Config.GatewayURL, + Client: MakeClient(c.Config.UpstreamTimeout), + Credentials: c.Credentials, + TopicDelimiter: c.Config.TopicAnnotationDelimiter, } ticker := time.NewTicker(c.Config.RebuildInterval) - go synchronizeLookups(ticker, &lookupBuilder, c.TopicMap) + go c.synchronizeLookups(ticker, &lookupBuilder, c.TopicMap) } -func synchronizeLookups(ticker *time.Ticker, +func (c *controller) synchronizeLookups(ticker *time.Ticker, lookupBuilder *FunctionLookupBuilder, topicMap *TopicMap) { - for { - <-ticker.C + fn := func() { lookups, err := lookupBuilder.Build() if err != nil { log.Fatalln(err) } - log.Println("Syncing topic map") + if c.Config.PrintSync { + log.Println("Syncing topic map") + } + topicMap.Sync(&lookups) } + + fn() + for { + <-ticker.C + fn() + } +} + +// Topics gets the list of topics that functions have indicated should +// be used as triggers. +func (c *controller) Topics() []string { + return c.TopicMap.Topics() +} + +func gatewayRoute(config *ControllerConfig) string { + if config.AsyncFunctionInvocation == true { + return fmt.Sprintf("%s/%s", config.GatewayURL, "async-function") + } + return fmt.Sprintf("%s/%s", config.GatewayURL, "function") } diff --git a/vendor/github.com/openfaas-incubator/connector-sdk/types/credentials.go b/vendor/github.com/openfaas-incubator/connector-sdk/types/credentials.go index bf4684d..2e9a859 100644 --- a/vendor/github.com/openfaas-incubator/connector-sdk/types/credentials.go +++ b/vendor/github.com/openfaas-incubator/connector-sdk/types/credentials.go @@ -1,3 +1,6 @@ +// Copyright (c) OpenFaaS Author(s) 2019. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. + package types import ( diff --git a/vendor/github.com/openfaas-incubator/connector-sdk/types/function_list_builder.go b/vendor/github.com/openfaas-incubator/connector-sdk/types/function_list_builder.go index 0f4ffc1..507cc07 100644 --- a/vendor/github.com/openfaas-incubator/connector-sdk/types/function_list_builder.go +++ b/vendor/github.com/openfaas-incubator/connector-sdk/types/function_list_builder.go @@ -1,4 +1,4 @@ -// Copyright (c) OpenFaaS Project 2018. All rights reserved. +// Copyright (c) OpenFaaS Author(s) 2019. All rights reserved. // Licensed under the MIT license. See LICENSE file in the project root for full license information. 
package types @@ -8,26 +8,78 @@ import ( "fmt" "io/ioutil" "net/http" + "net/url" + "strings" "github.com/openfaas/faas-provider/auth" - "github.com/openfaas/faas/gateway/requests" + "github.com/openfaas/faas-provider/types" + "github.com/pkg/errors" ) // FunctionLookupBuilder builds a list of OpenFaaS functions type FunctionLookupBuilder struct { - GatewayURL string - Client *http.Client - Credentials *auth.BasicAuthCredentials + GatewayURL string + Client *http.Client + Credentials *auth.BasicAuthCredentials + TopicDelimiter string } -// Build compiles a map of topic names and functions that have -// advertised to receive messages on said topic -func (s *FunctionLookupBuilder) Build() (map[string][]string, error) { - var err error - serviceMap := make(map[string][]string) +//getNamespaces get openfaas namespaces +func (s *FunctionLookupBuilder) getNamespaces() ([]string, error) { + var ( + err error + namespaces []string + ) + req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/system/namespaces", s.GatewayURL), nil) + if err != nil { + return namespaces, err + } - req, _ := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/system/functions", s.GatewayURL), nil) + if s.Credentials != nil { + req.SetBasicAuth(s.Credentials.User, s.Credentials.Password) + } + res, err := s.Client.Do(req) + if err != nil { + return namespaces, err + } + + if res.Body != nil { + defer res.Body.Close() + } + + if res.StatusCode != http.StatusNotFound { + bytesOut, err := ioutil.ReadAll(res.Body) + if err != nil { + return namespaces, err + } + + if len(bytesOut) == 0 { + return namespaces, nil + } + + err = json.Unmarshal(bytesOut, &namespaces) + if err != nil { + return namespaces, err + } + } + + return namespaces, err +} + +func (s *FunctionLookupBuilder) getFunctions(namespace string) ([]types.FunctionStatus, error) { + gateway := fmt.Sprintf("%s/system/functions", s.GatewayURL) + gatewayURL, err := url.Parse(gateway) + if err != nil { + return []types.FunctionStatus{}, fmt.Errorf("invalid gateway URL: %s", err.Error()) + } + if len(namespace) > 0 { + query := gatewayURL.Query() + query.Set("namespace", namespace) + gatewayURL.RawQuery = query.Encode() + } + + req, _ := http.NewRequest(http.MethodGet, gatewayURL.String(), nil) if s.Credentials != nil { req.SetBasicAuth(s.Credentials.User, s.Credentials.Password) } @@ -35,7 +87,7 @@ func (s *FunctionLookupBuilder) Build() (map[string][]string, error) { res, reqErr := s.Client.Do(req) if reqErr != nil { - return serviceMap, reqErr + return []types.FunctionStatus{}, reqErr } if res.Body != nil { @@ -44,26 +96,91 @@ func (s *FunctionLookupBuilder) Build() (map[string][]string, error) { bytesOut, _ := ioutil.ReadAll(res.Body) - functions := []requests.Function{} + functions := []types.FunctionStatus{} marshalErr := json.Unmarshal(bytesOut, &functions) if marshalErr != nil { - return serviceMap, marshalErr + return []types.FunctionStatus{}, errors.Wrap(marshalErr, fmt.Sprintf("unable to unmarshal value: %q", string(bytesOut))) } - for _, function := range functions { + return functions, nil +} + +// Build compiles a map of topic names and functions that have +// advertised to receive messages on said topic +func (s *FunctionLookupBuilder) Build() (map[string][]string, error) { + var ( + err error + ) + + namespaces, err := s.getNamespaces() + if err != nil { + return map[string][]string{}, err + } + serviceMap := make(map[string][]string) + + if len(namespaces) == 0 { + namespace := "" + functions, err := s.getFunctions(namespace) + if err != nil { + 
return map[string][]string{}, err + } + serviceMap = buildServiceMap(&functions, s.TopicDelimiter, namespace, serviceMap) + } else { + for _, namespace := range namespaces { + functions, err := s.getFunctions(namespace) + if err != nil { + return map[string][]string{}, err + } + serviceMap = buildServiceMap(&functions, s.TopicDelimiter, namespace, serviceMap) + } + } + + return serviceMap, err +} + +func buildServiceMap(functions *[]types.FunctionStatus, topicDelimiter, namespace string, serviceMap map[string][]string) map[string][]string { + for _, function := range *functions { + if function.Annotations != nil { + annotations := *function.Annotations - if topic, pass := annotations["topic"]; pass { + if topicNames, exist := annotations["topic"]; exist { + + if len(topicDelimiter) > 0 && strings.Count(topicNames, topicDelimiter) > 0 { + + topicSlice := strings.Split(topicNames, topicDelimiter) - if serviceMap[topic] == nil { - serviceMap[topic] = []string{} + for _, topic := range topicSlice { + serviceMap = appendServiceMap(topic, function.Name, namespace, serviceMap) + } + } else { + serviceMap = appendServiceMap(topicNames, function.Name, namespace, serviceMap) } - serviceMap[topic] = append(serviceMap[topic], function.Name) } } } + return serviceMap +} - return serviceMap, err +func appendServiceMap(key, function, namespace string, sm map[string][]string) map[string][]string { + + key = strings.TrimSpace(key) + + if len(key) > 0 { + + if sm[key] == nil { + sm[key] = []string{} + } + sep := "" + if len(namespace) > 0 { + sep = "." + } + + functionPath := fmt.Sprintf("%s%s%s", function, sep, namespace) + sm[key] = append(sm[key], functionPath) + } + + return sm } diff --git a/vendor/github.com/openfaas-incubator/connector-sdk/types/invoker.go b/vendor/github.com/openfaas-incubator/connector-sdk/types/invoker.go index 135f625..6c7ea59 100644 --- a/vendor/github.com/openfaas-incubator/connector-sdk/types/invoker.go +++ b/vendor/github.com/openfaas-incubator/connector-sdk/types/invoker.go @@ -1,7 +1,11 @@ +// Copyright (c) OpenFaaS Author(s) 2019. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+
package types

import (
	"bytes"
+	"context"
	"fmt"
	"io"
	"io/ioutil"
@@ -19,6 +23,7 @@ type Invoker struct {
}

type InvokerResponse struct {
+	Context context.Context
	Body    *[]byte
	Header  *http.Header
	Status  int
@@ -38,9 +43,15 @@ func NewInvoker(gatewayURL string, client *http.Client, printResponse bool) *Inv

// Invoke triggers a function by accessing the API Gateway
func (i *Invoker) Invoke(topicMap *TopicMap, topic string, message *[]byte) {
+	i.InvokeWithContext(context.Background(), topicMap, topic, message)
+}
+
+// InvokeWithContext triggers a function by accessing the API Gateway while propagating context
+func (i *Invoker) InvokeWithContext(ctx context.Context, topicMap *TopicMap, topic string, message *[]byte) {
	if len(*message) == 0 {
		i.Responses <- InvokerResponse{
-			Error: fmt.Errorf("no message to send"),
+			Context: ctx,
+			Error:   fmt.Errorf("no message to send"),
		}
	}
@@ -48,19 +59,21 @@ func (i *Invoker) Invoke(topicMap *TopicMap, topic string, message *[]byte) {
	for _, matchedFunction := range matchedFunctions {
		log.Printf("Invoke function: %s", matchedFunction)

-		gwURL := fmt.Sprintf("%s/function/%s", i.GatewayURL, matchedFunction)
+		gwURL := fmt.Sprintf("%s/%s", i.GatewayURL, matchedFunction)
		reader := bytes.NewReader(*message)

-		body, statusCode, header, doErr := invokefunction(i.Client, gwURL, reader)
+		body, statusCode, header, doErr := invokefunction(ctx, i.Client, gwURL, reader)

		if doErr != nil {
			i.Responses <- InvokerResponse{
-				Error: errors.Wrap(doErr, fmt.Sprintf("unable to invoke %s", matchedFunction)),
+				Context: ctx,
+				Error:   errors.Wrap(doErr, fmt.Sprintf("unable to invoke %s", matchedFunction)),
			}
			continue
		}

		i.Responses <- InvokerResponse{
+			Context: ctx,
			Body:    body,
			Status:  statusCode,
			Header:  header,
@@ -70,9 +83,10 @@
	}
}

-func invokefunction(c *http.Client, gwURL string, reader io.Reader) (*[]byte, int, *http.Header, error) {
+func invokefunction(ctx context.Context, c *http.Client, gwURL string, reader io.Reader) (*[]byte, int, *http.Header, error) {
	httpReq, _ := http.NewRequest(http.MethodPost, gwURL, reader)
+	// http.Request.WithContext returns a shallow copy; the result must be
+	// assigned back, otherwise the context is silently dropped.
+	httpReq = httpReq.WithContext(ctx)

	if httpReq.Body != nil {
		defer httpReq.Body.Close()
diff --git a/vendor/github.com/openfaas-incubator/connector-sdk/types/make_client.go b/vendor/github.com/openfaas-incubator/connector-sdk/types/make_client.go
index a380f80..5d50236 100644
--- a/vendor/github.com/openfaas-incubator/connector-sdk/types/make_client.go
+++ b/vendor/github.com/openfaas-incubator/connector-sdk/types/make_client.go
@@ -1,3 +1,6 @@
+// Copyright (c) OpenFaaS Author(s) 2019. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
package types

import (
@@ -6,11 +9,15 @@ import (
	"time"
)

+// MakeClient returns an http.Client with a timeout for connection establishment and request handling
func MakeClient(timeout time.Duration) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			Proxy: http.ProxyFromEnvironment,
			DialContext: (&net.Dialer{
+				// Timeout is the maximum amount of time a dial will wait for
+				// a connect to complete. If Deadline is also set, it may fail
+				// earlier.
				Timeout:   timeout,
				KeepAlive: 10 * time.Second,
			}).DialContext,
@@ -18,5 +25,11 @@
			MaxIdleConnsPerHost: 100,
			IdleConnTimeout:     120 * time.Millisecond,
		},
+		// Timeout specifies a time limit for requests made by this
+		// Client.
+		// The timeout includes connection time, any
+		// redirects, and reading the response body. The timer remains
+		// running after Get, Head, Post, or Do return and will
+		// interrupt reading of the Response.Body.
+		Timeout: timeout,
	}
}
diff --git a/vendor/github.com/openfaas-incubator/connector-sdk/types/response_printer.go b/vendor/github.com/openfaas-incubator/connector-sdk/types/response_printer.go
index 2889ad6..4a0836c 100644
--- a/vendor/github.com/openfaas-incubator/connector-sdk/types/response_printer.go
+++ b/vendor/github.com/openfaas-incubator/connector-sdk/types/response_printer.go
@@ -1,3 +1,6 @@
+// Copyright (c) OpenFaaS Author(s) 2019. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
package types

import (
@@ -18,7 +21,7 @@ func (rp *ResponsePrinter) Response(res InvokerResponse) {
	} else {
		log.Printf("connector-sdk got result: [%d] %s => %s (%d) bytes", res.Status, res.Topic, res.Function, len(*res.Body))
		if rp.PrintResponseBody {
-			fmt.Printf("[%d] %s => %s\n%q", res.Status, res.Topic, res.Function, string(*res.Body))
+			fmt.Printf("[%d] %s => %s\n%s\n", res.Status, res.Topic, res.Function, string(*res.Body))
		}
	}
}
diff --git a/vendor/github.com/openfaas-incubator/connector-sdk/types/response_subscriber.go b/vendor/github.com/openfaas-incubator/connector-sdk/types/response_subscriber.go
index c313701..c3b432c 100644
--- a/vendor/github.com/openfaas-incubator/connector-sdk/types/response_subscriber.go
+++ b/vendor/github.com/openfaas-incubator/connector-sdk/types/response_subscriber.go
@@ -1,3 +1,6 @@
+// Copyright (c) OpenFaaS Author(s) 2019. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
package types

// ResponseSubscriber enables connector or another client in connector
diff --git a/vendor/github.com/openfaas-incubator/connector-sdk/types/topic_map.go b/vendor/github.com/openfaas-incubator/connector-sdk/types/topic_map.go
index 684d928..328e9fb 100644
--- a/vendor/github.com/openfaas-incubator/connector-sdk/types/topic_map.go
+++ b/vendor/github.com/openfaas-incubator/connector-sdk/types/topic_map.go
@@ -1,26 +1,28 @@
-// Copyright (c) OpenFaaS Project 2018. All rights reserved.
+// Copyright (c) OpenFaaS Author(s) 2019. All rights reserved.
// Licensed under the MIT license. See LICENSE file in the project root for full license information.
package types -import "sync" +import ( + "sync" +) func NewTopicMap() TopicMap { lookup := make(map[string][]string) return TopicMap{ lookup: &lookup, - lock: sync.Mutex{}, + lock: sync.RWMutex{}, } } type TopicMap struct { lookup *map[string][]string - lock sync.Mutex + lock sync.RWMutex } func (t *TopicMap) Match(topicName string) []string { - t.lock.Lock() - defer t.lock.Unlock() + t.lock.RLock() + defer t.lock.RUnlock() var values []string @@ -40,3 +42,15 @@ func (t *TopicMap) Sync(updated *map[string][]string) { t.lookup = updated } + +func (t *TopicMap) Topics() []string { + t.lock.RLock() + defer t.lock.RUnlock() + + topics := make([]string, 0, len(*t.lookup)) + for topic := range *t.lookup { + topics = append(topics, topic) + } + + return topics +} diff --git a/vendor/github.com/openfaas/faas-cli/LICENSE b/vendor/github.com/openfaas/faas-cli/LICENSE new file mode 100644 index 0000000..e366418 --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/LICENSE @@ -0,0 +1,22 @@ +MIT License + +Copyright (c) 2016-2017 Alex Ellis + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + diff --git a/vendor/github.com/openfaas/faas-cli/config/config_file.go b/vendor/github.com/openfaas/faas-cli/config/config_file.go new file mode 100644 index 0000000..07ee903 --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/config/config_file.go @@ -0,0 +1,332 @@ +// Copyright (c) OpenFaaS Author(s) 2017. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. + +package config + +import ( + "encoding/base64" + + "fmt" + "io/ioutil" + "net/url" + "os" + "path" + "path/filepath" + "strings" + + "github.com/mitchellh/go-homedir" + "gopkg.in/yaml.v2" +) + +//AuthType auth type +type AuthType string + +const ( + //BasicAuthType basic authentication type + BasicAuthType = "basic" + //Oauth2AuthType oauth2 authentication type + Oauth2AuthType = "oauth2" + + // ConfigLocationEnv is the name of he env variable used + // to configure the location of the faas-cli config folder. + // When not set, DefaultDir location is used. + ConfigLocationEnv string = "OPENFAAS_CONFIG" + + DefaultDir string = "~/.openfaas" + DefaultFile string = "config.yml" + DefaultPermissions os.FileMode = 0700 + + // DefaultCIDir creates the 'openfaas' directory in the current directory + // if running in a CI environment. 
+	DefaultCIDir string = ".openfaas"
+	// DefaultCIPermissions creates the config file with elevated permissions
+	// for it to be read by multiple users when running in a CI environment.
+	DefaultCIPermissions os.FileMode = 0744
+)
+
+// ConfigFile for OpenFaaS CLI exclusively.
+type ConfigFile struct {
+	AuthConfigs []AuthConfig `yaml:"auths"`
+	FilePath    string       `yaml:"-"`
+}
+
+type AuthConfig struct {
+	Gateway string   `yaml:"gateway,omitempty"`
+	Auth    AuthType `yaml:"auth,omitempty"`
+	Token   string   `yaml:"token,omitempty"`
+}
+
+// New initializes a config file for the given file path
+func New(filePath string) (*ConfigFile, error) {
+	if filePath == "" {
+		return nil, fmt.Errorf("can't create config with empty filePath")
+	}
+	conf := &ConfigFile{
+		AuthConfigs: make([]AuthConfig, 0),
+		FilePath:    filePath,
+	}
+
+	return conf, nil
+}
+
+// ConfigDir returns the path to the faas-cli config directory.
+// When
+// 1. CI = "true" and OPENFAAS_CONFIG="", then it will return `.openfaas`, which is located in the current working directory.
+// 2. OPENFAAS_CONFIG is set to a path, then it will return the path value in OPENFAAS_CONFIG.
+// 3. CI = "" and OPENFAAS_CONFIG="", then it will return the default location ~/.openfaas.
+func ConfigDir() string {
+	override := os.Getenv(ConfigLocationEnv)
+	ci := isRunningInCI()
+
+	switch {
+	// case (1) from the doc string
+	case ci && override == "":
+		return DefaultCIDir
+	// case (2) from the doc string
+	case override != "":
+		return override
+	// case (3) from the doc string
+	default:
+		return DefaultDir
+	}
+}
+
+// isRunningInCI checks the env var CI and returns true if it's set to "true" or "1"
+func isRunningInCI() bool {
+	if env, ok := os.LookupEnv("CI"); ok {
+		if env == "true" || env == "1" {
+			return true
+		}
+	}
+	return false
+}
+
+// EnsureFile creates the root dir and config file
+func EnsureFile() (string, error) {
+	permission := DefaultPermissions
+	dir := ConfigDir()
+	if isRunningInCI() {
+		permission = DefaultCIPermissions
+	}
+	dirPath, err := homedir.Expand(dir)
+	if err != nil {
+		return "", err
+	}
+
+	filePath := path.Clean(filepath.Join(dirPath, DefaultFile))
+	if err := os.MkdirAll(filepath.Dir(filePath), permission); err != nil {
+		return "", err
+	}
+
+	if _, err := os.Stat(filePath); os.IsNotExist(err) {
+		file, err := os.OpenFile(filePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+		if err != nil {
+			return "", err
+		}
+		defer file.Close()
+	}
+
+	return filePath, nil
+}
+
+// fileExists returns true if the config file is located at the default path
+func fileExists() bool {
+	dir := ConfigDir()
+	dirPath, err := homedir.Expand(dir)
+	if err != nil {
+		return false
+	}
+
+	filePath := path.Clean(filepath.Join(dirPath, DefaultFile))
+	if _, err := os.Stat(filePath); os.IsNotExist(err) {
+		return false
+	}
+
+	return true
+}
+
+// save writes the config to disk
+func (configFile *ConfigFile) save() error {
+	file, err := os.OpenFile(configFile.FilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	data, err := yaml.Marshal(configFile)
+	if err != nil {
+		return err
+	}
+
+	_, err = file.Write(data)
+	return err
+}
+
+// load reads the yml file from disk
+func (configFile *ConfigFile) load() error {
+	conf := &ConfigFile{}
+
+	if _, err := os.Stat(configFile.FilePath); os.IsNotExist(err) {
+		return fmt.Errorf("can't load config from non-existent filePath")
+	}
+
+	data, err := ioutil.ReadFile(configFile.FilePath)
+	if err != nil {
+		return err
+	}
+
+	if err := 
yaml.Unmarshal(data, conf); err != nil { + return err + } + + if len(conf.AuthConfigs) > 0 { + configFile.AuthConfigs = conf.AuthConfigs + } + return nil +} + +// EncodeAuth encodes the username and password strings to base64 +func EncodeAuth(username string, password string) string { + input := username + ":" + password + msg := []byte(input) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// DecodeAuth decodes the input string from base64 to username and password +func DecodeAuth(input string) (string, string, error) { + decoded, err := base64.StdEncoding.DecodeString(input) + if err != nil { + return "", "", err + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", fmt.Errorf("invalid auth config file") + } + return arr[0], arr[1], nil +} + +// UpdateAuthConfig creates or updates the username and password for a given gateway +func UpdateAuthConfig(gateway, token string, authType AuthType) error { + _, err := url.ParseRequestURI(gateway) + if err != nil || len(gateway) < 1 { + return fmt.Errorf("invalid gateway URL") + } + + configPath, err := EnsureFile() + if err != nil { + return err + } + + cfg, err := New(configPath) + if err != nil { + return err + } + + if err := cfg.load(); err != nil { + return err + } + + auth := AuthConfig{ + Gateway: gateway, + Auth: authType, + Token: token, + } + + index := -1 + for i, v := range cfg.AuthConfigs { + if gateway == v.Gateway { + index = i + break + } + } + + if index == -1 { + cfg.AuthConfigs = append(cfg.AuthConfigs, auth) + } else { + cfg.AuthConfigs[index] = auth + } + + if err := cfg.save(); err != nil { + return err + } + + return nil +} + +// LookupAuthConfig returns the username and password for a given gateway +func LookupAuthConfig(gateway string) (AuthConfig, error) { + var authConfig AuthConfig + + if !fileExists() { + return authConfig, fmt.Errorf("config file not found") + } + + configPath, err := EnsureFile() + if err != nil { + return authConfig, err + } + + cfg, err := New(configPath) + if err != nil { + return authConfig, err + } + + if err := cfg.load(); err != nil { + return authConfig, err + } + + for _, v := range cfg.AuthConfigs { + if gateway == v.Gateway { + authConfig = v + return authConfig, nil + } + } + + return authConfig, fmt.Errorf("no auth config found for %s", gateway) +} + +// RemoveAuthConfig deletes the username and password for a given gateway +func RemoveAuthConfig(gateway string) error { + if !fileExists() { + return fmt.Errorf("config file not found") + } + + configPath, err := EnsureFile() + if err != nil { + return err + } + + cfg, err := New(configPath) + if err != nil { + return err + } + + if err := cfg.load(); err != nil { + return err + } + + index := -1 + for i, v := range cfg.AuthConfigs { + if gateway == v.Gateway { + index = i + break + } + } + + if index > -1 { + cfg.AuthConfigs = removeAuthByIndex(cfg.AuthConfigs, index) + if err := cfg.save(); err != nil { + return err + } + } else { + return fmt.Errorf("gateway %s not found in config", gateway) + } + + return nil +} + +func removeAuthByIndex(s []AuthConfig, index int) []AuthConfig { + return append(s[:index], s[index+1:]...) 
+} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/auth.go b/vendor/github.com/openfaas/faas-cli/proxy/auth.go new file mode 100644 index 0000000..8a07965 --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/auth.go @@ -0,0 +1,71 @@ +package proxy + +import ( + "net/http" + + "github.com/openfaas/faas-cli/config" +) + +//CLIAuth auth struct for the CLI +type CLIAuth struct { + Username string + Password string + Token string +} + +//BasicAuth basic authentication type +type BasicAuth struct { + username string + password string +} + +func (auth *BasicAuth) Set(req *http.Request) error { + req.SetBasicAuth(auth.username, auth.password) + return nil +} + +//BearerToken bearer token +type BearerToken struct { + token string +} + +func (c *BearerToken) Set(req *http.Request) error { + req.Header.Set("Authorization", "Bearer "+c.token) + return nil +} + +//NewCLIAuth returns a new CLI Auth +func NewCLIAuth(token string, gateway string) (ClientAuth, error) { + authConfig, _ := config.LookupAuthConfig(gateway) + + var ( + username string + password string + bearerToken string + err error + ) + + if authConfig.Auth == config.BasicAuthType { + username, password, err = config.DecodeAuth(authConfig.Token) + if err != nil { + return nil, err + } + + return &BasicAuth{ + username: username, + password: password, + }, nil + + } + + // User specified token gets priority + if len(token) > 0 { + bearerToken = token + } else { + bearerToken = authConfig.Token + } + + return &BearerToken{ + token: bearerToken, + }, nil +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/client.go b/vendor/github.com/openfaas/faas-cli/proxy/client.go new file mode 100644 index 0000000..bb32e38 --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/client.go @@ -0,0 +1,131 @@ +package proxy + +import ( + "context" + "fmt" + "io" + "net/http" + "net/http/httputil" + "net/url" + "os" + gopath "path" + "strings" + "time" +) + +//Client an API client to perform all operations +type Client struct { + httpClient *http.Client + //ClientAuth a type implementing ClientAuth interface for client authentication + ClientAuth ClientAuth + //GatewayURL base URL of OpenFaaS gateway + GatewayURL *url.URL + //UserAgent user agent for the client + UserAgent string +} + +//ClientAuth an interface for client authentication. 
+// to add authentication to the client implement this interface +type ClientAuth interface { + Set(req *http.Request) error +} + +//NewClient initializes a new API client +func NewClient(auth ClientAuth, gatewayURL string, transport http.RoundTripper, timeout *time.Duration) (*Client, error) { + gatewayURL = strings.TrimRight(gatewayURL, "/") + baseURL, err := url.Parse(gatewayURL) + if err != nil { + return nil, fmt.Errorf("invalid gateway URL: %s", gatewayURL) + } + + client := &http.Client{} + if timeout != nil { + client.Timeout = *timeout + } + + if transport != nil { + client.Transport = transport + } + + return &Client{ + ClientAuth: auth, + httpClient: client, + GatewayURL: baseURL, + }, nil +} + +//newRequest create a new HTTP request with authentication +func (c *Client) newRequest(method, path string, body io.Reader) (*http.Request, error) { + u, err := url.Parse(path) + if err != nil { + return nil, err + } + // deep copy gateway url and then add the supplied path and args to the copy so that + // we preserve the original gateway URL as much as possible + endpoint, err := url.Parse(c.GatewayURL.String()) + if err != nil { + return nil, err + } + endpoint.Path = gopath.Join(endpoint.Path, u.Path) + endpoint.RawQuery = u.RawQuery + + req, err := http.NewRequest(method, endpoint.String(), body) + if err != nil { + return nil, err + } + + if body != nil { + req.Header.Set("Content-Type", "application/json") + } + + if c.UserAgent != "" { + req.Header.Set("User-Agent", c.UserAgent) + } + + c.ClientAuth.Set(req) + + return req, err +} + +//doRequest perform an HTTP request with context +func (c *Client) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) { + req = req.WithContext(ctx) + + if val, ok := os.LookupEnv("OPENFAAS_DUMP_HTTP"); ok && val == "true" { + dump, err := httputil.DumpRequest(req, true) + if err != nil { + return nil, err + } + fmt.Println(string(dump)) + } + resp, err := c.httpClient.Do(req) + + if err != nil { + select { + case <-ctx.Done(): + return nil, ctx.Err() + default: + } + } + + return resp, err +} + +func addQueryParams(u string, params map[string]string) (string, error) { + parsedURL, err := url.Parse(u) + if err != nil { + return u, err + } + + qs := parsedURL.Query() + for key, value := range params { + qs.Add(key, value) + } + parsedURL.RawQuery = qs.Encode() + return parsedURL.String(), nil +} + +//AddCheckRedirect add CheckRedirect to the client +func (c *Client) AddCheckRedirect(checkRedirect func(*http.Request, []*http.Request) error) { + c.httpClient.CheckRedirect = checkRedirect +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/delete.go b/vendor/github.com/openfaas/faas-cli/proxy/delete.go new file mode 100644 index 0000000..a6fe051 --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/delete.go @@ -0,0 +1,65 @@ +// Copyright (c) Alex Ellis 2017. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. 
+ +package proxy + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + + "github.com/openfaas/faas/gateway/requests" +) + +// DeleteFunction delete a function from the OpenFaaS server +func (c *Client) DeleteFunction(ctx context.Context, functionName string, namespace string) error { + var err error + delReq := requests.DeleteFunctionRequest{FunctionName: functionName} + reqBytes, _ := json.Marshal(&delReq) + reader := bytes.NewReader(reqBytes) + deleteEndpoint := "/system/functions" + if len(namespace) > 0 { + deleteEndpoint, err = addQueryParams(deleteEndpoint, map[string]string{namespaceKey: namespace}) + if err != nil { + return err + } + } + + req, err := c.newRequest(http.MethodDelete, deleteEndpoint, reader) + if err != nil { + fmt.Println(err) + return err + } + delRes, delErr := c.doRequest(ctx, req) + + if delErr != nil { + fmt.Printf("Error removing existing function: %s, gateway=%s, functionName=%s\n", delErr.Error(), c.GatewayURL.String(), functionName) + return delErr + } + + if delRes.Body != nil { + defer delRes.Body.Close() + } + + switch delRes.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusAccepted: + fmt.Println("Removing old function.") + case http.StatusNotFound: + err = fmt.Errorf("No existing function to remove") + case http.StatusUnauthorized: + err = fmt.Errorf("unauthorized access, run \"faas-cli login\" to setup authentication for this server") + default: + var bodyReadErr error + bytesOut, bodyReadErr := ioutil.ReadAll(delRes.Body) + if bodyReadErr != nil { + err = bodyReadErr + } else { + err = fmt.Errorf("Server returned unexpected status code %d %s", delRes.StatusCode, string(bytesOut)) + } + } + + return err +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/deploy.go b/vendor/github.com/openfaas/faas-cli/proxy/deploy.go new file mode 100644 index 0000000..7e2a3ff --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/deploy.go @@ -0,0 +1,184 @@ +// Copyright (c) Alex Ellis 2017. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. + +package proxy + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "time" + + "github.com/openfaas/faas-cli/stack" + + types "github.com/openfaas/faas-provider/types" +) + +var ( + defaultCommandTimeout = 60 * time.Second +) + +// FunctionResourceRequest defines a request to set function resources +type FunctionResourceRequest struct { + Limits *stack.FunctionResources + Requests *stack.FunctionResources +} + +// DeployFunctionSpec defines the spec used when deploying a function +type DeployFunctionSpec struct { + FProcess string + FunctionName string + Image string + RegistryAuth string + Language string + Replace bool + EnvVars map[string]string + Network string + Constraints []string + Update bool + Secrets []string + Labels map[string]string + Annotations map[string]string + FunctionResourceRequest FunctionResourceRequest + ReadOnlyRootFilesystem bool + TLSInsecure bool + Token string + Namespace string +} + +func generateFuncStr(spec *DeployFunctionSpec) string { + + if len(spec.Namespace) > 0 { + return fmt.Sprintf("%s.%s", spec.FunctionName, spec.Namespace) + } + return spec.FunctionName +} + +// DeployFunction first tries to deploy a function and if it exists will then attempt +// a rolling update. Warnings are suppressed for the second API call (if required.) 
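+// The returned int is the final HTTP status code received from the gateway.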
+func (c *Client) DeployFunction(context context.Context, spec *DeployFunctionSpec) int { + + rollingUpdateInfo := fmt.Sprintf("Function %s already exists, attempting rolling-update.", spec.FunctionName) + statusCode, deployOutput := c.deploy(context, spec, spec.Update) + + if spec.Update == true && statusCode == http.StatusNotFound { + // Re-run the function with update=false + + statusCode, deployOutput = c.deploy(context, spec, false) + } else if statusCode == http.StatusOK { + fmt.Println(rollingUpdateInfo) + } + fmt.Println() + fmt.Println(deployOutput) + return statusCode +} + +// deploy a function to an OpenFaaS gateway over REST +func (c *Client) deploy(context context.Context, spec *DeployFunctionSpec, update bool) (int, string) { + + var deployOutput string + // Need to alter Gateway to allow nil/empty string as fprocess, to avoid this repetition. + var fprocessTemplate string + if len(spec.FProcess) > 0 { + fprocessTemplate = spec.FProcess + } + + if spec.Replace { + c.DeleteFunction(context, spec.FunctionName, spec.Namespace) + } + + req := types.FunctionDeployment{ + EnvProcess: fprocessTemplate, + Image: spec.Image, + RegistryAuth: spec.RegistryAuth, + Network: spec.Network, + Service: spec.FunctionName, + EnvVars: spec.EnvVars, + Constraints: spec.Constraints, + Secrets: spec.Secrets, + Labels: &spec.Labels, + Annotations: &spec.Annotations, + ReadOnlyRootFilesystem: spec.ReadOnlyRootFilesystem, + Namespace: spec.Namespace, + } + + hasLimits := false + req.Limits = &types.FunctionResources{} + if spec.FunctionResourceRequest.Limits != nil && len(spec.FunctionResourceRequest.Limits.Memory) > 0 { + hasLimits = true + req.Limits.Memory = spec.FunctionResourceRequest.Limits.Memory + } + if spec.FunctionResourceRequest.Limits != nil && len(spec.FunctionResourceRequest.Limits.CPU) > 0 { + hasLimits = true + req.Limits.CPU = spec.FunctionResourceRequest.Limits.CPU + } + if !hasLimits { + req.Limits = nil + } + + hasRequests := false + req.Requests = &types.FunctionResources{} + if spec.FunctionResourceRequest.Requests != nil && len(spec.FunctionResourceRequest.Requests.Memory) > 0 { + hasRequests = true + req.Requests.Memory = spec.FunctionResourceRequest.Requests.Memory + } + if spec.FunctionResourceRequest.Requests != nil && len(spec.FunctionResourceRequest.Requests.CPU) > 0 { + hasRequests = true + req.Requests.CPU = spec.FunctionResourceRequest.Requests.CPU + } + + if !hasRequests { + req.Requests = nil + } + + reqBytes, _ := json.Marshal(&req) + reader := bytes.NewReader(reqBytes) + var request *http.Request + + method := http.MethodPost + // "application/json" + if update { + method = http.MethodPut + } + + var err error + request, err = c.newRequest(method, "/system/functions", reader) + + if err != nil { + deployOutput += fmt.Sprintln(err) + return http.StatusInternalServerError, deployOutput + } + + res, err := c.doRequest(context, request) + + if err != nil { + deployOutput += fmt.Sprintln("Is OpenFaaS deployed? Do you need to specify the --gateway flag?") + deployOutput += fmt.Sprintln(err) + return http.StatusInternalServerError, deployOutput + } + + if res.Body != nil { + defer res.Body.Close() + } + + switch res.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusAccepted: + deployOutput += fmt.Sprintf("Deployed. 
%s.\n", res.Status) + + deployedURL := fmt.Sprintf("URL: %s/function/%s", c.GatewayURL.String(), generateFuncStr(spec)) + deployOutput += fmt.Sprintln(deployedURL) + case http.StatusUnauthorized: + deployOutput += fmt.Sprintln("unauthorized access, run \"faas-cli login\" to setup authentication for this server") + + default: + bytesOut, err := ioutil.ReadAll(res.Body) + if err == nil { + deployOutput += fmt.Sprintf("Unexpected status: %d, message: %s\n", res.StatusCode, string(bytesOut)) + } + } + + return res.StatusCode, deployOutput +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/describe.go b/vendor/github.com/openfaas/faas-cli/proxy/describe.go new file mode 100644 index 0000000..404bd11 --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/describe.go @@ -0,0 +1,68 @@ +// Copyright (c) OpenFaaS Author(s) 2018. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. + +package proxy + +import ( + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + + types "github.com/openfaas/faas-provider/types" +) + +//GetFunctionInfo get an OpenFaaS function information +func (c *Client) GetFunctionInfo(ctx context.Context, functionName string, namespace string) (types.FunctionStatus, error) { + var ( + result types.FunctionStatus + err error + ) + + functionPath := fmt.Sprintf("%s/%s", functionPath, functionName) + if len(namespace) > 0 { + functionPath, err = addQueryParams(functionPath, map[string]string{namespaceKey: namespace}) + if err != nil { + return result, err + } + } + + getRequest, err := c.newRequest(http.MethodGet, functionPath, nil) + if err != nil { + return result, fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + res, err := c.doRequest(ctx, getRequest) + if err != nil { + return result, fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + + } + + if res.Body != nil { + defer res.Body.Close() + } + + switch res.StatusCode { + case http.StatusOK: + bytesOut, err := ioutil.ReadAll(res.Body) + if err != nil { + return result, fmt.Errorf("cannot read result from OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + jsonErr := json.Unmarshal(bytesOut, &result) + if jsonErr != nil { + return result, fmt.Errorf("cannot parse result from OpenFaaS on URL: %s\n%s", c.GatewayURL.String(), jsonErr.Error()) + } + case http.StatusUnauthorized: + return result, fmt.Errorf("unauthorized access, run \"faas-cli login\" to setup authentication for this server") + case http.StatusNotFound: + return result, fmt.Errorf("No such function: %s", functionName) + default: + bytesOut, err := ioutil.ReadAll(res.Body) + if err == nil { + return result, fmt.Errorf("server returned unexpected status code: %d - %s", res.StatusCode, string(bytesOut)) + } + } + return result, nil +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/function_store.go b/vendor/github.com/openfaas/faas-cli/proxy/function_store.go new file mode 100644 index 0000000..5a6845a --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/function_store.go @@ -0,0 +1,57 @@ +package proxy + +import ( + "encoding/json" + "fmt" + v2 "github.com/openfaas/faas-cli/schema/store/v2" + "io/ioutil" + "net/http" + "strings" + "time" +) + +type StoreResult struct { + Version string `json:"version"` + Functions []v2.StoreFunction `json:"functions"` +} + +// FunctionStoreList returns functions from a store URL +func FunctionStoreList(store string) ([]v2.StoreFunction, error) { + + var 
storeResults StoreResult + + store = strings.TrimRight(store, "/") + + timeout := 60 * time.Second + tlsInsecure := false + + client := MakeHTTPClient(&timeout, tlsInsecure) + + res, err := client.Get(store) + if err != nil { + return nil, fmt.Errorf("cannot connect to OpenFaaS store at URL: %s", store) + } + + if res.Body != nil { + defer res.Body.Close() + } + + switch res.StatusCode { + case http.StatusOK: + bytesOut, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf("cannot read result from OpenFaaS store at URL: %s", store) + } + + jsonErr := json.Unmarshal(bytesOut, &storeResults) + if jsonErr != nil { + return nil, fmt.Errorf("cannot parse result from OpenFaaS store at URL: %s\n%s", store, jsonErr.Error()) + } + default: + bytesOut, err := ioutil.ReadAll(res.Body) + if err == nil { + return nil, fmt.Errorf("server returned unexpected status code: %d - %s", res.StatusCode, string(bytesOut)) + } + } + return storeResults.Functions, nil +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/invoke.go b/vendor/github.com/openfaas/faas-cli/proxy/invoke.go new file mode 100644 index 0000000..5dea390 --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/invoke.go @@ -0,0 +1,168 @@ +// Copyright (c) Alex Ellis 2017. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. + +package proxy + +import ( + "bytes" + "os" + + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" +) + +// InvokeFunction a function +func InvokeFunction(gateway string, name string, bytesIn *[]byte, contentType string, query []string, headers []string, async bool, httpMethod string, tlsInsecure bool, namespace string) (*[]byte, error) { + var resBytes []byte + + gateway = strings.TrimRight(gateway, "/") + + reader := bytes.NewReader(*bytesIn) + + var disableFunctionTimeout *time.Duration + client := MakeHTTPClient(disableFunctionTimeout, tlsInsecure) + + qs, qsErr := buildQueryString(query) + if qsErr != nil { + return nil, qsErr + } + + headerMap, headerErr := parseHeaders(headers) + if headerErr != nil { + return nil, headerErr + } + + functionEndpoint := "/function/" + if async { + functionEndpoint = "/async-function/" + } + + httpMethodErr := validateHTTPMethod(httpMethod) + if httpMethodErr != nil { + return nil, httpMethodErr + } + + gatewayURL := gateway + functionEndpoint + name + if len(namespace) > 0 { + gatewayURL += "." + namespace + } + gatewayURL += qs + + req, err := http.NewRequest(httpMethod, gatewayURL, reader) + if err != nil { + fmt.Println() + fmt.Println(err) + return nil, fmt.Errorf("cannot connect to OpenFaaS on URL: %s", gateway) + } + + req.Header.Add("Content-Type", contentType) + // Add additional headers to request + for name, value := range headerMap { + req.Header.Add(name, value) + } + + // Removed by AE - the system-level basic auth secrets should not be transmitted + // to functions. Functions should implement their own auth. 
+ // SetAuth(req, gateway) + + res, err := client.Do(req) + + if err != nil { + fmt.Println() + fmt.Println(err) + return nil, fmt.Errorf("cannot connect to OpenFaaS on URL: %s", gateway) + } + + if res.Body != nil { + defer res.Body.Close() + } + + switch res.StatusCode { + case http.StatusAccepted: + fmt.Fprintf(os.Stderr, "Function submitted asynchronously.\n") + case http.StatusOK: + var readErr error + resBytes, readErr = ioutil.ReadAll(res.Body) + if readErr != nil { + return nil, fmt.Errorf("cannot read result from OpenFaaS on URL: %s %s", gateway, readErr) + } + case http.StatusUnauthorized: + return nil, fmt.Errorf("unauthorized access, run \"faas-cli login\" to setup authentication for this server") + default: + bytesOut, err := ioutil.ReadAll(res.Body) + if err == nil { + return nil, fmt.Errorf("server returned unexpected status code: %d - %s", res.StatusCode, string(bytesOut)) + } + } + + return &resBytes, nil +} + +func buildQueryString(query []string) (string, error) { + qs := "" + + if len(query) > 0 { + qs = "?" + for _, queryValue := range query { + qs = qs + queryValue + "&" + if strings.Contains(queryValue, "=") == false { + return "", fmt.Errorf("the --query flags must take the form of key=value (= not found)") + } + if strings.HasSuffix(queryValue, "=") { + return "", fmt.Errorf("the --query flag must take the form of: key=value (empty value given, or value ends in =)") + } + } + qs = strings.TrimRight(qs, "&") + } + + return qs, nil +} + +// parseHeaders parses header values from command +func parseHeaders(headers []string) (map[string]string, error) { + headerMap := make(map[string]string) + + for _, header := range headers { + headerValues := strings.SplitN(header, "=", 2) + if len(headerValues) != 2 { + return headerMap, fmt.Errorf("the --header or -H flag must take the form of key=value") + } + + name, value := headerValues[0], headerValues[1] + if name == "" { + return headerMap, fmt.Errorf("the --header or -H flag must take the form of key=value (empty key given)") + } + + if value == "" { + return headerMap, fmt.Errorf("the --header or -H flag must take the form of key=value (empty value given)") + } + + headerMap[name] = value + } + return headerMap, nil +} + +// validateMethod validates the HTTP request method +func validateHTTPMethod(httpMethod string) error { + var allowedMethods = []string{ + http.MethodGet, http.MethodPost, http.MethodPut, http.MethodPatch, http.MethodDelete, + } + helpString := strings.Join(allowedMethods, "/") + + if !contains(allowedMethods, httpMethod) { + return fmt.Errorf("the --method or -m flag must take one of these values (%s)", helpString) + } + return nil +} + +func contains(s []string, item string) bool { + for _, value := range s { + if value == item { + return true + } + } + return false +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/list.go b/vendor/github.com/openfaas/faas-cli/proxy/list.go new file mode 100644 index 0000000..5681f98 --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/list.go @@ -0,0 +1,71 @@ +// Copyright (c) Alex Ellis 2017. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. 
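+
+// A usage sketch for ListFunctions below (hypothetical; "client" stands in for
+// a configured *Client, and an empty namespace selects the default):
+//
+//	fns, err := client.ListFunctions(context.Background(), "")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	for _, fn := range fns {
+//		fmt.Println(fn.Name, fn.Replicas)
+//	}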
+ +package proxy + +import ( + "context" + "encoding/json" + + "fmt" + "io/ioutil" + "net/http" + + types "github.com/openfaas/faas-provider/types" +) + +// ListFunctions list deployed functions +func (c *Client) ListFunctions(ctx context.Context, namespace string) ([]types.FunctionStatus, error) { + var ( + results []types.FunctionStatus + listEndpoint string + err error + ) + + c.AddCheckRedirect(func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + }) + + listEndpoint = systemPath + if len(namespace) > 0 { + listEndpoint, err = addQueryParams(listEndpoint, map[string]string{namespaceKey: namespace}) + if err != nil { + return results, err + } + } + + getRequest, err := c.newRequest(http.MethodGet, listEndpoint, nil) + if err != nil { + return nil, fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + res, err := c.doRequest(ctx, getRequest) + if err != nil { + return nil, fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + if res.Body != nil { + defer res.Body.Close() + } + + switch res.StatusCode { + case http.StatusOK: + + bytesOut, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf("cannot read result from OpenFaaS on URL: %s", c.GatewayURL.String()) + } + jsonErr := json.Unmarshal(bytesOut, &results) + if jsonErr != nil { + return nil, fmt.Errorf("cannot parse result from OpenFaaS on URL: %s\n%s", c.GatewayURL.String(), jsonErr.Error()) + } + case http.StatusUnauthorized: + return nil, fmt.Errorf("unauthorized access, run \"faas-cli login\" to setup authentication for this server") + default: + bytesOut, err := ioutil.ReadAll(res.Body) + if err == nil { + return nil, fmt.Errorf("server returned unexpected status code: %d - %s", res.StatusCode, string(bytesOut)) + } + } + return results, nil +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/logs.go b/vendor/github.com/openfaas/faas-cli/proxy/logs.go new file mode 100644 index 0000000..c3205c0 --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/logs.go @@ -0,0 +1,100 @@ +package proxy + +import ( + "context" + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "log" + "net/http" + "net/url" + "strconv" + "time" + + "github.com/openfaas/faas-provider/logs" +) + +// GetLogs return stream for the logs +func (c *Client) GetLogs(ctx context.Context, params logs.Request) (<-chan logs.Message, error) { + + logRequest, err := c.newRequest(http.MethodGet, "/system/logs", nil) + if err != nil { + return nil, fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + logRequest.URL.RawQuery = reqAsQueryValues(params).Encode() + + res, err := c.doRequest(ctx, logRequest) + if err != nil { + return nil, fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + logStream := make(chan logs.Message, 1000) + switch res.StatusCode { + case http.StatusOK: + go func() { + defer close(logStream) + defer res.Body.Close() + + decoder := json.NewDecoder(res.Body) + for decoder.More() { + msg := logs.Message{} + err := decoder.Decode(&msg) + if err != nil { + log.Printf("cannot parse log results: %s\n", err.Error()) + return + } + logStream <- msg + } + }() + case http.StatusUnauthorized: + return nil, fmt.Errorf("unauthorized access, run \"faas-cli login\" to setup authentication for this server") + default: + bytesOut, err := ioutil.ReadAll(res.Body) + if err == nil { + return nil, fmt.Errorf("server returned unexpected status code: %d - %s", res.StatusCode, 
string(bytesOut)) + } + } + return logStream, nil +} + +func reqAsQueryValues(r logs.Request) url.Values { + query := url.Values{} + query.Add("name", r.Name) + if len(r.Namespace) > 0 { + query.Add("namespace", r.Namespace) + } + query.Add("follow", strconv.FormatBool(r.Follow)) + if r.Instance != "" { + query.Add("instance", r.Instance) + } + + if r.Since != nil { + query.Add("since", r.Since.Format(time.RFC3339)) + } + + if r.Tail != 0 { + query.Add("tail", strconv.Itoa(r.Tail)) + } + + return query +} + +func makeStreamingHTTPClient(tlsInsecure bool) http.Client { + client := http.Client{} + + if tlsInsecure { + tr := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + } + + if tlsInsecure { + tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: tlsInsecure} + } + + client.Transport = tr + } + + return client +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/namespaces.go b/vendor/github.com/openfaas/faas-cli/proxy/namespaces.go new file mode 100644 index 0000000..0a45d0b --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/namespaces.go @@ -0,0 +1,54 @@ +package proxy + +import ( + "context" + "encoding/json" + + "fmt" + "io/ioutil" + "net/http" +) + +// ListNamespaces lists available function namespaces +func (c *Client) ListNamespaces(ctx context.Context) ([]string, error) { + var namespaces []string + c.AddCheckRedirect(func(req *http.Request, via []*http.Request) error { + return http.ErrUseLastResponse + }) + + getRequest, err := c.newRequest(http.MethodGet, namespacesPath, nil) + + if err != nil { + return nil, fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + res, err := c.doRequest(ctx, getRequest) + if err != nil { + return nil, fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + if res.Body != nil { + defer res.Body.Close() + } + + switch res.StatusCode { + case http.StatusOK: + + bytesOut, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf("cannot read namespaces from OpenFaaS on URL: %s", c.GatewayURL.String()) + } + jsonErr := json.Unmarshal(bytesOut, &namespaces) + if jsonErr != nil { + return nil, fmt.Errorf("cannot parse namespaces from OpenFaaS on URL: %s\n%s", c.GatewayURL.String(), jsonErr.Error()) + } + case http.StatusUnauthorized: + return nil, fmt.Errorf("unauthorized access, run \"faas-cli login\" to setup authentication for this server") + default: + bytesOut, err := ioutil.ReadAll(res.Body) + if err == nil { + return nil, fmt.Errorf("server returned unexpected status code: %d - %s", res.StatusCode, string(bytesOut)) + } + } + return namespaces, nil +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/proxy.go b/vendor/github.com/openfaas/faas-cli/proxy/proxy.go new file mode 100644 index 0000000..c303022 --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/proxy.go @@ -0,0 +1,48 @@ +// Copyright (c) Alex Ellis 2017. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. + +package proxy + +import ( + "crypto/tls" + "net" + "net/http" + "time" +) + +// MakeHTTPClient makes a HTTP client with good defaults for timeouts. +func MakeHTTPClient(timeout *time.Duration, tlsInsecure bool) http.Client { + return makeHTTPClientWithDisableKeepAlives(timeout, tlsInsecure, false) +} + +// makeHTTPClientWithDisableKeepAlives makes a HTTP client with good defaults for timeouts. 
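+//
+// A usage sketch for MakeHTTPClient above (hypothetical URL; a nil timeout
+// leaves the client without a deadline, and tlsInsecure skips certificate
+// verification):
+//
+//	timeout := 30 * time.Second
+//	client := MakeHTTPClient(&timeout, false)
+//	res, err := client.Get("https://gateway.example.com/system/info")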
+func makeHTTPClientWithDisableKeepAlives(timeout *time.Duration, tlsInsecure bool, disableKeepAlives bool) http.Client { + client := http.Client{} + + if timeout != nil || tlsInsecure { + tr := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DisableKeepAlives: disableKeepAlives, + } + + if timeout != nil { + client.Timeout = *timeout + tr.DialContext = (&net.Dialer{ + Timeout: *timeout, + }).DialContext + + tr.IdleConnTimeout = 120 * time.Millisecond + tr.ExpectContinueTimeout = 1500 * time.Millisecond + } + + if tlsInsecure { + tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: tlsInsecure} + } + + tr.DisableKeepAlives = disableKeepAlives + + client.Transport = tr + } + + return client +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/scale.go b/vendor/github.com/openfaas/faas-cli/proxy/scale.go new file mode 100644 index 0000000..717e92e --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/scale.go @@ -0,0 +1,71 @@ +package proxy + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "path/filepath" + + types "github.com/openfaas/faas-provider/types" +) + +//ScaleFunction scale a function +func (c *Client) ScaleFunction(ctx context.Context, functionName, namespace string, replicas uint64) error { + + scaleReq := types.ScaleServiceRequest{ + ServiceName: functionName, + Replicas: replicas, + } + + var err error + + bodyBytes, _ := json.Marshal(scaleReq) + bodyReader := bytes.NewReader(bodyBytes) + + functionPath := filepath.Join(scalePath, functionName) + if len(namespace) > 0 { + functionPath, err = addQueryParams(functionPath, map[string]string{namespaceKey: namespace}) + if err != nil { + return err + } + } + + req, err := c.newRequest(http.MethodPost, functionPath, bodyReader) + if err != nil { + return fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + res, err := c.doRequest(ctx, req) + if err != nil { + return fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + + } + + if res.Body != nil { + defer res.Body.Close() + } + + switch res.StatusCode { + case http.StatusAccepted, http.StatusOK, http.StatusCreated: + break + + case http.StatusNotFound: + return fmt.Errorf("function %s not found", functionName) + + case http.StatusUnauthorized: + return fmt.Errorf("unauthorized action, please setup authentication for this server") + + default: + var bodyReadErr error + bytesOut, bodyReadErr := ioutil.ReadAll(res.Body) + if bodyReadErr != nil { + return bodyReadErr + } + + return fmt.Errorf("server returned unexpected status code %d %s", res.StatusCode, string(bytesOut)) + } + return nil +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/secret.go b/vendor/github.com/openfaas/faas-cli/proxy/secret.go new file mode 100644 index 0000000..e004273 --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/secret.go @@ -0,0 +1,190 @@ +package proxy + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + + types "github.com/openfaas/faas-provider/types" +) + +const ( + secretEndpoint = "/system/secrets" +) + +// GetSecretList get secrets list +func (c *Client) GetSecretList(ctx context.Context, namespace string) ([]types.Secret, error) { + var ( + results []types.Secret + err error + secretPath = secretEndpoint + ) + + if len(namespace) > 0 { + secretPath, err = addQueryParams(secretPath, map[string]string{namespaceKey: namespace}) + } + + getRequest, err := c.newRequest(http.MethodGet, secretPath, nil) + + if err != nil { + return nil, 
fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + res, err := c.doRequest(ctx, getRequest) + if err != nil { + return nil, fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + if res.Body != nil { + defer res.Body.Close() + } + + switch res.StatusCode { + case http.StatusOK, http.StatusAccepted: + + bytesOut, err := ioutil.ReadAll(res.Body) + if err != nil { + return nil, fmt.Errorf("cannot read result from OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + jsonErr := json.Unmarshal(bytesOut, &results) + if jsonErr != nil { + return nil, fmt.Errorf("cannot parse result from OpenFaaS on URL: %s\n%s", c.GatewayURL.String(), jsonErr.Error()) + } + + case http.StatusUnauthorized: + return nil, fmt.Errorf("unauthorized access, run \"faas-cli login\" to setup authentication for this server") + + default: + bytesOut, err := ioutil.ReadAll(res.Body) + if err == nil { + return nil, fmt.Errorf("server returned unexpected status code: %d - %s", res.StatusCode, string(bytesOut)) + } + } + + return results, nil +} + +// UpdateSecret update a secret via the OpenFaaS API by name +func (c *Client) UpdateSecret(ctx context.Context, secret types.Secret) (int, string) { + var output string + reqBytes, _ := json.Marshal(&secret) + + putRequest, err := c.newRequest(http.MethodPut, secretEndpoint, bytes.NewBuffer(reqBytes)) + + if err != nil { + output += fmt.Sprintf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + return http.StatusInternalServerError, output + } + + res, err := c.doRequest(ctx, putRequest) + if err != nil { + output += fmt.Sprintf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + return http.StatusInternalServerError, output + } + + if res.Body != nil { + defer res.Body.Close() + } + + switch res.StatusCode { + case http.StatusOK, http.StatusAccepted: + output += fmt.Sprintf("Updated: %s\n", res.Status) + break + + case http.StatusNotFound: + output += fmt.Sprintf("unable to find secret: %s", secret.Name) + + case http.StatusUnauthorized: + output += fmt.Sprintf("unauthorized access, run \"faas-cli login\" to setup authentication for this server") + + default: + bytesOut, err := ioutil.ReadAll(res.Body) + if err == nil { + output += fmt.Sprintf("server returned unexpected status code: %d - %s", res.StatusCode, string(bytesOut)) + } + } + + return res.StatusCode, output +} + +// RemoveSecret remove a secret via the OpenFaaS API by name +func (c *Client) RemoveSecret(ctx context.Context, secret types.Secret) error { + body, _ := json.Marshal(secret) + req, err := c.newRequest(http.MethodDelete, secretEndpoint, bytes.NewBuffer(body)) + if err != nil { + return fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + res, err := c.doRequest(ctx, req) + if err != nil { + return fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String()) + } + + if res.Body != nil { + defer res.Body.Close() + } + + switch res.StatusCode { + case http.StatusOK, http.StatusAccepted: + break + case http.StatusNotFound: + return fmt.Errorf("unable to find secret: %s", secret.Name) + case http.StatusUnauthorized: + return fmt.Errorf("unauthorized access, run \"faas-cli login\" to setup authentication for this server") + + default: + bytesOut, err := ioutil.ReadAll(res.Body) + if err == nil { + return fmt.Errorf("server returned unexpected status code: %d - %s", res.StatusCode, string(bytesOut)) + } + } + + return nil +} + +// CreateSecret create secret +func (c *Client) 
CreateSecret(ctx context.Context, secret types.Secret) (int, string) { + var output string + reqBytes, _ := json.Marshal(&secret) + reader := bytes.NewReader(reqBytes) + + request, err := c.newRequest(http.MethodPost, secretEndpoint, reader) + + if err != nil { + output += fmt.Sprintf("cannot connect to OpenFaaS on URL: %s\n", c.GatewayURL.String()) + return http.StatusInternalServerError, output + } + + res, err := c.doRequest(ctx, request) + if err != nil { + output += fmt.Sprintf("cannot connect to OpenFaaS on URL: %s\n", c.GatewayURL.String()) + return http.StatusInternalServerError, output + } + + if res.Body != nil { + defer res.Body.Close() + } + + switch res.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusAccepted: + output += fmt.Sprintf("Created: %s\n", res.Status) + + case http.StatusUnauthorized: + output += fmt.Sprintln("unauthorized access, run \"faas-cli login\" to setup authentication for this server") + + case http.StatusConflict: + output += fmt.Sprintf("secret with the name %q already exists\n", secret.Name) + + default: + bytesOut, err := ioutil.ReadAll(res.Body) + if err == nil { + output += fmt.Sprintf("server returned unexpected status code: %d - %s\n", res.StatusCode, string(bytesOut)) + } + } + + return res.StatusCode, output +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/utils.go b/vendor/github.com/openfaas/faas-cli/proxy/utils.go new file mode 100644 index 0000000..f0282fd --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/utils.go @@ -0,0 +1,52 @@ +package proxy + +import ( + "fmt" + "net/url" + "path" +) + +const ( + systemPath = "/system/functions" + functionPath = "/system/function" + namespacesPath = "/system/namespaces" + namespaceKey = "namespace" + scalePath = "/system/scale-function" +) + +func createSystemEndpoint(gateway, namespace string) (string, error) { + gatewayURL, err := url.Parse(gateway) + if err != nil { + return "", fmt.Errorf("invalid gateway URL: %s", err.Error()) + } + gatewayURL.Path = path.Join(gatewayURL.Path, systemPath) + if len(namespace) > 0 { + q := gatewayURL.Query() + q.Set("namespace", namespace) + gatewayURL.RawQuery = q.Encode() + } + return gatewayURL.String(), nil +} + +func createFunctionEndpoint(gateway, functionName, namespace string) (string, error) { + gatewayURL, err := url.Parse(gateway) + if err != nil { + return "", fmt.Errorf("invalid gateway URL: %s", err.Error()) + } + gatewayURL.Path = path.Join(gatewayURL.Path, functionPath, functionName) + if len(namespace) > 0 { + q := gatewayURL.Query() + q.Set("namespace", namespace) + gatewayURL.RawQuery = q.Encode() + } + return gatewayURL.String(), nil +} + +func createNamespacesEndpoint(gateway string) (string, error) { + gatewayURL, err := url.Parse(gateway) + if err != nil { + return "", fmt.Errorf("invalid gateway URL: %s", err.Error()) + } + gatewayURL.Path = path.Join(gatewayURL.Path, namespacesPath) + return gatewayURL.String(), nil +} diff --git a/vendor/github.com/openfaas/faas-cli/proxy/version.go b/vendor/github.com/openfaas/faas-cli/proxy/version.go new file mode 100644 index 0000000..0070b1f --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/proxy/version.go @@ -0,0 +1,53 @@ +// Copyright (c) OpenFaaS Author(s) 2018. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. 
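+
+// A usage sketch for GetSystemInfo below (hypothetical; "client" stands in
+// for a configured *Client):
+//
+//	info, err := client.GetSystemInfo(context.Background())
+//	if err == nil {
+//		fmt.Println(info["provider"], info["version"])
+//	}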
+
+package proxy
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+)
+
+// GetSystemInfo gets system information from the /system/info endpoint
+func (c *Client) GetSystemInfo(ctx context.Context) (map[string]interface{}, error) {
+	infoEndPoint := "/system/info"
+	req, err := c.newRequest(http.MethodGet, infoEndPoint, nil)
+	if err != nil {
+		return nil, fmt.Errorf("invalid HTTP method or invalid URL")
+	}
+
+	response, err := c.doRequest(ctx, req)
+	if err != nil {
+		return nil, fmt.Errorf("cannot connect to OpenFaaS on URL: %s", c.GatewayURL.String())
+	}
+
+	if response.Body != nil {
+		defer response.Body.Close()
+	}
+	info := make(map[string]interface{})
+
+	switch response.StatusCode {
+	case http.StatusOK:
+		bytesOut, err := ioutil.ReadAll(response.Body)
+		if err != nil {
+			return nil, fmt.Errorf("cannot read result from OpenFaaS on URL: %s", c.GatewayURL.String())
+		}
+		err = json.Unmarshal(bytesOut, &info)
+		if err != nil {
+			return nil, fmt.Errorf("cannot parse result from OpenFaaS on URL: %s\n%s", c.GatewayURL.String(), err.Error())
+		}
+
+	case http.StatusUnauthorized:
+		return nil, fmt.Errorf("unauthorized access, run \"faas-cli login\" to setup authentication for this server")
+	default:
+		bytesOut, err := ioutil.ReadAll(response.Body)
+		if err == nil {
+			return nil, fmt.Errorf("server returned unexpected status code: %d - %s", response.StatusCode, string(bytesOut))
+		}
+	}
+
+	return info, nil
+}
diff --git a/vendor/github.com/openfaas/faas-cli/schema/store/v2/store.go b/vendor/github.com/openfaas/faas-cli/schema/store/v2/store.go
new file mode 100644
index 0000000..5163a91
--- /dev/null
+++ b/vendor/github.com/openfaas/faas-cli/schema/store/v2/store.go
@@ -0,0 +1,32 @@
+// Copyright (c) OpenFaaS Author(s) 2019. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+package v2
+
+// StoreFunction represents a multi-arch function in the store
+type StoreFunction struct {
+	Icon                   string            `json:"icon"`
+	Title                  string            `json:"title"`
+	Description            string            `json:"description"`
+	Name                   string            `json:"name"`
+	Fprocess               string            `json:"fprocess"`
+	Network                string            `json:"network"`
+	RepoURL                string            `json:"repo_url"`
+	ReadOnlyRootFilesystem bool              `json:"readOnlyRootFilesystem"`
+	Environment            map[string]string `json:"environment"`
+	Labels                 map[string]string `json:"labels"`
+	Annotations            map[string]string `json:"annotations"`
+	Images                 map[string]string `json:"images"`
+}
+
+// GetImageName gets the image name of a function for a platform
+func (s *StoreFunction) GetImageName(platform string) string {
+	imageName := s.Images[platform]
+	return imageName
+}
+
+// Store represents an item of store for version 2
+type Store struct {
+	Version   string          `json:"version"`
+	Functions []StoreFunction `json:"functions"`
+}
diff --git a/vendor/github.com/openfaas/faas-cli/stack/language_template.go b/vendor/github.com/openfaas/faas-cli/stack/language_template.go
new file mode 100644
index 0000000..e3975fd
--- /dev/null
+++ b/vendor/github.com/openfaas/faas-cli/stack/language_template.go
@@ -0,0 +1,78 @@
+// Copyright (c) Alex Ellis 2017. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
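+
+// A usage sketch for ParseYAMLForLanguageTemplate below (hypothetical path;
+// the file follows the LanguageTemplate schema defined in schema.go):
+//
+//	lt, err := ParseYAMLForLanguageTemplate("template/go/template.yml")
+//	if err == nil {
+//		fmt.Println(lt.Language, lt.FProcess)
+//	}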
+ +package stack + +import ( + "fmt" + "io/ioutil" + "net/url" + "os" + "strings" + + yaml "gopkg.in/yaml.v2" +) + +func ParseYAMLForLanguageTemplate(file string) (*LanguageTemplate, error) { + var err error + var fileData []byte + + urlParsed, err := url.Parse(file) + if err == nil && len(urlParsed.Scheme) > 0 { + fmt.Println("Parsed: " + urlParsed.String()) + fileData, err = fetchYAML(urlParsed) + if err != nil { + return nil, err + } + } else { + fileData, err = ioutil.ReadFile(file) + if err != nil { + return nil, err + } + } + + return ParseYAMLDataForLanguageTemplate(fileData) +} + +// ParseYAMLDataForLanguageTemplate parses YAML data into language template +func ParseYAMLDataForLanguageTemplate(fileData []byte) (*LanguageTemplate, error) { + var langTemplate LanguageTemplate + var err error + + err = yaml.Unmarshal(fileData, &langTemplate) + if err != nil { + fmt.Printf("Error with YAML file\n") + return nil, err + } + + return &langTemplate, err +} + +func IsValidTemplate(lang string) bool { + var found bool + + lang = strings.ToLower(lang) + + if _, err := os.Stat("./template/" + lang); err == nil { + templateYAMLPath := "./template/" + lang + "/template.yml" + + if _, err := ParseYAMLForLanguageTemplate(templateYAMLPath); err == nil { + found = true + } + } + + return found +} + +//LoadLanguageTemplate loads language template details from template.yml file. +func LoadLanguageTemplate(lang string) (*LanguageTemplate, error) { + lang = strings.ToLower(lang) + _, err := os.Stat("./template/" + lang) + + if err == nil { + templateYAMLPath := "./template/" + lang + "/template.yml" + languageTemplate, err := ParseYAMLForLanguageTemplate(templateYAMLPath) + return languageTemplate, err + } + return nil, err +} diff --git a/vendor/github.com/openfaas/faas-cli/stack/schema.go b/vendor/github.com/openfaas/faas-cli/stack/schema.go new file mode 100644 index 0000000..eaa473d --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/stack/schema.go @@ -0,0 +1,130 @@ +// Copyright (c) Alex Ellis 2017. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. + +package stack + +// Provider for the FaaS set of functions. +type Provider struct { + Name string `yaml:"name"` + GatewayURL string `yaml:"gateway"` + Network string `yaml:"network"` +} + +// Function as deployed or built on FaaS +type Function struct { + // Name of deployed function + Name string `yaml:"-"` + + Language string `yaml:"lang"` + + // Handler Local folder to use for function + Handler string `yaml:"handler"` + + // Image Docker image name + Image string `yaml:"image"` + + // Docker registry Authorization + RegistryAuth string `yaml:"registry_auth,omitempty"` + + FProcess string `yaml:"fprocess"` + + Environment map[string]string `yaml:"environment"` + + // Secrets list of secrets to be made available to function + Secrets []string `yaml:"secrets,omitempty"` + + SkipBuild bool `yaml:"skip_build,omitempty"` + + Constraints *[]string `yaml:"constraints,omitempty"` + + // EnvironmentFile is a list of files to import and override environmental variables. + // These are overriden in order. 
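+	// For example: environment_file: ["common.yml", "prod.yml"], where values
+	// from later files take precedence over earlier ones.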
+ EnvironmentFile []string `yaml:"environment_file,omitempty"` + + Labels *map[string]string `yaml:"labels,omitempty"` + + // Limits for function + Limits *FunctionResources `yaml:"limits,omitempty"` + + // Requests of resources requested by function + Requests *FunctionResources `yaml:"requests,omitempty"` + + // ReadOnlyRootFilesystem is used to set the container filesystem to read-only + ReadOnlyRootFilesystem bool `yaml:"readonly_root_filesystem,omitempty"` + + // BuildOptions to determine native packages + BuildOptions []string `yaml:"build_options,omitempty"` + + // Annotations + Annotations *map[string]string `yaml:"annotations,omitempty"` + + // Namespace of the function + Namespace string `yaml:"namespace,omitempty"` + + // BuildArgs for providing build-args + BuildArgs map[string]string `yaml:"build_args,omitempty"` + + // Platforms for use with buildx and faas-cli publish + Platforms string `yaml:"platforms,omitempty"` +} + +// Configuration for the stack.yml file +type Configuration struct { + StackConfig StackConfiguration `yaml:"configuration"` +} + +// StackConfiguration for the overall stack.yml +type StackConfiguration struct { + TemplateConfigs []TemplateSource `yaml:"templates"` + + // CopyExtraPaths specifies additional paths (relative to the stack file) that will be copied + // into the functions build context, e.g. specifying `"common"` will look for and copy the + // "common/" folder of file in the same root as the stack file. All paths must be contained + // within the project root defined by the location of the stack file. + // + // The yaml uses the shorter name `copy` to make it easier for developers to read and use + CopyExtraPaths []string `yaml:"copy"` +} + +// TemplateSource for build templates +type TemplateSource struct { + Name string `yaml:"name"` + Source string `yaml:"source,omitempty"` +} + +// FunctionResources Memory and CPU +type FunctionResources struct { + Memory string `yaml:"memory"` + CPU string `yaml:"cpu"` +} + +// EnvironmentFile represents external file for environment data +type EnvironmentFile struct { + Environment map[string]string `yaml:"environment"` +} + +// Services root level YAML file to define FaaS function-set +type Services struct { + Version string `yaml:"version,omitempty"` + Functions map[string]Function `yaml:"functions,omitempty"` + Provider Provider `yaml:"provider,omitempty"` + StackConfiguration StackConfiguration `yaml:"configuration,omitempty"` +} + +// LanguageTemplate read from template.yml within root of a language template folder +type LanguageTemplate struct { + Language string `yaml:"language,omitempty"` + FProcess string `yaml:"fprocess,omitempty"` + + BuildOptions []BuildOption `yaml:"build_options,omitempty"` + // WelcomeMessage is printed to the user after generating a function + WelcomeMessage string `yaml:"welcome_message,omitempty"` + // HandlerFolder to copy the function code into + HandlerFolder string `yaml:"handler_folder,omitempty"` +} + +// BuildOption a named build option for one or more packages +type BuildOption struct { + Name string `yaml:"name"` + Packages []string `yaml:"packages"` +} diff --git a/vendor/github.com/openfaas/faas-cli/stack/stack.go b/vendor/github.com/openfaas/faas-cli/stack/stack.go new file mode 100644 index 0000000..c68b01c --- /dev/null +++ b/vendor/github.com/openfaas/faas-cli/stack/stack.go @@ -0,0 +1,189 @@ +// Copyright (c) Alex Ellis 2017. All rights reserved. +// Licensed under the MIT license. See LICENSE file in the project root for full license information. 
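+
+// A usage sketch for ParseYAMLFile below (hypothetical file name; the final
+// argument enables ${VAR}-style environment substitution):
+//
+//	services, err := ParseYAMLFile("stack.yml", "", "", true)
+//	if err == nil {
+//		for name := range services.Functions {
+//			fmt.Println(name)
+//		}
+//	}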
+ +package stack + +import ( + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "regexp" + "time" + + envsubst "github.com/drone/envsubst" + glob "github.com/ryanuber/go-glob" + yaml "gopkg.in/yaml.v2" +) + +const legacyProviderName = "faas" +const providerName = "openfaas" +const defaultSchemaVersion = "1.0" + +// ValidSchemaVersions available schema versions +var ValidSchemaVersions = []string{ + "1.0", +} + +// ParseYAMLFile parse YAML file into a stack of "services". +func ParseYAMLFile(yamlFile, regex, filter string, envsubst bool) (*Services, error) { + var err error + var fileData []byte + urlParsed, err := url.Parse(yamlFile) + if err == nil && len(urlParsed.Scheme) > 0 { + fmt.Println("Parsed: " + urlParsed.String()) + fileData, err = fetchYAML(urlParsed) + if err != nil { + return nil, err + } + } else { + fileData, err = ioutil.ReadFile(yamlFile) + if err != nil { + return nil, err + } + } + return ParseYAMLData(fileData, regex, filter, envsubst) +} + +func substituteEnvironment(data []byte) ([]byte, error) { + + ret, err := envsubst.Parse(string(data)) + if err != nil { + return nil, err + } + + res, resErr := ret.Execute(func(input string) string { + if val, ok := os.LookupEnv(input); ok { + return val + } + return "" + }) + + return []byte(res), resErr +} + +// ParseYAMLData parse YAML data into a stack of "services". +func ParseYAMLData(fileData []byte, regex string, filter string, envsubst bool) (*Services, error) { + var services Services + regexExists := len(regex) > 0 + filterExists := len(filter) > 0 + + var source []byte + if envsubst { + substData, substErr := substituteEnvironment(fileData) + + if substErr != nil { + return &services, substErr + } + source = substData + } else { + source = fileData + } + + err := yaml.Unmarshal(source, &services) + if err != nil { + fmt.Printf("Error with YAML file\n") + return nil, err + } + + for _, f := range services.Functions { + if f.Language == "Dockerfile" { + f.Language = "dockerfile" + } + } + + if services.Provider.Name != providerName { + return nil, fmt.Errorf(`['%s'] is the only valid "provider.name" for the OpenFaaS CLI, but you gave: %s`, providerName, services.Provider.Name) + } + + if len(services.Version) > 0 && !IsValidSchemaVersion(services.Version) { + return nil, fmt.Errorf("%s are the only valid versions for the stack file - found: %s", ValidSchemaVersions, services.Version) + } + + if regexExists && filterExists { + return nil, fmt.Errorf("pass in a regex or a filter, not both") + } + + if regexExists || filterExists { + for k, function := range services.Functions { + var match bool + var err error + function.Name = k + + if regexExists { + match, err = regexp.MatchString(regex, function.Name) + if err != nil { + return nil, err + } + } else { + match = glob.Glob(filter, function.Name) + } + + if !match { + delete(services.Functions, function.Name) + } + } + + if len(services.Functions) == 0 { + return nil, fmt.Errorf("no functions matching --filter/--regex were found in the YAML file") + } + + } + + return &services, nil +} + +func makeHTTPClient(timeout *time.Duration) http.Client { + if timeout != nil { + return http.Client{ + Timeout: *timeout, + Transport: &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: (&net.Dialer{ + Timeout: *timeout, + // KeepAlive: 0, + }).DialContext, + // MaxIdleConns: 1, + // DisableKeepAlives: true, + IdleConnTimeout: 120 * time.Millisecond, + ExpectContinueTimeout: 1500 * time.Millisecond, + }, + } + } + + // This should be used for faas-cli 
invoke etc. + return http.Client{} +} + +// fetchYAML pulls in file from remote location such as GitHub raw file-view +func fetchYAML(address *url.URL) ([]byte, error) { + req, err := http.NewRequest(http.MethodGet, address.String(), nil) + if err != nil { + return nil, err + } + + timeout := 120 * time.Second + client := makeHTTPClient(&timeout) + + res, err := client.Do(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + + resBytes, err := ioutil.ReadAll(res.Body) + + return resBytes, err +} + +// IsValidSchemaVersion validates schema version +func IsValidSchemaVersion(schemaVersion string) bool { + for _, validVersion := range ValidSchemaVersions { + if schemaVersion == validVersion { + return true + } + } + return false +} diff --git a/vendor/github.com/openfaas/faas-provider/auth/basic_auth.go b/vendor/github.com/openfaas/faas-provider/auth/basic_auth.go index 3fd7d75..01745e1 100644 --- a/vendor/github.com/openfaas/faas-provider/auth/basic_auth.go +++ b/vendor/github.com/openfaas/faas-provider/auth/basic_auth.go @@ -4,6 +4,7 @@ package auth import ( + "crypto/subtle" "net/http" ) @@ -12,10 +13,13 @@ func DecorateWithBasicAuth(next http.HandlerFunc, credentials *BasicAuthCredenti return func(w http.ResponseWriter, r *http.Request) { user, password, ok := r.BasicAuth() - w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) - if !ok || !(credentials.Password == password && user == credentials.User) { + const noMatch = 0 + if !ok || + user != credentials.User || + subtle.ConstantTimeCompare([]byte(credentials.Password), []byte(password)) == noMatch { + w.Header().Set("WWW-Authenticate", `Basic realm="Restricted"`) w.WriteHeader(http.StatusUnauthorized) w.Write([]byte("invalid credentials")) return diff --git a/vendor/github.com/openfaas/faas-provider/auth/credentials.go b/vendor/github.com/openfaas/faas-provider/auth/credentials.go index 4f2ca34..f7512bd 100644 --- a/vendor/github.com/openfaas/faas-provider/auth/credentials.go +++ b/vendor/github.com/openfaas/faas-provider/auth/credentials.go @@ -17,11 +17,15 @@ type BasicAuthCredentials struct { } type ReadBasicAuth interface { - Read() (error, *BasicAuthCredentials) + Read() (*BasicAuthCredentials, error) } type ReadBasicAuthFromDisk struct { SecretMountPath string + + UserFilename string + + PasswordFilename string } func (r *ReadBasicAuthFromDisk) Read() (*BasicAuthCredentials, error) { @@ -31,13 +35,23 @@ func (r *ReadBasicAuthFromDisk) Read() (*BasicAuthCredentials, error) { return nil, fmt.Errorf("invalid SecretMountPath specified for reading secrets") } - userPath := path.Join(r.SecretMountPath, "basic-auth-user") + userKey := "basic-auth-user" + if len(r.UserFilename) > 0 { + userKey = r.UserFilename + } + + passwordKey := "basic-auth-password" + if len(r.PasswordFilename) > 0 { + passwordKey = r.PasswordFilename + } + + userPath := path.Join(r.SecretMountPath, userKey) user, userErr := ioutil.ReadFile(userPath) if userErr != nil { return nil, fmt.Errorf("unable to load %s", userPath) } - userPassword := path.Join(r.SecretMountPath, "basic-auth-password") + userPassword := path.Join(r.SecretMountPath, passwordKey) password, passErr := ioutil.ReadFile(userPassword) if passErr != nil { return nil, fmt.Errorf("Unable to load %s", userPassword) diff --git a/vendor/github.com/openfaas/faas-provider/httputil/writers.go b/vendor/github.com/openfaas/faas-provider/httputil/writers.go new file mode 100644 index 0000000..c07e993 --- /dev/null +++ b/vendor/github.com/openfaas/faas-provider/httputil/writers.go 
@@ -0,0 +1,12 @@
+package httputil
+
+import (
+	"fmt"
+	"net/http"
+)
+
+// Errorf sets the response status code and writes the formatted message as the
+// response body
+func Errorf(w http.ResponseWriter, statusCode int, msg string, args ...interface{}) {
+	http.Error(w, fmt.Sprintf(msg, args...), statusCode)
+}
diff --git a/vendor/github.com/openfaas/faas-provider/logs/handler.go b/vendor/github.com/openfaas/faas-provider/logs/handler.go
new file mode 100644
index 0000000..70616d8
--- /dev/null
+++ b/vendor/github.com/openfaas/faas-provider/logs/handler.go
@@ -0,0 +1,144 @@
+package logs
+
+import (
+	"context"
+	"encoding/json"
+	"log"
+	"net/http"
+	"net/url"
+	"strconv"
+	"time"
+
+	"github.com/openfaas/faas-provider/httputil"
+)
+
+// Requester submits queries to the logging system.
+// This will be passed to the log handler constructor.
+type Requester interface {
+	// Query submits a log request to the actual logging system.
+	Query(context.Context, Request) (<-chan Message, error)
+}
+
+// NewLogHandlerFunc creates an http HandlerFunc from the supplied log Requester.
+func NewLogHandlerFunc(requester Requester, timeout time.Duration) http.HandlerFunc {
+	return func(w http.ResponseWriter, r *http.Request) {
+		if r.Body != nil {
+			defer r.Body.Close()
+		}
+
+		cn, ok := w.(http.CloseNotifier)
+		if !ok {
+			log.Println("LogHandler: response is not a CloseNotifier, required for streaming response")
+			http.NotFound(w, r)
+			return
+		}
+		flusher, ok := w.(http.Flusher)
+		if !ok {
+			log.Println("LogHandler: response is not a Flusher, required for streaming response")
+			http.NotFound(w, r)
+			return
+		}
+
+		logRequest, err := parseRequest(r)
+		if err != nil {
+			log.Printf("LogHandler: could not parse request %s", err)
+			httputil.Errorf(w, http.StatusUnprocessableEntity, "could not parse the log request")
+			return
+		}
+
+		ctx, cancelQuery := context.WithTimeout(r.Context(), timeout)
+		defer cancelQuery()
+		messages, err := requester.Query(ctx, logRequest)
+		if err != nil {
+			// add smarter error handling here
+			httputil.Errorf(w, http.StatusInternalServerError, "function log request failed")
+			return
+		}
+
+		// Send the initial headers indicating that we're going to stream the response.
+		w.Header().Set("Connection", "Keep-Alive")
+		w.Header().Set("Transfer-Encoding", "chunked")
+		w.Header().Set(http.CanonicalHeaderKey("Content-Type"), "application/x-ndjson")
+		w.WriteHeader(http.StatusOK)
+		flusher.Flush()
+
+		// Ensure that we always try to send the closing chunk; the ordering looks inverted
+		// because of how the defer stack unwinds. We need two flush statements to ensure that
+		// the empty slice is sent as its own chunk
+		defer flusher.Flush()
+		defer w.Write([]byte{})
+		defer flusher.Flush()
+
+		jsonEncoder := json.NewEncoder(w)
+		for messages != nil {
+			select {
+			case <-cn.CloseNotify():
+				log.Println("LogHandler: client stopped listening")
+				return
+			case msg, ok := <-messages:
+				if !ok {
+					log.Println("LogHandler: end of log stream")
+					messages = nil
+					return
+				}
+
+				// serialize and write the msg to the http ResponseWriter
+				err := jsonEncoder.Encode(msg)
+				if err != nil {
+					// can't actually write the status header here, so we should JSON-serialize an
+					// error and return that, because we have already sent the content type and
+					// status code
+					log.Printf("LogHandler: failed to serialize log message: '%s'\n", msg.String())
+					log.Println(err.Error())
+					// write json error message here ?
+					jsonEncoder.Encode(Message{Text: "failed to serialize log message"})
+					flusher.Flush()
+					return
+				}
+
+				flusher.Flush()
+			}
+		}
+
+		return
+	}
+}
+
+// parseRequest extracts the logRequest from the request's query string values
+func parseRequest(r *http.Request) (logRequest Request, err error) {
+	query := r.URL.Query()
+	logRequest.Name = getValue(query, "name")
+	logRequest.Namespace = getValue(query, "namespace")
+	logRequest.Instance = getValue(query, "instance")
+	tailStr := getValue(query, "tail")
+	if tailStr != "" {
+		logRequest.Tail, err = strconv.Atoi(tailStr)
+		if err != nil {
+			return logRequest, err
+		}
+	}
+
+	// ignore error because it will default to false if we can't parse it
+	logRequest.Follow, _ = strconv.ParseBool(getValue(query, "follow"))
+
+	sinceStr := getValue(query, "since")
+	if sinceStr != "" {
+		since, err := time.Parse(time.RFC3339, sinceStr)
+		if err != nil {
+			return logRequest, err
+		}
+		logRequest.Since = &since
+	}
+
+	return logRequest, nil
+}
+
+// getValue returns the value for the given key. If the key has more than one value, it returns the
+// last value. If the value does not exist, it returns the empty string.
+func getValue(queryValues url.Values, name string) string {
+	values := queryValues[name]
+	if len(values) == 0 {
+		return ""
+	}
+
+	return values[len(values)-1]
+}
diff --git a/vendor/github.com/openfaas/faas-provider/logs/logs.go b/vendor/github.com/openfaas/faas-provider/logs/logs.go
new file mode 100644
index 0000000..8dc076a
--- /dev/null
+++ b/vendor/github.com/openfaas/faas-provider/logs/logs.go
@@ -0,0 +1,66 @@
+// Package logs provides the standard interface and handler for OpenFaaS providers to expose function logs.
+//
+// The package defines the Requester interface that OpenFaaS providers should implement and then expose using
+// the predefined NewLogHandlerFunc. See the example folder for a minimal log provider implementation.
+//
+// The Requester is where the provider-specific logic for connecting to and querying the log system should be implemented.
+//
+package logs
+
+import (
+	"fmt"
+	"time"
+)
+
+// Request is the query to return the function logs.
+type Request struct {
+	// Name is the function name and is required
+	Name string `json:"name"`
+	// Namespace is the namespace the function is deployed to, how a namespace is defined
+	// is faas-provider specific
+	Namespace string `json:"namespace"`
+	// Instance is the optional container name that allows you to request logs from a specific function instance
+	Instance string `json:"instance"`
+	// Since is the optional datetime value to start the logs from
+	Since *time.Time `json:"since"`
+	// Tail sets the maximum number of log messages to return, <=0 means unlimited
+	Tail int `json:"tail"`
+	// Follow allows the user to request a stream of logs until the timeout
+	Follow bool `json:"follow"`
+}
+
+// String implements the Stringer interface and prints the log Request in a consistent way that
+// allows you to safely compare if two requests have the same value.
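+//
+// For example (hypothetical request):
+//
+//	r := Request{Name: "echo", Namespace: "openfaas-fn", Tail: 10, Follow: true}
+//	fmt.Println(r)
+//	// name: echo namespace: openfaas-fn instance:  since: <nil> tail: 10 follow: true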
+func (r Request) String() string {
+	return fmt.Sprintf(
+		"name: %s namespace: %s instance: %s since: %v tail: %d follow: %v",
+		r.Name, r.Namespace, r.Instance, r.Since, r.Tail, r.Follow,
+	)
+}
+
+// Message is a specific log message from a function container log stream
+type Message struct {
+	// Name is the function name
+	Name string `json:"name"`
+	// Namespace is the namespace the function is deployed to, how a namespace is defined
+	// is faas-provider specific
+	Namespace string `json:"namespace"`
+	// Instance is the name/id of the specific function instance
+	Instance string `json:"instance"`
+	// Timestamp is the timestamp of when the log message was recorded
+	Timestamp time.Time `json:"timestamp"`
+	// Text is the raw log message content
+	Text string `json:"text"`
+}
+
+// String implements the Stringer interface and allows for nice and simple string formatting of a log Message.
+func (m Message) String() string {
+	ns := ""
+	if len(m.Namespace) > 0 {
+		ns = fmt.Sprintf("%s ", m.Namespace)
+	}
+	return fmt.Sprintf(
+		"%s %s (%s%s) %s",
+		m.Timestamp.String(), m.Name, ns, m.Instance, m.Text,
+	)
+}
diff --git a/vendor/github.com/openfaas/faas-provider/types/config.go b/vendor/github.com/openfaas/faas-provider/types/config.go
new file mode 100644
index 0000000..6f4fd15
--- /dev/null
+++ b/vendor/github.com/openfaas/faas-provider/types/config.go
@@ -0,0 +1,87 @@
+package types
+
+import (
+	"net/http"
+	"time"
+)
+
+const (
+	defaultReadTimeout  = 10 * time.Second
+	defaultMaxIdleConns = 1024
+)
+
+// FaaSHandlers provides handlers for OpenFaaS
+type FaaSHandlers struct {
+	// FunctionProxy provides the function invocation proxy logic. Use proxy.NewHandlerFunc to
+	// use the standard OpenFaaS proxy implementation or provide completely custom proxy logic.
+	FunctionProxy http.HandlerFunc
+
+	FunctionReader http.HandlerFunc
+	DeployHandler  http.HandlerFunc
+
+	DeleteHandler  http.HandlerFunc
+	ReplicaReader  http.HandlerFunc
+	ReplicaUpdater http.HandlerFunc
+	SecretHandler  http.HandlerFunc
+	// LogHandler provides streaming JSON logs of functions
+	LogHandler http.HandlerFunc
+
+	// UpdateHandler updates an existing function/service
+	UpdateHandler http.HandlerFunc
+	// HealthHandler defines the default health endpoint bound to "/healthz".
+	// If the handler is not set, then the "/healthz" path will not be configured
+	HealthHandler        http.HandlerFunc
+	InfoHandler          http.HandlerFunc
+	ListNamespaceHandler http.HandlerFunc
+}
+
+// FaaSConfig sets the configuration for the HTTP handlers
+type FaaSConfig struct {
+	// TCPPort is the public port for the API.
+	TCPPort *int
+	// HTTP timeout for reading a request from clients.
+	ReadTimeout time.Duration
+	// HTTP timeout for writing a response from functions.
+	WriteTimeout time.Duration
+	// EnableHealth enables/disables the default health endpoint bound to "/healthz".
+	//
+	// Deprecated: the health endpoint is enabled automatically by setting the HealthHandler in the
+	// FaaSHandlers struct. This value is no longer read or used.
+	EnableHealth bool
+	// EnableBasicAuth enforces basic auth on the API. If set, reads secrets from the file-system
+	// location specified in `SecretMountPath`.
+	EnableBasicAuth bool
+	// SecretMountPath specifies where to read secrets from for embedded basic auth.
+	SecretMountPath string
+	// MaxIdleConns with a default value of 1024, can be used for tuning HTTP proxy performance.
+	MaxIdleConns int
+	// MaxIdleConnsPerHost with a default value of 1024, can be used for tuning HTTP proxy performance.
+	MaxIdleConnsPerHost int
+}
+
+// GetReadTimeout is a helper to safely return the configured ReadTimeout or the default value of 10s
+func (c *FaaSConfig) GetReadTimeout() time.Duration {
+	if c.ReadTimeout <= 0*time.Second {
+		return defaultReadTimeout
+	}
+	return c.ReadTimeout
+}
+
+// GetMaxIdleConns is a helper to safely return the configured MaxIdleConns or the default value of 1024
+func (c *FaaSConfig) GetMaxIdleConns() int {
+	if c.MaxIdleConns < 1 {
+		return defaultMaxIdleConns
+	}
+
+	return c.MaxIdleConns
+}
+
+// GetMaxIdleConnsPerHost is a helper to safely return the configured MaxIdleConnsPerHost or the
+// default value, which should then match the MaxIdleConns
+func (c *FaaSConfig) GetMaxIdleConnsPerHost() int {
+	if c.MaxIdleConnsPerHost < 1 {
+		return c.GetMaxIdleConns()
+	}
+
+	return c.MaxIdleConnsPerHost
+}
diff --git a/vendor/github.com/openfaas/faas-provider/types/model.go b/vendor/github.com/openfaas/faas-provider/types/model.go
new file mode 100644
index 0000000..9e73c21
--- /dev/null
+++ b/vendor/github.com/openfaas/faas-provider/types/model.go
@@ -0,0 +1,99 @@
+package types
+
+// FunctionDeployment represents a request to create or update a Function.
+type FunctionDeployment struct {
+
+	// Service corresponds to a Service
+	Service string `json:"service"`
+
+	// Image corresponds to a Docker image
+	Image string `json:"image"`
+
+	// Network is specific to Docker Swarm - default overlay network is: func_functions
+	Network string `json:"network"`
+
+	// EnvProcess corresponds to the fprocess variable for your container watchdog.
+	EnvProcess string `json:"envProcess"`
+
+	// EnvVars provides overrides for functions.
+	EnvVars map[string]string `json:"envVars"`
+
+	// RegistryAuth is the registry authentication (optional)
+	// in the same encoded format as Docker native credentials
+	// (see ~/.docker/config.json)
+	RegistryAuth string `json:"registryAuth,omitempty"`
+
+	// Constraints are specific to the back-end orchestration platform
+	Constraints []string `json:"constraints"`
+
+	// Secrets is a list of secrets to be made available to the function
+	Secrets []string `json:"secrets"`
+
+	// Labels are metadata for functions which may be used by the
+	// back-end for making scheduling or routing decisions
+	Labels *map[string]string `json:"labels"`
+
+	// Annotations are metadata for functions which may be used by the
+	// back-end for management, orchestration, events and build tasks
+	Annotations *map[string]string `json:"annotations"`
+
+	// Limits for function
+	Limits *FunctionResources `json:"limits"`
+
+	// Requests of resources requested by function
+	Requests *FunctionResources `json:"requests"`
+
+	// ReadOnlyRootFilesystem removes write-access from the root filesystem
+	// mount-point.
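+	// Functions that need scratch space must then write to a separately
+	// mounted path, such as the writable /tmp that OpenFaaS providers
+	// typically provide.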
+	ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem"`
+
+	// Namespace for the function to be deployed into
+	Namespace string `json:"namespace,omitempty"`
+}
+
+// FunctionResources defines the Memory and CPU of a function
+type FunctionResources struct {
+	Memory string `json:"memory"`
+	CPU    string `json:"cpu"`
+}
+
+// FunctionStatus is exported for the system/functions endpoint
+type FunctionStatus struct {
+
+	// Name corresponds to a Service
+	Name string `json:"name"`
+
+	// Image corresponds to a Docker image
+	Image string `json:"image"`
+
+	// InvocationCount is the count of invocations
+	InvocationCount float64 `json:"invocationCount"`
+
+	// Replicas is the number of replicas desired within the cluster
+	Replicas uint64 `json:"replicas"`
+
+	// EnvProcess is the process to pass to the watchdog, if in use
+	EnvProcess string `json:"envProcess"`
+
+	// AvailableReplicas is the count of replicas ready to receive
+	// invocations as reported by the backend
+	AvailableReplicas uint64 `json:"availableReplicas"`
+
+	// Labels are metadata for functions which may be used by the
+	// backend for making scheduling or routing decisions
+	Labels *map[string]string `json:"labels"`
+
+	// Annotations are metadata for functions which may be used by the
+	// backend for management, orchestration, events and build tasks
+	Annotations *map[string]string `json:"annotations"`
+
+	// Namespace where the function can be accessed
+	Namespace string `json:"namespace,omitempty"`
+}
+
+// Secret is a secret for the underlying orchestrator
+type Secret struct {
+	Name      string `json:"name"`
+	Namespace string `json:"namespace,omitempty"`
+	Value     string `json:"value,omitempty"`
+}
diff --git a/vendor/github.com/openfaas/faas-provider/types/read_config.go b/vendor/github.com/openfaas/faas-provider/types/read_config.go
new file mode 100644
index 0000000..d28a9d1
--- /dev/null
+++ b/vendor/github.com/openfaas/faas-provider/types/read_config.go
@@ -0,0 +1,112 @@
+package types
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+	"time"
+)
+
+// OsEnv implements the HasEnv interface by wrapping os.Getenv
+type OsEnv struct {
+}
+
+// Getenv wraps os.Getenv
+func (OsEnv) Getenv(key string) string {
+	return os.Getenv(key)
+}
+
+// HasEnv provides an interface for os.Getenv
+type HasEnv interface {
+	Getenv(key string) string
+}
+
+// ReadConfig reads config from environment variables
+type ReadConfig struct {
+}
+
+// ParseIntValue parses the int in val or, if there is an error, returns the
+// specified default value
+func ParseIntValue(val string, fallback int) int {
+	if len(val) > 0 {
+		parsedVal, parseErr := strconv.Atoi(val)
+		if parseErr == nil && parsedVal >= 0 {
+			return parsedVal
+		}
+	}
+	return fallback
+}
+
+// ParseIntOrDurationValue parses the duration in val or, if there is an error, returns the
+// specified default value
+func ParseIntOrDurationValue(val string, fallback time.Duration) time.Duration {
+	if len(val) > 0 {
+		parsedVal, parseErr := strconv.Atoi(val)
+		if parseErr == nil && parsedVal >= 0 {
+			return time.Duration(parsedVal) * time.Second
+		}
+	}
+
+	duration, durationErr := time.ParseDuration(val)
+	if durationErr != nil {
+		return fallback
+	}
+
+	return duration
+}
+
+// ParseBoolValue parses the boolean in val or, if there is an error, returns the
+// specified default value
+func ParseBoolValue(val string, fallback bool) bool {
+	if len(val) > 0 {
+		return val == "true"
+	}
+	return fallback
+}
+
+// ParseString verifies the string in val is not empty. When empty, it returns the
+// specified default value
+func ParseString(val string, fallback string) string {
+	if len(val) > 0 {
+		return val
+	}
+	return fallback
+}
+
+// Read fetches config from environment variables.
+func (ReadConfig) Read(hasEnv HasEnv) (*FaaSConfig, error) {
+	cfg := &FaaSConfig{
+		ReadTimeout:     ParseIntOrDurationValue(hasEnv.Getenv("read_timeout"), time.Second*10),
+		WriteTimeout:    ParseIntOrDurationValue(hasEnv.Getenv("write_timeout"), time.Second*10),
+		EnableBasicAuth: ParseBoolValue(hasEnv.Getenv("basic_auth"), false),
+		// default value from Gateway
+		SecretMountPath: ParseString(hasEnv.Getenv("secret_mount_path"), "/run/secrets/"),
+	}
+
+	port := ParseIntValue(hasEnv.Getenv("port"), 8080)
+	cfg.TCPPort = &port
+
+	cfg.MaxIdleConns = 1024
+	maxIdleConns := hasEnv.Getenv("max_idle_conns")
+	if len(maxIdleConns) > 0 {
+		val, err := strconv.Atoi(maxIdleConns)
+		if err != nil {
+			return nil, fmt.Errorf("invalid value for max_idle_conns: %s", maxIdleConns)
+		}
+		cfg.MaxIdleConns = val
+
+	}
+
+	cfg.MaxIdleConnsPerHost = 1024
+	maxIdleConnsPerHost := hasEnv.Getenv("max_idle_conns_per_host")
+	if len(maxIdleConnsPerHost) > 0 {
+		val, err := strconv.Atoi(maxIdleConnsPerHost)
+		if err != nil {
+			return nil, fmt.Errorf("invalid value for max_idle_conns_per_host: %s", maxIdleConnsPerHost)
+		}
+		cfg.MaxIdleConnsPerHost = val
+
+	}
+
+	return cfg, nil
+}
diff --git a/vendor/github.com/openfaas/faas-provider/types/requests.go b/vendor/github.com/openfaas/faas-provider/types/requests.go
new file mode 100644
index 0000000..bef4af4
--- /dev/null
+++ b/vendor/github.com/openfaas/faas-provider/types/requests.go
@@ -0,0 +1,22 @@
+// Copyright (c) Alex Ellis 2017. All rights reserved.
+// Licensed under the MIT license. See LICENSE file in the project root for full license information.
+
+package types
+
+type ScaleServiceRequest struct {
+	ServiceName string `json:"serviceName"`
+	Replicas    uint64 `json:"replicas"`
+}
+
+// InfoResponse provides information about the underlying provider
+type InfoResponse struct {
+	Provider      string          `json:"provider"`
+	Version       ProviderVersion `json:"version"`
+	Orchestration string          `json:"orchestration"`
+}
+
+// ProviderVersion provides the commit sha and release version number of the underlying provider
+type ProviderVersion struct {
+	SHA     string `json:"sha"`
+	Release string `json:"release"`
+}
diff --git a/vendor/github.com/openfaas/faas/gateway/requests/requests.go b/vendor/github.com/openfaas/faas/gateway/requests/requests.go
index 5c8f174..2528277 100644
--- a/vendor/github.com/openfaas/faas/gateway/requests/requests.go
+++ b/vendor/github.com/openfaas/faas/gateway/requests/requests.go
@@ -5,80 +5,6 @@
 // the OpenFaaS gateway REST API
 package requests
 
-// CreateFunctionRequest create a function in the swarm.
-type CreateFunctionRequest struct {
-
-	// Service corresponds to a Docker Service
-	Service string `json:"service"`
-
-	// Image corresponds to a Docker image
-	Image string `json:"image"`
-
-	// Network is specific to Docker Swarm - default overlay network is: func_functions
-	Network string `json:"network"`
-
-	// EnvProcess corresponds to the fprocess variable for your container watchdog.
-	EnvProcess string `json:"envProcess"`
-
-	// EnvVars provides overrides for functions.
- EnvVars map[string]string `json:"envVars"` - - // RegistryAuth is the registry authentication (optional) - // in the same encoded format as Docker native credentials - // (see ~/.docker/config.json) - RegistryAuth string `json:"registryAuth,omitempty"` - - // Constraints are specific to back-end orchestration platform - Constraints []string `json:"constraints"` - - // Secrets list of secrets to be made available to function - Secrets []string `json:"secrets"` - - // Labels are metadata for functions which may be used by the - // back-end for making scheduling or routing decisions - Labels *map[string]string `json:"labels"` - - // Annotations are metadata for functions which may be used by the - // back-end for management, orchestration, events and build tasks - Annotations *map[string]string `json:"annotations"` - - // Limits for function - Limits *FunctionResources `json:"limits"` - - // Requests of resources requested by function - Requests *FunctionResources `json:"requests"` - - // ReadOnlyRootFilesystem removes write-access from the root filesystem - // mount-point. - ReadOnlyRootFilesystem bool `json:"readOnlyRootFilesystem"` -} - -// FunctionResources Memory and CPU -type FunctionResources struct { - Memory string `json:"memory"` - CPU string `json:"cpu"` -} - -// Function exported for system/functions endpoint -type Function struct { - Name string `json:"name"` - Image string `json:"image"` - InvocationCount float64 `json:"invocationCount"` // TODO: shouldn't this be int64? - Replicas uint64 `json:"replicas"` - EnvProcess string `json:"envProcess"` - - // AvailableReplicas is the count of replicas ready to receive invocations as reported by the back-end - AvailableReplicas uint64 `json:"availableReplicas"` - - // Labels are metadata for functions which may be used by the - // back-end for making scheduling or routing decisions - Labels *map[string]string `json:"labels"` - - // Annotations are metadata for functions which may be used by the - // back-end for management, orchestration, events and build tasks - Annotations *map[string]string `json:"annotations"` -} - // AsyncReport is the report from a function executed on a queue worker. type AsyncReport struct { FunctionName string `json:"name"` @@ -90,9 +16,3 @@ type AsyncReport struct { type DeleteFunctionRequest struct { FunctionName string `json:"functionName"` } - -// Secret for underlying orchestrator -type Secret struct { - Name string `json:"name"` - Value string `json:"value,omitempty"` -} diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml index d4b9266..9159de0 100644 --- a/vendor/github.com/pkg/errors/.travis.yml +++ b/vendor/github.com/pkg/errors/.travis.yml @@ -1,15 +1,10 @@ language: go go_import_path: github.com/pkg/errors go: - - 1.4.x - - 1.5.x - - 1.6.x - - 1.7.x - - 1.8.x - - 1.9.x - - 1.10.x - 1.11.x + - 1.12.x + - 1.13.x - tip script: - - go test -v ./... 
+ - make check diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile new file mode 100644 index 0000000..ce9d7cd --- /dev/null +++ b/vendor/github.com/pkg/errors/Makefile @@ -0,0 +1,44 @@ +PKGS := github.com/pkg/errors +SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) +GO := go + +check: test vet gofmt misspell unconvert staticcheck ineffassign unparam + +test: + $(GO) test $(PKGS) + +vet: | test + $(GO) vet $(PKGS) + +staticcheck: + $(GO) get honnef.co/go/tools/cmd/staticcheck + staticcheck -checks all $(PKGS) + +misspell: + $(GO) get github.com/client9/misspell/cmd/misspell + misspell \ + -locale GB \ + -error \ + *.md *.go + +unconvert: + $(GO) get github.com/mdempsky/unconvert + unconvert -v $(PKGS) + +ineffassign: + $(GO) get github.com/gordonklaus/ineffassign + find $(SRCDIRS) -name '*.go' | xargs ineffassign + +pedantic: check errcheck + +unparam: + $(GO) get mvdan.cc/unparam + unparam ./... + +errcheck: + $(GO) get github.com/kisielk/errcheck + errcheck $(PKGS) + +gofmt: + @echo Checking code is gofmted + @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md index 6483ba2..54dfdcb 100644 --- a/vendor/github.com/pkg/errors/README.md +++ b/vendor/github.com/pkg/errors/README.md @@ -41,11 +41,18 @@ default: [Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). +## Roadmap + +With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: + +- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) +- 1.0. Final release. + ## Contributing -We welcome pull requests, bug fixes and issue reports. With that said, the bar for adding new symbols to this package is intentionally set high. +Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. -Before proposing a change, please discuss your change by raising an issue. +Before sending a PR, please discuss your change by raising an issue. ## License diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go index 7421f32..161aea2 100644 --- a/vendor/github.com/pkg/errors/errors.go +++ b/vendor/github.com/pkg/errors/errors.go @@ -82,7 +82,7 @@ // // if err, ok := err.(stackTracer); ok { // for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d", f) +// fmt.Printf("%+s:%d\n", f, f) // } // } // @@ -159,6 +159,9 @@ type withStack struct { func (w *withStack) Cause() error { return w.error } +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withStack) Unwrap() error { return w.error } + func (w *withStack) Format(s fmt.State, verb rune) { switch verb { case 'v': @@ -241,6 +244,9 @@ type withMessage struct { func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } func (w *withMessage) Cause() error { return w.cause } +// Unwrap provides compatibility for Go 1.13 error chains. 
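+// It returns the wrapped cause so that errors.Is and errors.As can traverse the chain.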
+func (w *withMessage) Unwrap() error { return w.cause } + func (w *withMessage) Format(s fmt.State, verb rune) { switch verb { case 'v': diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go new file mode 100644 index 0000000..be0d10d --- /dev/null +++ b/vendor/github.com/pkg/errors/go113.go @@ -0,0 +1,38 @@ +// +build go1.13 + +package errors + +import ( + stderrors "errors" +) + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { return stderrors.Is(err, target) } + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +func As(err error, target interface{}) bool { return stderrors.As(err, target) } + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. +func Unwrap(err error) error { + return stderrors.Unwrap(err) +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go index 2874a04..779a834 100644 --- a/vendor/github.com/pkg/errors/stack.go +++ b/vendor/github.com/pkg/errors/stack.go @@ -5,10 +5,13 @@ import ( "io" "path" "runtime" + "strconv" "strings" ) // Frame represents a program counter inside a stack frame. +// For historical reasons if Frame is interpreted as a uintptr +// its value represents the program counter + 1. type Frame uintptr // pc returns the program counter for this frame; @@ -37,6 +40,15 @@ func (f Frame) line() int { return line } +// name returns the name of this function, if known. +func (f Frame) name() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + return fn.Name() +} + // Format formats the frame according to the fmt.Formatter interface. // // %s source file @@ -54,22 +66,16 @@ func (f Frame) Format(s fmt.State, verb rune) { case 's': switch { case s.Flag('+'): - pc := f.pc() - fn := runtime.FuncForPC(pc) - if fn == nil { - io.WriteString(s, "unknown") - } else { - file, _ := fn.FileLine(pc) - fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file) - } + io.WriteString(s, f.name()) + io.WriteString(s, "\n\t") + io.WriteString(s, f.file()) default: io.WriteString(s, path.Base(f.file())) } case 'd': - fmt.Fprintf(s, "%d", f.line()) + io.WriteString(s, strconv.Itoa(f.line())) case 'n': - name := runtime.FuncForPC(f.pc()).Name() - io.WriteString(s, funcname(name)) + io.WriteString(s, funcname(f.name())) case 'v': f.Format(s, 's') io.WriteString(s, ":") @@ -77,6 +83,16 @@ func (f Frame) Format(s fmt.State, verb rune) { } } +// MarshalText formats a stacktrace Frame as a text string. 
The output is the +// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. +func (f Frame) MarshalText() ([]byte, error) { + name := f.name() + if name == "unknown" { + return []byte(name), nil + } + return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil +} + // StackTrace is stack of Frames from innermost (newest) to outermost (oldest). type StackTrace []Frame @@ -94,16 +110,30 @@ func (st StackTrace) Format(s fmt.State, verb rune) { switch { case s.Flag('+'): for _, f := range st { - fmt.Fprintf(s, "\n%+v", f) + io.WriteString(s, "\n") + f.Format(s, verb) } case s.Flag('#'): fmt.Fprintf(s, "%#v", []Frame(st)) default: - fmt.Fprintf(s, "%v", []Frame(st)) + st.formatSlice(s, verb) } case 's': - fmt.Fprintf(s, "%s", []Frame(st)) + st.formatSlice(s, verb) + } +} + +// formatSlice will format this StackTrace into the given buffer as a slice of +// Frame, only valid when called with '%s' or '%v'. +func (st StackTrace) formatSlice(s fmt.State, verb rune) { + io.WriteString(s, "[") + for i, f := range st { + if i > 0 { + io.WriteString(s, " ") + } + f.Format(s, verb) } + io.WriteString(s, "]") } // stack represents a stack of program counters. diff --git a/vendor/github.com/ryanuber/go-glob/.travis.yml b/vendor/github.com/ryanuber/go-glob/.travis.yml new file mode 100644 index 0000000..9d1ca3c --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - tip +script: + - go test -v ./... diff --git a/vendor/github.com/ryanuber/go-glob/LICENSE b/vendor/github.com/ryanuber/go-glob/LICENSE new file mode 100644 index 0000000..bdfbd95 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Ryan Uber + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/ryanuber/go-glob/README.md b/vendor/github.com/ryanuber/go-glob/README.md new file mode 100644 index 0000000..48f7fcb --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/README.md @@ -0,0 +1,29 @@ +# String globbing in golang [![Build Status](https://travis-ci.org/ryanuber/go-glob.svg)](https://travis-ci.org/ryanuber/go-glob) + +`go-glob` is a single-function library implementing basic string glob support. + +Globs are an extremely user-friendly way of supporting string matching without +requiring knowledge of regular expressions or Go's particular regex engine. Most +people understand that if you put a `*` character somewhere in a string, it is +treated as a wildcard. 
Surprisingly, this functionality isn't found in Go's +standard library, except for `path.Match`, which is intended to be used while +comparing paths (not arbitrary strings), and contains specialized logic for this +use case. A better solution might be a POSIX basic (non-ERE) regular expression +engine for Go, which doesn't exist currently. + +Example +======= + +``` +package main + +import "github.com/ryanuber/go-glob" + +func main() { + glob.Glob("*World!", "Hello, World!") // true + glob.Glob("Hello,*", "Hello, World!") // true + glob.Glob("*ello,*", "Hello, World!") // true + glob.Glob("World!", "Hello, World!") // false + glob.Glob("/home/*", "/home/ryanuber/.bashrc") // true +} +``` diff --git a/vendor/github.com/ryanuber/go-glob/glob.go b/vendor/github.com/ryanuber/go-glob/glob.go new file mode 100644 index 0000000..e67db3b --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/glob.go @@ -0,0 +1,56 @@ +package glob + +import "strings" + +// The character which is treated like a glob +const GLOB = "*" + +// Glob will test a string pattern, potentially containing globs, against a +// subject string. The result is a simple true/false, determining whether or +// not the glob pattern matched the subject text. +func Glob(pattern, subj string) bool { + // Empty pattern can only match empty subject + if pattern == "" { + return subj == pattern + } + + // If the pattern _is_ a glob, it matches everything + if pattern == GLOB { + return true + } + + parts := strings.Split(pattern, GLOB) + + if len(parts) == 1 { + // No globs in pattern, so test for equality + return subj == pattern + } + + leadingGlob := strings.HasPrefix(pattern, GLOB) + trailingGlob := strings.HasSuffix(pattern, GLOB) + end := len(parts) - 1 + + // Go over the leading parts and ensure they match. + for i := 0; i < end; i++ { + idx := strings.Index(subj, parts[i]) + + switch i { + case 0: + // Check the first section. Requires special handling. + if !leadingGlob && idx != 0 { + return false + } + default: + // Check that the middle parts match. + if idx < 0 { + return false + } + } + + // Trim evaluated text from subj as we loop over the pattern. + subj = subj[idx+len(parts[i]):] + } + + // Reached the last section. Requires special handling. + return trailingGlob || strings.HasSuffix(subj, parts[end]) +} diff --git a/vendor/github.com/ryanuber/go-glob/go.mod b/vendor/github.com/ryanuber/go-glob/go.mod new file mode 100644 index 0000000..f382035 --- /dev/null +++ b/vendor/github.com/ryanuber/go-glob/go.mod @@ -0,0 +1 @@ +module github.com/ryanuber/go-glob diff --git a/vendor/gopkg.in/yaml.v2/.travis.yml b/vendor/gopkg.in/yaml.v2/.travis.yml new file mode 100644 index 0000000..055480b --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/.travis.yml @@ -0,0 +1,16 @@ +language: go + +go: + - "1.4.x" + - "1.5.x" + - "1.6.x" + - "1.7.x" + - "1.8.x" + - "1.9.x" + - "1.10.x" + - "1.11.x" + - "1.12.x" + - "1.13.x" + - "tip" + +go_import_path: gopkg.in/yaml.v2 diff --git a/vendor/gopkg.in/yaml.v2/LICENSE b/vendor/gopkg.in/yaml.v2/LICENSE new file mode 100644 index 0000000..8dada3e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/gopkg.in/yaml.v2/LICENSE.libyaml b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml new file mode 100644 index 0000000..8da58fb --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/LICENSE.libyaml @@ -0,0 +1,31 @@ +The following files were ported to Go from C files of libyaml, and thus +are still covered by their original copyright and license: + + apic.go + emitterc.go + parserc.go + readerc.go + scannerc.go + writerc.go + yamlh.go + yamlprivateh.go + +Copyright (c) 2006 Kirill Simonov + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +of the Software, and to permit persons to whom the Software is furnished to do +so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/gopkg.in/yaml.v2/NOTICE b/vendor/gopkg.in/yaml.v2/NOTICE new file mode 100644 index 0000000..866d74a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/NOTICE @@ -0,0 +1,13 @@ +Copyright 2011-2016 Canonical Ltd. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vendor/gopkg.in/yaml.v2/README.md b/vendor/gopkg.in/yaml.v2/README.md new file mode 100644 index 0000000..b50c6e8 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/README.md @@ -0,0 +1,133 @@ +# YAML support for the Go language + +Introduction +------------ + +The yaml package enables Go programs to comfortably encode and decode YAML +values. It was developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a +pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) +C library to parse and generate YAML data quickly and reliably. + +Compatibility +------------- + +The yaml package supports most of YAML 1.1 and 1.2, including support for +anchors, tags, map merging, etc. Multi-document unmarshalling is not yet +implemented, and base-60 floats from YAML 1.1 are purposefully not +supported since they're a poor design and are gone in YAML 1.2. + +Installation and usage +---------------------- + +The import path for the package is *gopkg.in/yaml.v2*. 
+ +To install it, run: + + go get gopkg.in/yaml.v2 + +API documentation +----------------- + +If opened in a browser, the import path itself leads to the API documentation: + + * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2) + +API stability +------------- + +The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in). + + +License +------- + +The yaml package is licensed under the Apache License 2.0. Please see the LICENSE file for details. + + +Example +------- + +```Go +package main + +import ( + "fmt" + "log" + + "gopkg.in/yaml.v2" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[interface{}]interface{}) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + diff --git a/vendor/gopkg.in/yaml.v2/apic.go b/vendor/gopkg.in/yaml.v2/apic.go new file mode 100644 index 0000000..1f7e87e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/apic.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "io" +) + +func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) { + //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// Create a new parser object. +func yaml_parser_initialize(parser *yaml_parser_t) bool { + *parser = yaml_parser_t{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } + return true +} + +// Destroy a parser object. +func yaml_parser_delete(parser *yaml_parser_t) { + *parser = yaml_parser_t{} +} + +// String read handler. +func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yaml_reader_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// Set a string input. 
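+// The input source may only be set once; a second call panics.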
+func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_string_read_handler + parser.input = input + parser.input_pos = 0 +} + +// Set a file input. +func yaml_parser_set_input_reader(parser *yaml_parser_t, r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yaml_reader_read_handler + parser.input_reader = r +} + +// Set the source encoding. +func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) { + if parser.encoding != yaml_ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// Create a new emitter object. +func yaml_emitter_initialize(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]yaml_emitter_state_t, 0, initial_stack_size), + events: make([]yaml_event_t, 0, initial_queue_size), + } +} + +// Destroy an emitter object. +func yaml_emitter_delete(emitter *yaml_emitter_t) { + *emitter = yaml_emitter_t{} +} + +// String write handler. +func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yaml_writer_write_handler uses emitter.output_writer to write the +// emitted text. +func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// Set a string output. +func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_string_write_handler + emitter.output_buffer = output_buffer +} + +// Set a file output. +func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yaml_writer_write_handler + emitter.output_writer = w +} + +// Set the output encoding. +func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) { + if emitter.encoding != yaml_ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// Set the canonical output style. +func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) { + emitter.canonical = canonical +} + +//// Set the indentation increment. +func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.best_indent = indent +} + +// Set the preferred line width. +func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// Set if unescaped non-ASCII characters are allowed. +func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) { + emitter.unicode = unicode +} + +// Set the preferred line break character. +func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// Create STREAM-START. +func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + encoding: encoding, + } +} + +// Create STREAM-END. +func yaml_stream_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + } +} + +// Create DOCUMENT-START. +func yaml_document_start_event_initialize( + event *yaml_event_t, + version_directive *yaml_version_directive_t, + tag_directives []yaml_tag_directive_t, + implicit bool, +) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: implicit, + } +} + +// Create DOCUMENT-END. +func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) { + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + implicit: implicit, + } +} + +///* +// * Create ALIAS. +// */ +// +//YAML_DECLARE(int) +//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t) +//{ +// mark yaml_mark_t = { 0, 0, 0 } +// anchor_copy *yaml_char_t = NULL +// +// assert(event) // Non-NULL event object is expected. +// assert(anchor) // Non-NULL anchor is expected. +// +// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0 +// +// anchor_copy = yaml_strdup(anchor) +// if (!anchor_copy) +// return 0 +// +// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark) +// +// return 1 +//} + +// Create SCALAR. 
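+// The returned bool always reports success in this Go port; it mirrors the return code of the original C libyaml API.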
+func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + anchor: anchor, + tag: tag, + value: value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-START. +func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } + return true +} + +// Create SEQUENCE-END. +func yaml_sequence_end_event_initialize(event *yaml_event_t) bool { + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + } + return true +} + +// Create MAPPING-START. +func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(style), + } +} + +// Create MAPPING-END. +func yaml_mapping_end_event_initialize(event *yaml_event_t) { + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + } +} + +// Destroy an event object. +func yaml_event_delete(event *yaml_event_t) { + *event = yaml_event_t{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. +// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. 
+// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. +// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. +// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/gopkg.in/yaml.v2/decode.go b/vendor/gopkg.in/yaml.v2/decode.go new file mode 100644 index 0000000..129bc2a --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/decode.go @@ -0,0 +1,815 @@ +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "strconv" + "time" +) + +const ( + documentNode = 1 << iota + mappingNode + sequenceNode + scalarNode + aliasNode +) + +type node struct { + kind int + line, column int + tag string + // For an alias node, alias holds the resolved alias. + alias *node + value string + implicit bool + children []*node + anchors map[string]*node +} + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
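+// The parser wraps a yaml_parser_t and consumes events one at a time, building
+// document, mapping, sequence, scalar and alias nodes as it goes.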
+
+type parser struct {
+	parser   yaml_parser_t
+	event    yaml_event_t
+	doc      *node
+	doneInit bool
+}
+
+func newParser(b []byte) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	if len(b) == 0 {
+		b = []byte{'\n'}
+	}
+	yaml_parser_set_input_string(&p.parser, b)
+	return &p
+}
+
+func newParserFromReader(r io.Reader) *parser {
+	p := parser{}
+	if !yaml_parser_initialize(&p.parser) {
+		panic("failed to initialize YAML parser")
+	}
+	yaml_parser_set_input_reader(&p.parser, r)
+	return &p
+}
+
+func (p *parser) init() {
+	if p.doneInit {
+		return
+	}
+	p.expect(yaml_STREAM_START_EVENT)
+	p.doneInit = true
+}
+
+func (p *parser) destroy() {
+	if p.event.typ != yaml_NO_EVENT {
+		yaml_event_delete(&p.event)
+	}
+	yaml_parser_delete(&p.parser)
+}
+
+// expect consumes an event from the event stream and
+// checks that it's of the expected type.
+func (p *parser) expect(e yaml_event_type_t) {
+	if p.event.typ == yaml_NO_EVENT {
+		if !yaml_parser_parse(&p.parser, &p.event) {
+			p.fail()
+		}
+	}
+	if p.event.typ == yaml_STREAM_END_EVENT {
+		failf("attempted to go past the end of stream; corrupted value?")
+	}
+	if p.event.typ != e {
+		p.parser.problem = fmt.Sprintf("expected %s event but got %s", e, p.event.typ)
+		p.fail()
+	}
+	yaml_event_delete(&p.event)
+	p.event.typ = yaml_NO_EVENT
+}
+
+// peek peeks at the next event in the event stream,
+// puts the results into p.event and returns the event type.
+func (p *parser) peek() yaml_event_type_t {
+	if p.event.typ != yaml_NO_EVENT {
+		return p.event.typ
+	}
+	if !yaml_parser_parse(&p.parser, &p.event) {
+		p.fail()
+	}
+	return p.event.typ
+}
+
+func (p *parser) fail() {
+	var where string
+	var line int
+	if p.parser.problem_mark.line != 0 {
+		line = p.parser.problem_mark.line
+		// Scanner errors don't iterate line before returning error
+		if p.parser.error == yaml_SCANNER_ERROR {
+			line++
+		}
+	} else if p.parser.context_mark.line != 0 {
+		line = p.parser.context_mark.line
+	}
+	if line != 0 {
+		where = "line " + strconv.Itoa(line) + ": "
+	}
+	var msg string
+	if len(p.parser.problem) > 0 {
+		msg = p.parser.problem
+	} else {
+		msg = "unknown problem parsing YAML content"
+	}
+	failf("%s%s", where, msg)
+}
+
+func (p *parser) anchor(n *node, anchor []byte) {
+	if anchor != nil {
+		p.doc.anchors[string(anchor)] = n
+	}
+}
+
+func (p *parser) parse() *node {
+	p.init()
+	switch p.peek() {
+	case yaml_SCALAR_EVENT:
+		return p.scalar()
+	case yaml_ALIAS_EVENT:
+		return p.alias()
+	case yaml_MAPPING_START_EVENT:
+		return p.mapping()
+	case yaml_SEQUENCE_START_EVENT:
+		return p.sequence()
+	case yaml_DOCUMENT_START_EVENT:
+		return p.document()
+	case yaml_STREAM_END_EVENT:
+		// Happens when attempting to decode an empty buffer.
+ return nil + default: + panic("attempted to parse unknown event: " + p.event.typ.String()) + } +} + +func (p *parser) node(kind int) *node { + return &node{ + kind: kind, + line: p.event.start_mark.line, + column: p.event.start_mark.column, + } +} + +func (p *parser) document() *node { + n := p.node(documentNode) + n.anchors = make(map[string]*node) + p.doc = n + p.expect(yaml_DOCUMENT_START_EVENT) + n.children = append(n.children, p.parse()) + p.expect(yaml_DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *node { + n := p.node(aliasNode) + n.value = string(p.event.anchor) + n.alias = p.doc.anchors[n.value] + if n.alias == nil { + failf("unknown anchor '%s' referenced", n.value) + } + p.expect(yaml_ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *node { + n := p.node(scalarNode) + n.value = string(p.event.value) + n.tag = string(p.event.tag) + n.implicit = p.event.implicit + p.anchor(n, p.event.anchor) + p.expect(yaml_SCALAR_EVENT) + return n +} + +func (p *parser) sequence() *node { + n := p.node(sequenceNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_SEQUENCE_START_EVENT) + for p.peek() != yaml_SEQUENCE_END_EVENT { + n.children = append(n.children, p.parse()) + } + p.expect(yaml_SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *node { + n := p.node(mappingNode) + p.anchor(n, p.event.anchor) + p.expect(yaml_MAPPING_START_EVENT) + for p.peek() != yaml_MAPPING_END_EVENT { + n.children = append(n.children, p.parse(), p.parse()) + } + p.expect(yaml_MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. + +type decoder struct { + doc *node + aliases map[*node]bool + mapType reflect.Type + terrors []string + strict bool + + decodeCount int + aliasCount int + aliasDepth int +} + +var ( + mapItemType = reflect.TypeOf(MapItem{}) + durationType = reflect.TypeOf(time.Duration(0)) + defaultMapType = reflect.TypeOf(map[interface{}]interface{}{}) + ifaceType = defaultMapType.Elem() + timeType = reflect.TypeOf(time.Time{}) + ptrTimeType = reflect.TypeOf(&time.Time{}) +) + +func newDecoder(strict bool) *decoder { + d := &decoder{mapType: defaultMapType, strict: strict} + d.aliases = make(map[*node]bool) + return d +} + +func (d *decoder) terror(n *node, tag string, out reflect.Value) { + if n.tag != "" { + tag = n.tag + } + value := n.value + if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type())) +} + +func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v interface{}) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + if e, ok := err.(*TypeError); ok { + d.terrors = append(d.terrors, e.Errors...) + return false + } + if err != nil { + fail(err) + } + return true +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. 
+// +// If n holds a null value, prepare returns before doing anything. +func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "~" || n.value == "" && n.implicit) { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Ptr { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + if u, ok := out.Addr().Interface().(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. + // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + switch n.kind { + case documentNode: + return d.document(n, out) + case aliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.kind { + case scalarNode: + good = d.scalar(n, out) + case mappingNode: + good = d.mapping(n, out) + case sequenceNode: + good = d.sequence(n, out) + default: + panic("internal error: unknown node kind: " + strconv.Itoa(n.kind)) + } + return good +} + +func (d *decoder) document(n *node, out reflect.Value) (good bool) { + if len(n.children) == 1 { + d.doc = n + d.unmarshal(n.children[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
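+		// A document such as
+		//
+		//     a: &a [*a]
+		//
+		// ends up here: the alias resolves back to the node that is
+		// still being decoded.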
+ failf("anchor '%s' value contains itself", n.value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +var zeroValue reflect.Value + +func resetMap(out reflect.Value) { + for _, k := range out.MapKeys() { + out.SetMapIndex(k, zeroValue) + } +} + +func (d *decoder) scalar(n *node, out reflect.Value) bool { + var tag string + var resolved interface{} + if n.tag == "" && !n.implicit { + tag = yaml_STR_TAG + resolved = n.value + } else { + tag, resolved = resolve(n.tag, n.value) + if tag == yaml_BINARY_TAG { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + if out.Kind() == reflect.Map && !out.CanAddr() { + resetMap(out) + } else { + out.Set(reflect.Zero(out.Type())) + } + return true + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == yaml_BINARY_TAG { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. + text = []byte(n.value) + } + err := u.UnmarshalText(text) + if err != nil { + fail(err) + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == yaml_BINARY_TAG { + out.SetString(resolved.(string)) + return true + } + if resolved != nil { + out.SetString(n.value) + return true + } + case reflect.Interface: + if resolved == nil { + out.Set(reflect.Zero(out.Type())) + } else if tag == yaml_TIMESTAMP_TAG { + // It looks like a timestamp but for backward compatibility + // reasons we set it as a string, so that code that unmarshals + // timestamp-like values into interface{} will continue to + // see a string and not a time.Time. + // TODO(v3) Drop this. 
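+			// For example, an unquoted 2015-01-01 decoded into a
+			// bare interface{} comes out as the string "2015-01-01"
+			// rather than a time.Time.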
+			out.Set(reflect.ValueOf(n.value))
+		} else {
+			out.Set(reflect.ValueOf(resolved))
+		}
+		return true
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		switch resolved := resolved.(type) {
+		case int:
+			if !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case int64:
+			if !out.OverflowInt(resolved) {
+				out.SetInt(resolved)
+				return true
+			}
+		case uint64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
+				out.SetInt(int64(resolved))
+				return true
+			}
+		case string:
+			if out.Type() == durationType {
+				d, err := time.ParseDuration(resolved)
+				if err == nil {
+					out.SetInt(int64(d))
+					return true
+				}
+			}
+		}
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		switch resolved := resolved.(type) {
+		case int:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case int64:
+			if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case uint64:
+			if !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		case float64:
+			if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
+				out.SetUint(uint64(resolved))
+				return true
+			}
+		}
+	case reflect.Bool:
+		switch resolved := resolved.(type) {
+		case bool:
+			out.SetBool(resolved)
+			return true
+		}
+	case reflect.Float32, reflect.Float64:
+		switch resolved := resolved.(type) {
+		case int:
+			out.SetFloat(float64(resolved))
+			return true
+		case int64:
+			out.SetFloat(float64(resolved))
+			return true
+		case uint64:
+			out.SetFloat(float64(resolved))
+			return true
+		case float64:
+			out.SetFloat(resolved)
+			return true
+		}
+	case reflect.Struct:
+		if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() {
+			out.Set(resolvedv)
+			return true
+		}
+	case reflect.Ptr:
+		if out.Type().Elem() == reflect.TypeOf(resolved) {
+			// TODO Does this make sense? When is out a Ptr except when decoding a nil value?
+			elem := reflect.New(out.Type().Elem())
+			elem.Elem().Set(reflect.ValueOf(resolved))
+			out.Set(elem)
+			return true
+		}
+	}
+	d.terror(n, tag, out)
+	return false
+}
+
+func settableValueOf(i interface{}) reflect.Value {
+	v := reflect.ValueOf(i)
+	sv := reflect.New(v.Type()).Elem()
+	sv.Set(v)
+	return sv
+}
+
+func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
+	l := len(n.children)
+
+	var iface reflect.Value
+	switch out.Kind() {
+	case reflect.Slice:
+		out.Set(reflect.MakeSlice(out.Type(), l, l))
+	case reflect.Array:
+		if l != out.Len() {
+			failf("invalid array: want %d elements but got %d", out.Len(), l)
+		}
+	case reflect.Interface:
+		// No type hints. Will have to use a generic sequence.
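+		// For example, decoding "[1, two]" into a bare interface{}
+		// yields []interface{}{1, "two"}.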
+ iface = out + out = settableValueOf(make([]interface{}, l)) + default: + d.terror(n, yaml_SEQ_TAG, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.children[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *node, out reflect.Value) (good bool) { + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Slice: + return d.mappingSlice(n, out) + case reflect.Map: + // okay + case reflect.Interface: + if d.mapType.Kind() == reflect.Map { + iface := out + out = reflect.MakeMap(d.mapType) + iface.Set(out) + } else { + slicev := reflect.New(d.mapType).Elem() + if !d.mappingSlice(n, slicev) { + return false + } + out.Set(slicev) + return true + } + default: + d.terror(n, yaml_MAP_TAG, out) + return false + } + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + mapType := d.mapType + if outt.Key() == ifaceType && outt.Elem() == ifaceType { + d.mapType = outt + } + + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + } + l := len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.children[i], k) { + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("invalid map key: %#v", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.children[i+1], e) { + d.setMapIndex(n.children[i+1], out, k, e) + } + } + } + d.mapType = mapType + return true +} + +func (d *decoder) setMapIndex(n *node, out, k, v reflect.Value) { + if d.strict && out.MapIndex(k) != zeroValue { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: key %#v already set in map", n.line+1, k.Interface())) + return + } + out.SetMapIndex(k, v) +} + +func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) { + outt := out.Type() + if outt.Elem() != mapItemType { + d.terror(n, yaml_MAP_TAG, out) + return false + } + + mapType := d.mapType + d.mapType = outt + + var slice []MapItem + var l = len(n.children) + for i := 0; i < l; i += 2 { + if isMerge(n.children[i]) { + d.merge(n.children[i+1], out) + continue + } + item := MapItem{} + k := reflect.ValueOf(&item.Key).Elem() + if d.unmarshal(n.children[i], k) { + v := reflect.ValueOf(&item.Value).Elem() + if d.unmarshal(n.children[i+1], v) { + slice = append(slice, item) + } + } + } + out.Set(reflect.ValueOf(slice)) + d.mapType = mapType + return true +} + +func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + name := settableValueOf("") + l := len(n.children) + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + inlineMap.Set(reflect.New(inlineMap.Type()).Elem()) + elemType = inlineMap.Type().Elem() + } + + var doneFields []bool + if d.strict { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + for i := 0; i < l; i += 2 { + ni := n.children[i] + if isMerge(ni) { + d.merge(n.children[i+1], out) + continue + } + if !d.unmarshal(ni, name) { + continue + } + if info, ok := sinfo.FieldsMap[name.String()]; ok { + if d.strict { + if doneFields[info.Id] { + d.terrors = append(d.terrors, 
fmt.Sprintf("line %d: field %s already set in type %s", ni.line+1, name.String(), out.Type())) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = out.FieldByIndex(info.Inline) + } + d.unmarshal(n.children[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.children[i+1], value) + d.setMapIndex(n.children[i+1], inlineMap, name, value) + } else if d.strict { + d.terrors = append(d.terrors, fmt.Sprintf("line %d: field %s not found in type %s", ni.line+1, name.String(), out.Type())) + } + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) merge(n *node, out reflect.Value) { + switch n.kind { + case mappingNode: + d.unmarshal(n, out) + case aliasNode: + if n.alias != nil && n.alias.kind != mappingNode { + failWantMap() + } + d.unmarshal(n, out) + case sequenceNode: + // Step backwards as earlier nodes take precedence. + for i := len(n.children) - 1; i >= 0; i-- { + ni := n.children[i] + if ni.kind == aliasNode { + if ni.alias != nil && ni.alias.kind != mappingNode { + failWantMap() + } + } else if ni.kind != mappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } +} + +func isMerge(n *node) bool { + return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG) +} diff --git a/vendor/gopkg.in/yaml.v2/emitterc.go b/vendor/gopkg.in/yaml.v2/emitterc.go new file mode 100644 index 0000000..a1c2cc5 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/emitterc.go @@ -0,0 +1,1685 @@ +package yaml + +import ( + "bytes" + "fmt" +) + +// Flush the buffer if needed. +func flush(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return yaml_emitter_flush(emitter) + } + return true +} + +// Put a character to the output buffer. +func put(emitter *yaml_emitter_t, value byte) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return true +} + +// Put a line break to the output buffer. +func put_break(emitter *yaml_emitter_t) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + switch emitter.line_break { + case yaml_CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case yaml_LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case yaml_CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + emitter.column = 0 + emitter.line++ + return true +} + +// Copy a character from a string into buffer. 
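+// (The "+5" slack in the flush checks in put, put_break, and write
+// leaves room for the widest single write below, a four-byte UTF-8
+// sequence, with a byte to spare, so no operation needs to flush in
+// the middle of a character.)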
+func write(emitter *yaml_emitter_t, s []byte, i *int) bool { + if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) { + return false + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return true +} + +// Write a whole string into buffer. +func write_all(emitter *yaml_emitter_t, s []byte) bool { + for i := 0; i < len(s); { + if !write(emitter, s, &i) { + return false + } + } + return true +} + +// Copy a line break character from a string into buffer. +func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool { + if s[*i] == '\n' { + if !put_break(emitter) { + return false + } + *i++ + } else { + if !write(emitter, s, i) { + return false + } + emitter.column = 0 + emitter.line++ + } + return true +} + +// Set an emitter error and return false. +func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_EMITTER_ERROR + emitter.problem = problem + return false +} + +// Emit an event. +func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.events = append(emitter.events, *event) + for !yaml_emitter_need_more_events(emitter) { + event := &emitter.events[emitter.events_head] + if !yaml_emitter_analyze_event(emitter, event) { + return false + } + if !yaml_emitter_state_machine(emitter, event) { + return false + } + yaml_event_delete(event) + emitter.events_head++ + } + return true +} + +// Check if we need to accumulate more events before emitting. +// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +// +func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].typ { + case yaml_DOCUMENT_START_EVENT: + accumulate = 1 + break + case yaml_SEQUENCE_START_EVENT: + accumulate = 2 + break + case yaml_MAPPING_START_EVENT: + accumulate = 3 + break + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].typ { + case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT: + level++ + case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? 
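+	// (Probably not: nothing on the Go side frees these buffers. The
+	// copy mirrors the C original, where the caller may deallocate the
+	// directive after the call returns.)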
+ tag_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return true +} + +// Increase the indentation level. +func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.best_indent + } else { + emitter.indent = 0 + } + } else if !indentless { + emitter.indent += emitter.best_indent + } + return true +} + +// State dispatcher. +func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool { + switch emitter.state { + default: + case yaml_EMIT_STREAM_START_STATE: + return yaml_emitter_emit_stream_start(emitter, event) + + case yaml_EMIT_FIRST_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, true) + + case yaml_EMIT_DOCUMENT_START_STATE: + return yaml_emitter_emit_document_start(emitter, event, false) + + case yaml_EMIT_DOCUMENT_CONTENT_STATE: + return yaml_emitter_emit_document_content(emitter, event) + + case yaml_EMIT_DOCUMENT_END_STATE: + return yaml_emitter_emit_document_end(emitter, event) + + case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, true) + + case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_flow_sequence_item(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_KEY_STATE: + return yaml_emitter_emit_flow_mapping_key(emitter, event, false) + + case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, true) + + case yaml_EMIT_FLOW_MAPPING_VALUE_STATE: + return yaml_emitter_emit_flow_mapping_value(emitter, event, false) + + case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, true) + + case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return yaml_emitter_emit_block_sequence_item(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_KEY_STATE: + return yaml_emitter_emit_block_mapping_key(emitter, event, false) + + case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, true) + + case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE: + return yaml_emitter_emit_block_mapping_value(emitter, event, false) + + case yaml_EMIT_END_STATE: + return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect STREAM-START. 
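+// This is the first state of every emitter. It settles configuration
+// defaults before any output is produced: UTF-8 encoding, two-space
+// indents, an 80-column preferred width, and '\n' line breaks.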
+func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_STREAM_START_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START") + } + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == yaml_ANY_ENCODING { + emitter.encoding = yaml_UTF8_ENCODING + } + } + if emitter.best_indent < 2 || emitter.best_indent > 9 { + emitter.best_indent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == yaml_ANY_BREAK { + emitter.line_break = yaml_LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + + if emitter.encoding != yaml_UTF8_ENCODING { + if !yaml_emitter_write_bom(emitter) { + return false + } + } + emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE + return true +} + +// Expect DOCUMENT-START or STREAM-END. +func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + + if event.typ == yaml_DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) { + return false + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) { + return false + } + if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) { + return false + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) { + return false + } + } + + implicit := event.implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) { + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if event.version_directive != nil { + implicit = false + if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) { + return false + } + if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + if yaml_emitter_check_empty_document(emitter) { + implicit = false + } + if !implicit { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) { + return false + } + if emitter.canonical { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + } + + emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE + return true + } + + if event.typ == yaml_STREAM_END_EVENT { + if emitter.open_ended { + if 
!yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_END_STATE + return true + } + + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END") +} + +// Expect the root node. +func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool { + emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE) + return yaml_emitter_emit_node(emitter, event, true, false, false, false) +} + +// Expect DOCUMENT-END. +func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if event.typ != yaml_DOCUMENT_END_EVENT { + return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END") + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !event.implicit { + // [Go] Allocate the slice elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_flush(emitter) { + return false + } + emitter.state = yaml_EMIT_DOCUMENT_START_STATE + emitter.tag_directives = emitter.tag_directives[:0] + return true +} + +// Expect a flow item node. +func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a flow key node. 
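+// Flow mappings are the inline {key: value} form. A key that passes
+// yaml_emitter_check_simple_key is written directly before its ':';
+// anything longer or multiline gets an explicit '?' indicator.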
+func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + emitter.flow_level++ + } + + if event.typ == yaml_MAPPING_END_EVENT { + emitter.flow_level-- + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + if emitter.canonical && !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + + if !first { + if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) { + return false + } + } + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + + if !emitter.canonical && yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a flow value node. +func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if emitter.canonical || emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block item node. +func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) { + return false + } + } + if event.typ == yaml_SEQUENCE_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE) + return yaml_emitter_emit_node(emitter, event, false, true, false, false) +} + +// Expect a block key node. 
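+// Block mappings are the indented "key: value" form. As in the flow
+// case, a key that fails the simple-key check is emitted on its own
+// line behind an explicit '?' indicator.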
+func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool { + if first { + if !yaml_emitter_increase_indent(emitter, false, false) { + return false + } + } + if event.typ == yaml_MAPPING_END_EVENT { + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true + } + if !yaml_emitter_write_indent(emitter) { + return false + } + if yaml_emitter_check_simple_key(emitter) { + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, true) + } + if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) { + return false + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a block value node. +func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool { + if simple { + if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) { + return false + } + } else { + if !yaml_emitter_write_indent(emitter) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) { + return false + } + } + emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE) + return yaml_emitter_emit_node(emitter, event, false, false, true, false) +} + +// Expect a node. +func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t, + root bool, sequence bool, mapping bool, simple_key bool) bool { + + emitter.root_context = root + emitter.sequence_context = sequence + emitter.mapping_context = mapping + emitter.simple_key_context = simple_key + + switch event.typ { + case yaml_ALIAS_EVENT: + return yaml_emitter_emit_alias(emitter, event) + case yaml_SCALAR_EVENT: + return yaml_emitter_emit_scalar(emitter, event) + case yaml_SEQUENCE_START_EVENT: + return yaml_emitter_emit_sequence_start(emitter, event) + case yaml_MAPPING_START_EVENT: + return yaml_emitter_emit_mapping_start(emitter, event) + default: + return yaml_emitter_set_emitter_error(emitter, + fmt.Sprintf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.typ)) + } +} + +// Expect ALIAS. +func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SCALAR. +func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_select_scalar_style(emitter, event) { + return false + } + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if !yaml_emitter_increase_indent(emitter, true, false) { + return false + } + if !yaml_emitter_process_scalar(emitter) { + return false + } + emitter.indent = emitter.indents[len(emitter.indents)-1] + emitter.indents = emitter.indents[:len(emitter.indents)-1] + emitter.state = emitter.states[len(emitter.states)-1] + emitter.states = emitter.states[:len(emitter.states)-1] + return true +} + +// Expect SEQUENCE-START. 
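+// The flow form ([a, b]) is chosen when already inside a flow
+// collection, in canonical mode, when the event asks for flow style,
+// or when the sequence is empty; otherwise the block form ("- a") is
+// used.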
+func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE || + yaml_emitter_check_empty_sequence(emitter) { + emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE + } + return true +} + +// Expect MAPPING-START. +func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool { + if !yaml_emitter_process_anchor(emitter) { + return false + } + if !yaml_emitter_process_tag(emitter) { + return false + } + if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE || + yaml_emitter_check_empty_mapping(emitter) { + emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE + } else { + emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE + } + return true +} + +// Check if the document content is an empty scalar. +func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool { + return false // [Go] Huh? +} + +// Check if the next events represent an empty sequence. +func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT +} + +// Check if the next events represent an empty mapping. +func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool { + if len(emitter.events)-emitter.events_head < 2 { + return false + } + return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT && + emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT +} + +// Check if the next node can be expressed as a simple key. +func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool { + length := 0 + switch emitter.events[emitter.events_head].typ { + case yaml_ALIAS_EVENT: + length += len(emitter.anchor_data.anchor) + case yaml_SCALAR_EVENT: + if emitter.scalar_data.multiline { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + + len(emitter.scalar_data.value) + case yaml_SEQUENCE_START_EVENT: + if !yaml_emitter_check_empty_sequence(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + case yaml_MAPPING_START_EVENT: + if !yaml_emitter_check_empty_mapping(emitter) { + return false + } + length += len(emitter.anchor_data.anchor) + + len(emitter.tag_data.handle) + + len(emitter.tag_data.suffix) + default: + return false + } + return length <= 128 +} + +// Determine an acceptable scalar style. 
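+// Style selection only falls back towards safer styles: a requested
+// plain style degrades to single quotes when the value needs quoting
+// (an empty string in flow context, say), and single quotes degrade
+// to double quotes when special characters or awkward whitespace rule
+// them out.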
+func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 + if no_tag && !event.implicit && !event.quoted_implicit { + return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified") + } + + style := event.scalar_style() + if style == yaml_ANY_SCALAR_STYLE { + style = yaml_PLAIN_SCALAR_STYLE + } + if emitter.canonical { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + if emitter.simple_key_context && emitter.scalar_data.multiline { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + + if style == yaml_PLAIN_SCALAR_STYLE { + if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed || + emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + if no_tag && !event.implicit { + style = yaml_SINGLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_SINGLE_QUOTED_SCALAR_STYLE { + if !emitter.scalar_data.single_quoted_allowed { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE { + if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context { + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + } + + if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE { + emitter.tag_data.handle = []byte{'!'} + } + emitter.scalar_data.style = style + return true +} + +// Write an anchor. +func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool { + if emitter.anchor_data.anchor == nil { + return true + } + c := []byte{'&'} + if emitter.anchor_data.alias { + c[0] = '*' + } + if !yaml_emitter_write_indicator(emitter, c, true, false, false) { + return false + } + return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor) +} + +// Write a tag. +func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool { + if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 { + return true + } + if len(emitter.tag_data.handle) > 0 { + if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) { + return false + } + if len(emitter.tag_data.suffix) > 0 { + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + } + } else { + // [Go] Allocate these slices elsewhere. + if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) { + return false + } + if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) { + return false + } + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) { + return false + } + } + return true +} + +// Write a scalar. 
+func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool { + switch emitter.scalar_data.style { + case yaml_PLAIN_SCALAR_STYLE: + return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_SINGLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_DOUBLE_QUOTED_SCALAR_STYLE: + return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context) + + case yaml_LITERAL_SCALAR_STYLE: + return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value) + + case yaml_FOLDED_SCALAR_STYLE: + return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value) + } + panic("unknown scalar style") +} + +// Check if a %YAML directive is valid. +func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool { + if version_directive.major != 1 || version_directive.minor != 1 { + return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive") + } + return true +} + +// Check if a %TAG directive is valid. +func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool { + handle := tag_directive.handle + prefix := tag_directive.prefix + if len(handle) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty") + } + if handle[0] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'") + } + if handle[len(handle)-1] != '!' { + return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'") + } + for i := 1; i < len(handle)-1; i += width(handle[i]) { + if !is_alpha(handle, i) { + return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only") + } + } + if len(prefix) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty") + } + return true +} + +// Check if an anchor is valid. +func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool { + if len(anchor) == 0 { + problem := "anchor value must not be empty" + if alias { + problem = "alias value must not be empty" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + for i := 0; i < len(anchor); i += width(anchor[i]) { + if !is_alpha(anchor, i) { + problem := "anchor value must contain alphanumerical characters only" + if alias { + problem = "alias value must contain alphanumerical characters only" + } + return yaml_emitter_set_emitter_error(emitter, problem) + } + } + emitter.anchor_data.anchor = anchor + emitter.anchor_data.alias = alias + return true +} + +// Check if a tag is valid. +func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool { + if len(tag) == 0 { + return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty") + } + for i := 0; i < len(emitter.tag_directives); i++ { + tag_directive := &emitter.tag_directives[i] + if bytes.HasPrefix(tag, tag_directive.prefix) { + emitter.tag_data.handle = tag_directive.handle + emitter.tag_data.suffix = tag[len(tag_directive.prefix):] + return true + } + } + emitter.tag_data.suffix = tag + return true +} + +// Check if a scalar is valid. 
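+// A single pass over the value records everything style selection
+// needs: whether the scalar is multiline, and which of the plain,
+// single-quoted, and block styles remain legal for it.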
+func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool { + var ( + block_indicators = false + flow_indicators = false + line_breaks = false + special_characters = false + + leading_space = false + leading_break = false + trailing_space = false + trailing_break = false + break_space = false + space_break = false + + preceded_by_whitespace = false + followed_by_whitespace = false + previous_space = false + previous_break = false + ) + + emitter.scalar_data.value = value + + if len(value) == 0 { + emitter.scalar_data.multiline = false + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = false + return true + } + + if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) { + block_indicators = true + flow_indicators = true + } + + preceded_by_whitespace = true + for i, w := 0, 0; i < len(value); i += w { + w = width(value[i]) + followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w) + + if i == 0 { + switch value[i] { + case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`': + flow_indicators = true + block_indicators = true + case '?', ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '-': + if followed_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } else { + switch value[i] { + case ',', '?', '[', ']', '{', '}': + flow_indicators = true + case ':': + flow_indicators = true + if followed_by_whitespace { + block_indicators = true + } + case '#': + if preceded_by_whitespace { + flow_indicators = true + block_indicators = true + } + } + } + + if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode { + special_characters = true + } + if is_space(value, i) { + if i == 0 { + leading_space = true + } + if i+width(value[i]) == len(value) { + trailing_space = true + } + if previous_break { + break_space = true + } + previous_space = true + previous_break = false + } else if is_break(value, i) { + line_breaks = true + if i == 0 { + leading_break = true + } + if i+width(value[i]) == len(value) { + trailing_break = true + } + if previous_space { + space_break = true + } + previous_space = false + previous_break = true + } else { + previous_space = false + previous_break = false + } + + // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition. 
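+		// (Most likely a carry-over from the NUL-terminated buffers of
+		// the C original; on a Go slice the 'z' case only fires for an
+		// embedded 0x00 byte. Unlike is_blank, though, is_blankz also
+		// counts line breaks as whitespace, which matters here.)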
+ preceded_by_whitespace = is_blankz(value, i) + } + + emitter.scalar_data.multiline = line_breaks + emitter.scalar_data.flow_plain_allowed = true + emitter.scalar_data.block_plain_allowed = true + emitter.scalar_data.single_quoted_allowed = true + emitter.scalar_data.block_allowed = true + + if leading_space || leading_break || trailing_space || trailing_break { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if trailing_space { + emitter.scalar_data.block_allowed = false + } + if break_space { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + } + if space_break || special_characters { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + emitter.scalar_data.single_quoted_allowed = false + emitter.scalar_data.block_allowed = false + } + if line_breaks { + emitter.scalar_data.flow_plain_allowed = false + emitter.scalar_data.block_plain_allowed = false + } + if flow_indicators { + emitter.scalar_data.flow_plain_allowed = false + } + if block_indicators { + emitter.scalar_data.block_plain_allowed = false + } + return true +} + +// Check if the event data is valid. +func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool { + + emitter.anchor_data.anchor = nil + emitter.tag_data.handle = nil + emitter.tag_data.suffix = nil + emitter.scalar_data.value = nil + + switch event.typ { + case yaml_ALIAS_EVENT: + if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) { + return false + } + + case yaml_SCALAR_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + if !yaml_emitter_analyze_scalar(emitter, event.value) { + return false + } + + case yaml_SEQUENCE_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + + case yaml_MAPPING_START_EVENT: + if len(event.anchor) > 0 { + if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) { + return false + } + } + if len(event.tag) > 0 && (emitter.canonical || !event.implicit) { + if !yaml_emitter_analyze_tag(emitter, event.tag) { + return false + } + } + } + return true +} + +// Write the BOM character. 
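+// The three bytes below are the UTF-8 byte order mark. Stream start
+// only calls this when an encoding other than plain UTF-8 was
+// requested; the output layer is presumably expected to transcode.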
+func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool { + if !flush(emitter) { + return false + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return true +} + +func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if !put_break(emitter) { + return false + } + } + for emitter.column < indent { + if !put(emitter, ' ') { + return false + } + } + emitter.whitespace = true + emitter.indention = true + return true +} + +func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, indicator) { + return false + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.open_ended = false + return true +} + +func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool { + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + if !write_all(emitter, value) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool { + if need_whitespace && !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = is_alpha(value, i) + } + if must_write { + if !write(emitter, value, &i) { + return false + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if !put(emitter, '%') { + return false + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if !put(emitter, c) { + return false + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + if !emitter.whitespace { + if !put(emitter, ' ') { + return false + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = 
false + breaks = false + } + } + + emitter.whitespace = false + emitter.indention = false + if emitter.root_context { + emitter.open_ended = true + } + + return true +} + +func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) { + return false + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + spaces = true + } else if is_break(value, i) { + if !breaks && value[i] == '\n' { + if !put_break(emitter) { + return false + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if value[i] == '\'' { + if !put(emitter, '\'') { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + spaces = false + breaks = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool { + spaces := false + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) { + return false + } + + for i := 0; i < len(value); { + if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) || + is_bom(value, i) || is_break(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if !put(emitter, '\\') { + return false + } + + var ok bool + switch v { + case 0x00: + ok = put(emitter, '0') + case 0x07: + ok = put(emitter, 'a') + case 0x08: + ok = put(emitter, 'b') + case 0x09: + ok = put(emitter, 't') + case 0x0A: + ok = put(emitter, 'n') + case 0x0b: + ok = put(emitter, 'v') + case 0x0c: + ok = put(emitter, 'f') + case 0x0d: + ok = put(emitter, 'r') + case 0x1b: + ok = put(emitter, 'e') + case 0x22: + ok = put(emitter, '"') + case 0x5c: + ok = put(emitter, '\\') + case 0x85: + ok = put(emitter, 'N') + case 0xA0: + ok = put(emitter, '_') + case 0x2028: + ok = put(emitter, 'L') + case 0x2029: + ok = put(emitter, 'P') + default: + if v <= 0xFF { + ok = put(emitter, 'x') + w = 2 + } else if v <= 0xFFFF { + ok = put(emitter, 'u') + w = 4 + } else { + ok = put(emitter, 'U') + w = 8 + } + for k := (w - 1) * 4; ok && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + ok = put(emitter, digit+'0') + } else { + ok = put(emitter, digit+'A'-10) + } + } + } + if !ok { + return false + } + spaces = false + } else if is_space(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if !yaml_emitter_write_indent(emitter) { + return false + } + if is_space(value, i+1) { + if 
!put(emitter, '\\') { + return false + } + } + i += width(value[i]) + } else if !write(emitter, value, &i) { + return false + } + spaces = true + } else { + if !write(emitter, value, &i) { + return false + } + spaces = false + } + } + if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) { + return false + } + emitter.whitespace = false + emitter.indention = false + return true +} + +func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool { + if is_space(value, 0) || is_break(value, 0) { + indent_hint := []byte{'0' + byte(emitter.best_indent)} + if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) { + return false + } + } + + emitter.open_ended = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !is_break(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.open_ended = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if is_break(value, i) { + chomp_hint[0] = '+' + emitter.open_ended = true + } + } + } + if chomp_hint[0] != 0 { + if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) { + return false + } + } + return true +} + +func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + } + if !write(emitter, value, &i) { + return false + } + emitter.indention = false + breaks = false + } + } + + return true +} + +func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool { + if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) { + return false + } + if !yaml_emitter_write_block_scalar_hints(emitter, value) { + return false + } + + if !put_break(emitter) { + return false + } + emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if is_break(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for is_break(value, k) { + k += width(value[k]) + } + if !is_blankz(value, k) { + if !put_break(emitter) { + return false + } + } + } + if !write_break(emitter, value, &i) { + return false + } + emitter.indention = true + breaks = true + } else { + if breaks { + if !yaml_emitter_write_indent(emitter) { + return false + } + leading_spaces = is_blank(value, i) + } + if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width { + if !yaml_emitter_write_indent(emitter) { + return false + } + i += width(value[i]) + } else { + if !write(emitter, value, &i) { + return false + } + } + emitter.indention = false + breaks = false + } + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/encode.go b/vendor/gopkg.in/yaml.v2/encode.go new file mode 100644 index 0000000..0ee738e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/encode.go @@ -0,0 +1,390 @@ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + 
"regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// jsonNumber is the interface of the encoding/json.Number datatype. +// Repeating the interface here avoids a dependency on encoding/json, and also +// supports other libraries like jsoniter, which use a similar datatype with +// the same interface. Detecting this interface is useful when dealing with +// structures containing json.Number, which is a string under the hood. The +// encoder should prefer the use of Int64(), Float64() and string(), in that +// order, when encoding this type. +type jsonNumber interface { + Float64() (float64, error) + Int64() (int64, error) + String() string +} + +type encoder struct { + emitter yaml_emitter_t + event yaml_event_t + out []byte + flow bool + // doneInit holds whether the initial stream_start_event has been + // emitted. + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_string(&e.emitter, &e.out) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{} + yaml_emitter_initialize(&e.emitter) + yaml_emitter_set_output_writer(&e.emitter, w) + yaml_emitter_set_unicode(&e.emitter, true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.open_ended = false + yaml_stream_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) destroy() { + yaml_emitter_delete(&e.emitter) +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. + e.must(yaml_emitter_emit(&e.emitter, &e.event)) +} + +func (e *encoder) must(ok bool) { + if !ok { + msg := e.emitter.problem + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + yaml_document_start_event_initialize(&e.event, nil, nil, true) + e.emit() + e.marshal(tag, in) + yaml_document_end_event_initialize(&e.event, true) + e.emit() +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + if !in.IsValid() || in.Kind() == reflect.Ptr && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch m := iface.(type) { + case jsonNumber: + integer, err := m.Int64() + if err == nil { + // In this case the json.Number is a valid int64 + in = reflect.ValueOf(integer) + break + } + float, err := m.Float64() + if err == nil { + // In this case the json.Number is a valid float64 + in = reflect.ValueOf(float) + break + } + // fallback case - no number could be obtained + in = reflect.ValueOf(m.String()) + case time.Time, *time.Time: + // Although time.Time implements TextMarshaler, + // we don't want to treat it as a string for YAML + // purposes because YAML has special support for + // timestamps. 
+ case Marshaler: + v, err := m.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + in = reflect.ValueOf(v) + case encoding.TextMarshaler: + text, err := m.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Ptr: + if in.Type() == ptrTimeType { + e.timev(tag, in.Elem()) + } else { + e.marshal(tag, in.Elem()) + } + case reflect.Struct: + if in.Type() == timeType { + e.timev(tag, in) + } else { + e.structv(tag, in) + } + case reflect.Slice, reflect.Array: + if in.Type().Elem() == mapItemType { + e.itemsv(tag, in) + } else { + e.slicev(tag, in) + } + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + if in.Type() == durationType { + e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String())) + } else { + e.intv(tag, in) + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) itemsv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem) + for _, item := range slice { + e.marshal("", reflect.ValueOf(item.Key)) + e.marshal("", reflect.ValueOf(item.Value)) + } + }) +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = in.FieldByIndex(info.Inline) + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := yaml_BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_MAPPING_STYLE + } + yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style) + e.emit() + f() + yaml_mapping_end_event_initialize(&e.event) + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := yaml_BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = yaml_FLOW_SEQUENCE_STYLE + } + e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.must(yaml_sequence_end_event_initialize(&e.event)) + e.emit() +} + +// isBase60 
returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshalled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style yaml_scalar_style_t + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == yaml_BINARY_TAG { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = yaml_BINARY_TAG + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == yaml_STR_TAG && !isBase60Float(s) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. + switch { + case strings.Contains(s, "\n"): + style = yaml_LITERAL_SCALAR_STYLE + case canUsePlain: + style = yaml_PLAIN_SCALAR_STYLE + default: + style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) { + implicit := tag == "" + e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style)) + e.emit() +} diff --git a/vendor/gopkg.in/yaml.v2/go.mod b/vendor/gopkg.in/yaml.v2/go.mod new file mode 100644 index 0000000..1934e87 --- /dev/null +++ 
b/vendor/gopkg.in/yaml.v2/go.mod @@ -0,0 +1,5 @@ +module "gopkg.in/yaml.v2" + +require ( + "gopkg.in/check.v1" v0.0.0-20161208181325-20d25e280405 +) diff --git a/vendor/gopkg.in/yaml.v2/parserc.go b/vendor/gopkg.in/yaml.v2/parserc.go new file mode 100644 index 0000000..81d05df --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/parserc.go @@ -0,0 +1,1095 @@ +package yaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? +// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func peek_token(parser *yaml_parser_t) *yaml_token_t { + if parser.token_available || yaml_parser_fetch_more_tokens(parser) { + return &parser.tokens[parser.tokens_head] + } + return nil +} + +// Remove the next token from the queue (must be called after peek_token). +func skip_token(parser *yaml_parser_t) { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN + parser.tokens_head++ +} + +// Get the next event. +func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool { + // Erase the event object. + *event = yaml_event_t{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE { + return true + } + + // Generate the next event. + return yaml_parser_state_machine(parser, event) +} + +// Set parser error. +func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool { + parser.error = yaml_PARSER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = problem_mark + return false +} + +// State dispatcher. 
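+// Each yaml_PARSE_* state corresponds to a position in the grammar at the
+// top of this file. The dispatcher only selects the matching parse
+// function; token consumption happens inside those functions.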
+func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool { + //trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case yaml_PARSE_STREAM_START_STATE: + return yaml_parser_parse_stream_start(parser, event) + + case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, true) + + case yaml_PARSE_DOCUMENT_START_STATE: + return yaml_parser_parse_document_start(parser, event, false) + + case yaml_PARSE_DOCUMENT_CONTENT_STATE: + return yaml_parser_parse_document_content(parser, event) + + case yaml_PARSE_DOCUMENT_END_STATE: + return yaml_parser_parse_document_end(parser, event) + + case yaml_PARSE_BLOCK_NODE_STATE: + return yaml_parser_parse_node(parser, event, true, false) + + case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE: + return yaml_parser_parse_node(parser, event, true, true) + + case yaml_PARSE_FLOW_NODE_STATE: + return yaml_parser_parse_node(parser, event, false, false) + + case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, true) + + case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_block_sequence_entry(parser, event, false) + + case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_indentless_sequence_entry(parser, event) + + case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, true) + + case yaml_PARSE_BLOCK_MAPPING_KEY_STATE: + return yaml_parser_parse_block_mapping_key(parser, event, false) + + case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE: + return yaml_parser_parse_block_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, true) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return yaml_parser_parse_flow_sequence_entry(parser, event, false) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event) + + case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event) + + case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, true) + + case yaml_PARSE_FLOW_MAPPING_KEY_STATE: + return yaml_parser_parse_flow_mapping_key(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, false) + + case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return yaml_parser_parse_flow_mapping_value(parser, event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? 
explicit_document* STREAM-END +// ************ +func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_STREAM_START_TOKEN { + return yaml_parser_set_parser_error(parser, "did not find expected <stream-start>", token.start_mark) + } + parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + encoding: token.encoding, + } + skip_token(parser) + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// * +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// ************************* +func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool { + + token := peek_token(parser) + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.typ == yaml_DOCUMENT_END_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN && + token.typ != yaml_TAG_DIRECTIVE_TOKEN && + token.typ != yaml_DOCUMENT_START_TOKEN && + token.typ != yaml_STREAM_END_TOKEN { + // Parse an implicit document. + if !yaml_parser_process_directives(parser, nil, nil) { + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_BLOCK_NODE_STATE + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + } else if token.typ != yaml_STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + start_mark := token.start_mark + if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) { + return false + } + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_DOCUMENT_START_TOKEN { + yaml_parser_set_parser_error(parser, + "did not find expected <document start>", token.start_mark) + return false + } + parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE) + parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.end_mark + + *event = yaml_event_t{ + typ: yaml_DOCUMENT_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + implicit: false, + } + skip_token(parser) + + } else { + // Parse the stream end. + parser.state = yaml_PARSE_END_STATE + *event = yaml_event_t{ + typ: yaml_STREAM_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +// *********** +// +func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN || + token.typ == yaml_TAG_DIRECTIVE_TOKEN || + token.typ == yaml_DOCUMENT_START_TOKEN || + token.typ == yaml_DOCUMENT_END_TOKEN || + token.typ == yaml_STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return yaml_parser_process_empty_scalar(parser, event, + token.start_mark) + } + return yaml_parser_parse_node(parser, event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// ************* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + start_mark := token.start_mark + end_mark := token.start_mark + + implicit := true + if token.typ == yaml_DOCUMENT_END_TOKEN { + end_mark = token.end_mark + skip_token(parser) + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = yaml_PARSE_DOCUMENT_START_STATE + *event = yaml_event_t{ + typ: yaml_DOCUMENT_END_EVENT, + start_mark: start_mark, + end_mark: end_mark, + implicit: implicit, + } + return true +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// block_node ::= ALIAS +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// flow_node ::= ALIAS +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// ************************* +// block_content ::= block_collection | flow_collection | SCALAR +// ****** +// flow_content ::= flow_collection | SCALAR +// ****** +func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool { + //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = yaml_event_t{ + typ: yaml_ALIAS_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + anchor: token.value, + } + skip_token(parser) + return true + } + + start_mark := token.start_mark + end_mark := token.start_mark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark yaml_mark_t + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + start_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } else if token.typ == yaml_TAG_TOKEN { + tag_token = true + tag_handle = token.value + tag_suffix = token.suffix + start_mark = token.start_mark + tag_mark = token.start_mark + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_ANCHOR_TOKEN { + anchor = token.value + end_mark = token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + tag_suffix = nil + } else { + for i := range parser.tag_directives { + if bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) 
+ break + } + } + if len(tag) == 0 { + yaml_parser_set_parser_error_context(parser, + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.end_mark + if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + value: token.value, + implicit: plain_implicit, + quoted_implicit: quoted_implicit, + style: yaml_style_t(token.style), + } + skip_token(parser) + return true + } + if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE), + } + return true + } + if token.typ == yaml_FLOW_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_SEQUENCE_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE), + } + return true + } + if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN { + end_mark = token.end_mark + parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE), + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: start_mark, + end_mark: end_mark, + anchor: anchor, + tag: tag, + implicit: implicit, + quoted_implicit: false, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + if block { + context = "while parsing a block node" + } + yaml_parser_set_parser_error_context(parser, context, start_mark, + "did not find expected node content", token.start_mark) + return false +} + +// Parse the productions: +// 
block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// ******************** *********** * ********* +// +func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } else { + parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } + if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.start_mark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// *********** * +func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_BLOCK_ENTRY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_BLOCK_ENTRY_TOKEN && + token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, true, false) + } + parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// ******************* +// ((KEY block_node_or_indentless_sequence?)? 
+// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +// +func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ == yaml_KEY_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } else { + parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + } else if token.typ == yaml_BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a block mapping", context_mark, + "did not find expected key", token.start_mark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +// +// +func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + mark := token.end_mark + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_KEY_TOKEN && + token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_BLOCK_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, true, true) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) + } + parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_START_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + implicit: true, + style: yaml_style_t(yaml_FLOW_MAPPING_STYLE), + } + skip_token(parser) + return true + } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = yaml_event_t{ + typ: yaml_SEQUENCE_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + + skip_token(parser) + return true +} + +// +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// *** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + mark := token.end_mark + skip_token(parser) + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// ***** * +// +func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token := peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// * +// +func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool { + token := peek_token(parser) + if token == nil { + return false + } + parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.start_mark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? +// ****************** +// FLOW-MAPPING-END +// **************** +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * *** * +// +func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool { + if first { + token := peek_token(parser) + parser.marks = append(parser.marks, token.start_mark) + skip_token(parser) + } + + token := peek_token(parser) + if token == nil { + return false + } + + if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + if !first { + if token.typ == yaml_FLOW_ENTRY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return yaml_parser_set_parser_error_context(parser, + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.start_mark) + } + } + + if token.typ == yaml_KEY_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_VALUE_TOKEN && + token.typ != yaml_FLOW_ENTRY_TOKEN && + token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } else { + parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = yaml_event_t{ + typ: yaml_MAPPING_END_EVENT, + start_mark: token.start_mark, + end_mark: token.end_mark, + } + skip_token(parser) + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// * ***** * +// +func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool { + token := peek_token(parser) + if token == nil { + return false + } + if empty { + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) + } + if token.typ == yaml_VALUE_TOKEN { + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE) + return yaml_parser_parse_node(parser, event, false, false) + } + } + parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE + return yaml_parser_process_empty_scalar(parser, event, token.start_mark) +} + +// Generate an empty scalar event. 
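+// An empty scalar stands in for a node that the grammar allows but the
+// input omits, e.g. the missing value in a mapping entry written as
+// "key:". Both marks point at the same position and the value is nil.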
+func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool { + *event = yaml_event_t{ + typ: yaml_SCALAR_EVENT, + start_mark: mark, + end_mark: mark, + value: nil, // Empty + implicit: true, + style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []yaml_tag_directive_t{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. +func yaml_parser_process_directives(parser *yaml_parser_t, + version_directive_ref **yaml_version_directive_t, + tag_directives_ref *[]yaml_tag_directive_t) bool { + + var version_directive *yaml_version_directive_t + var tag_directives []yaml_tag_directive_t + + token := peek_token(parser) + if token == nil { + return false + } + + for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN { + if token.typ == yaml_VERSION_DIRECTIVE_TOKEN { + if version_directive != nil { + yaml_parser_set_parser_error(parser, + "found duplicate %YAML directive", token.start_mark) + return false + } + if token.major != 1 || token.minor != 1 { + yaml_parser_set_parser_error(parser, + "found incompatible YAML document", token.start_mark) + return false + } + version_directive = &yaml_version_directive_t{ + major: token.major, + minor: token.minor, + } + } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN { + value := yaml_tag_directive_t{ + handle: token.value, + prefix: token.prefix, + } + if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) { + return false + } + tag_directives = append(tag_directives, value) + } + + skip_token(parser) + token = peek_token(parser) + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. + value_copy := yaml_tag_directive_t{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/gopkg.in/yaml.v2/readerc.go b/vendor/gopkg.in/yaml.v2/readerc.go new file mode 100644 index 0000000..7c1f5fa --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/readerc.go @@ -0,0 +1,412 @@ +package yaml + +import ( + "io" +) + +// Set the reader error and return 0. +func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool { + parser.error = yaml_READER_ERROR + parser.problem = problem + parser.problem_offset = offset + parser.problem_value = value + return false +} + +// Byte order marks. 
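+// A leading BOM selects the stream encoding: FF FE means UTF-16LE,
+// FE FF means UTF-16BE, and EF BB BF means UTF-8. Without a BOM the
+// parser defaults to UTF-8 below.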
+const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return true on success, false on failure. +func yaml_parser_determine_encoding(parser *yaml_parser_t) bool { + // Ensure that we have enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !yaml_parser_update_raw_buffer(parser) { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = yaml_UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = yaml_UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = yaml_UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = yaml_UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. + if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less than the buffer size. +func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + //return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
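+	// Detection is lazy: it runs on the first buffer fill, once enough
+	// raw bytes are available to look for a BOM.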
+ if parser.encoding == yaml_ANY_ENCODING { + if !yaml_parser_determine_encoding(parser) { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !yaml_parser_update_raw_buffer(parser) { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case yaml_UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. + octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return yaml_parser_set_reader_error(parser, + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return yaml_parser_set_reader_error(parser, + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. 
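+				// This rejects overlong encodings: each sequence width has a
+				// minimum code point (a 2-byte sequence must encode at least
+				// 0x80), so forms like 0xC0 0x80 for NUL are errors.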
+ switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return yaml_parser_set_reader_error(parser, + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return yaml_parser_set_reader_error(parser, + "invalid Unicode character", + parser.offset, int(value)) + } + + case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING: + var low, high int + if parser.encoding == yaml_UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return yaml_parser_set_reader_error(parser, + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return yaml_parser_set_reader_error(parser, + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return yaml_parser_set_reader_error(parser, + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. + value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return yaml_parser_set_reader_error(parser, + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. 
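+			// (width is the number of raw bytes consumed: one to four for
+			// UTF-8, and 2 or 4 for UTF-16, where 4 means a surrogate pair;
+			// for example, U+1F600 arrives in UTF-16BE as D8 3D DE 00.)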
+			parser.raw_buffer_pos += width
+			parser.offset += width
+
+			// Finally put the character into the buffer.
+			if value <= 0x7F {
+				// 0000 0000-0000 007F . 0xxxxxxx
+				parser.buffer[buffer_len+0] = byte(value)
+				buffer_len += 1
+			} else if value <= 0x7FF {
+				// 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
+				parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
+				buffer_len += 2
+			} else if value <= 0xFFFF {
+				// 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
+				buffer_len += 3
+			} else {
+				// 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+				parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
+				parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
+				parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
+				parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
+				buffer_len += 4
+			}
+
+			parser.unread++
+		}
+
+		// On EOF, put NUL into the buffer and return.
+		if parser.eof {
+			parser.buffer[buffer_len] = 0
+			buffer_len++
+			parser.unread++
+			break
+		}
+	}
+	// [Go] Read the documentation of this function above. To return true,
+	// we need to have the given length in the buffer. Not doing that means
+	// every single check that calls this function to make sure the buffer
+	// has a given length is either panicking (in Go) or accessing invalid
+	// memory (as the C code would). This happens here due to the EOF above
+	// breaking early.
+	for buffer_len < length {
+		parser.buffer[buffer_len] = 0
+		buffer_len++
+	}
+	parser.buffer = parser.buffer[:buffer_len]
+	return true
+}
diff --git a/vendor/gopkg.in/yaml.v2/resolve.go b/vendor/gopkg.in/yaml.v2/resolve.go
new file mode 100644
index 0000000..4120e0c
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/resolve.go
@@ -0,0 +1,258 @@
+package yaml
+
+import (
+	"encoding/base64"
+	"math"
+	"regexp"
+	"strconv"
+	"strings"
+	"time"
+)
+
+type resolveMapItem struct {
+	value interface{}
+	tag   string
+}
+
+var resolveTable = make([]byte, 256)
+var resolveMap = make(map[string]resolveMapItem)
+
+func init() {
+	t := resolveTable
+	t[int('+')] = 'S' // Sign
+	t[int('-')] = 'S'
+	for _, c := range "0123456789" {
+		t[int(c)] = 'D' // Digit
+	}
+	for _, c := range "yYnNtTfFoO~" {
+		t[int(c)] = 'M' // In map
+	}
+	t[int('.')] = '.' // Float (potentially in map)
+
+	var resolveMapList = []struct {
+		v   interface{}
+		tag string
+		l   []string
+	}{
+		{true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
+		{true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
+		{true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
+		{false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
+		{false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
+		{false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
+		{nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
+		{math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
+		{math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
+		{math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
+		{"<<", yaml_MERGE_TAG, []string{"<<"}},
+	}
+
+	m := resolveMap
+	for _, item := range resolveMapList {
+		for _, s := range item.l {
+			m[s] = resolveMapItem{item.v, item.tag}
+		}
+	}
+}
+
+const longTagPrefix = "tag:yaml.org,2002:"
+
+func shortTag(tag string) string {
+	// TODO This can easily be made faster and produce less garbage.
+	if strings.HasPrefix(tag, longTagPrefix) {
+		return "!!" + tag[len(longTagPrefix):]
+	}
+	return tag
+}
+
+func longTag(tag string) string {
+	if strings.HasPrefix(tag, "!!") {
+		return longTagPrefix + tag[2:]
+	}
+	return tag
+}
+
+func resolvableTag(tag string) bool {
+	switch tag {
+	case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG, yaml_TIMESTAMP_TAG:
+		return true
+	}
+	return false
+}
+
+var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`)
+
+func resolve(tag string, in string) (rtag string, out interface{}) {
+	if !resolvableTag(tag) {
+		return tag, in
+	}
+
+	defer func() {
+		switch tag {
+		case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
+			return
+		case yaml_FLOAT_TAG:
+			if rtag == yaml_INT_TAG {
+				switch v := out.(type) {
+				case int64:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				case int:
+					rtag = yaml_FLOAT_TAG
+					out = float64(v)
+					return
+				}
+			}
+		}
+		failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
+	}()
+
+	// Any data is accepted as a !!str or !!binary.
+	// Otherwise, the prefix is enough of a hint about what it might be.
+	hint := byte('N')
+	if in != "" {
+		hint = resolveTable[in[0]]
+	}
+	if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
+		// Handle things we can look up in a map.
+		if item, ok := resolveMap[in]; ok {
+			return item.tag, item.value
+		}
+
+		// Base 60 floats are a bad idea, were dropped in YAML 1.2, and
+		// are purposefully unsupported here. They're still quoted on
+		// the way out for compatibility with other parsers, though.
+
+		switch hint {
+		case 'M':
+			// We've already checked the map above.
+
+		case '.':
+			// Not in the map, so maybe a normal float.
+			floatv, err := strconv.ParseFloat(in, 64)
+			if err == nil {
+				return yaml_FLOAT_TAG, floatv
+			}
+
+		case 'D', 'S':
+			// Int, float, or timestamp.
+			// Only try values as a timestamp if the value is unquoted or
+			// there's an explicit !!timestamp tag.
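+			// (In practice an empty tag here corresponds to an unquoted plain
+			// scalar, so 2001-12-15 can resolve to a timestamp while the
+			// quoted form "2001-12-15" remains a string.)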
+ if tag == "" || tag == yaml_TIMESTAMP_TAG { + t, ok := parseTimestamp(in) + if ok { + return yaml_TIMESTAMP_TAG, t + } + } + + plain := strings.Replace(in, "_", "", -1) + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return yaml_FLOAT_TAG, floatv + } + } + if strings.HasPrefix(plain, "0b") { + intv, err := strconv.ParseInt(plain[2:], 2, 64) + if err == nil { + if intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + uintv, err := strconv.ParseUint(plain[2:], 2, 64) + if err == nil { + return yaml_INT_TAG, uintv + } + } else if strings.HasPrefix(plain, "-0b") { + intv, err := strconv.ParseInt("-" + plain[3:], 2, 64) + if err == nil { + if true || intv == int64(int(intv)) { + return yaml_INT_TAG, int(intv) + } else { + return yaml_INT_TAG, intv + } + } + } + default: + panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")") + } + } + return yaml_STR_TAG, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". + "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. 
+	i := 0
+	for ; i < len(s); i++ {
+		if c := s[i]; c < '0' || c > '9' {
+			break
+		}
+	}
+	if i != 4 || i == len(s) || s[i] != '-' {
+		return time.Time{}, false
+	}
+	for _, format := range allowedTimestampFormats {
+		if t, err := time.Parse(format, s); err == nil {
+			return t, true
+		}
+	}
+	return time.Time{}, false
+}
diff --git a/vendor/gopkg.in/yaml.v2/scannerc.go b/vendor/gopkg.in/yaml.v2/scannerc.go
new file mode 100644
index 0000000..0b9bb60
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/scannerc.go
@@ -0,0 +1,2711 @@
+package yaml
+
+import (
+	"bytes"
+	"fmt"
+)
+
+// Introduction
+// ************
+//
+// The following notes assume that you are familiar with the YAML specification
+// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in
+// some cases we are less restrictive than it requires.
+//
+// The process of transforming a YAML stream into a sequence of events is
+// divided into two steps: Scanning and Parsing.
+//
+// The Scanner transforms the input stream into a sequence of tokens, while the
+// Parser transforms the sequence of tokens produced by the Scanner into a
+// sequence of parsing events.
+//
+// The Scanner is rather clever and complicated. The Parser, on the contrary,
+// is a straightforward implementation of a recursive descent parser (or an
+// LL(1) parser, as it is usually called).
+//
+// Actually there are two issues of Scanning that might be called "clever", the
+// rest is quite straightforward. The issues are "block collection start" and
+// "simple keys". Both issues are explained below in detail.
+//
+// Here the Scanning step is explained and implemented. We start with the list
+// of all the tokens produced by the Scanner together with short descriptions.
+//
+// Now, tokens:
+//
+//      STREAM-START(encoding)          # The stream start.
+//      STREAM-END                      # The stream end.
+//      VERSION-DIRECTIVE(major,minor)  # The '%YAML' directive.
+//      TAG-DIRECTIVE(handle,prefix)    # The '%TAG' directive.
+//      DOCUMENT-START                  # '---'
+//      DOCUMENT-END                    # '...'
+//      BLOCK-SEQUENCE-START            # Indentation increase denoting a block
+//      BLOCK-MAPPING-START             # sequence or a block mapping.
+//      BLOCK-END                       # Indentation decrease.
+//      FLOW-SEQUENCE-START             # '['
+//      FLOW-SEQUENCE-END               # ']'
+//      FLOW-MAPPING-START              # '{'
+//      FLOW-MAPPING-END                # '}'
+//      BLOCK-ENTRY                     # '-'
+//      FLOW-ENTRY                      # ','
+//      KEY                             # '?' or nothing (simple keys).
+//      VALUE                           # ':'
+//      ALIAS(anchor)                   # '*anchor'
+//      ANCHOR(anchor)                  # '&anchor'
+//      TAG(handle,suffix)              # '!handle!suffix'
+//      SCALAR(value,style)             # A scalar.
+//
+// The following two tokens are "virtual" tokens denoting the beginning and the
+// end of the stream:
+//
+//      STREAM-START(encoding)
+//      STREAM-END
+//
+// We pass the information about the input stream encoding with the
+// STREAM-START token.
+//
+// The next two tokens are responsible for directives:
+//
+//      VERSION-DIRECTIVE(major,minor)
+//      TAG-DIRECTIVE(handle,prefix)
+//
+// Example:
+//
+//      %YAML   1.1
+//      %TAG    !       !foo
+//      %TAG    !yaml!  tag:yaml.org,2002:
+//      ---
+//
+// The corresponding sequence of tokens:
+//
+//      STREAM-START(utf-8)
+//      VERSION-DIRECTIVE(1,1)
+//      TAG-DIRECTIVE("!","!foo")
+//      TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
+//      DOCUMENT-START
+//      STREAM-END
+//
+// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
+// line.
+//
+// The document start and end indicators are represented by:
+//
+//      DOCUMENT-START
+//      DOCUMENT-END
+//
+// Note that if a YAML stream contains an implicit document (without '---'
+// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
+// produced.
+//
+// In the following examples, we present whole documents together with the
+// produced tokens.
+//
+// 1. An implicit document:
+//
+//      'a scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      STREAM-END
+//
+// 2. An explicit document:
+//
+//      ---
+//      'a scalar'
+//      ...
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-END
+//      STREAM-END
+//
+// 3. Several documents in a stream:
+//
+//      'a scalar'
+//      ---
+//      'another scalar'
+//      ---
+//      'yet another scalar'
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      SCALAR("a scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("another scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("yet another scalar",single-quoted)
+//      STREAM-END
+//
+// We have already introduced the SCALAR token above. The following tokens are
+// used to describe aliases, anchors, tags, and scalars:
+//
+//      ALIAS(anchor)
+//      ANCHOR(anchor)
+//      TAG(handle,suffix)
+//      SCALAR(value,style)
+//
+// The following series of examples illustrates the usage of these tokens:
+//
+// 1. A recursive sequence:
+//
+//      &A [ *A ]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      ANCHOR("A")
+//      FLOW-SEQUENCE-START
+//      ALIAS("A")
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A tagged scalar:
+//
+//      !!float "3.14"  # A good approximation.
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      TAG("!!","float")
+//      SCALAR("3.14",double-quoted)
+//      STREAM-END
+//
+// 3. Various scalar styles:
+//
+//      --- # Implicit empty plain scalars do not produce tokens.
+//      --- a plain scalar
+//      --- 'a single-quoted scalar'
+//      --- "a double-quoted scalar"
+//      --- |-
+//        a literal scalar
+//      --- >-
+//        a folded
+//        scalar
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      DOCUMENT-START
+//      DOCUMENT-START
+//      SCALAR("a plain scalar",plain)
+//      DOCUMENT-START
+//      SCALAR("a single-quoted scalar",single-quoted)
+//      DOCUMENT-START
+//      SCALAR("a double-quoted scalar",double-quoted)
+//      DOCUMENT-START
+//      SCALAR("a literal scalar",literal)
+//      DOCUMENT-START
+//      SCALAR("a folded scalar",folded)
+//      STREAM-END
+//
+// Now it's time to review collection-related tokens. We will start with
+// flow collections:
+//
+//      FLOW-SEQUENCE-START
+//      FLOW-SEQUENCE-END
+//      FLOW-MAPPING-START
+//      FLOW-MAPPING-END
+//      FLOW-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
+// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
+// correspondingly. FLOW-ENTRY represents the ',' indicator. Finally the
+// indicators '?' and ':', which are used for denoting mapping keys and values,
+// are represented by the KEY and VALUE tokens.
+//
+// The following examples show flow collections:
+//
+// 1. A flow sequence:
+//
+//      [item 1, item 2, item 3]
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-SEQUENCE-START
+//      SCALAR("item 1",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 2",plain)
+//      FLOW-ENTRY
+//      SCALAR("item 3",plain)
+//      FLOW-SEQUENCE-END
+//      STREAM-END
+//
+// 2. A flow mapping:
+//
+//      {
+//          a simple key: a value,  # Note that the KEY token is produced.
+//          ? a complex key: another value,
+//      }
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      FLOW-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      FLOW-ENTRY
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      FLOW-ENTRY
+//      FLOW-MAPPING-END
+//      STREAM-END
+//
+// A simple key is a key which is not denoted by the '?' indicator. Note that
+// the Scanner still produces the KEY token whenever it encounters a simple key.
+//
+// For scanning block collections, the following tokens are used (note that we
+// repeat KEY and VALUE here):
+//
+//      BLOCK-SEQUENCE-START
+//      BLOCK-MAPPING-START
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      KEY
+//      VALUE
+//
+// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
+// increase that precedes a block collection (cf. the INDENT token in Python).
+// The token BLOCK-END denotes indentation decrease that ends a block collection
+// (cf. the DEDENT token in Python). However YAML has some syntax peculiarities
+// that make detection of these tokens more complex.
+//
+// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
+// '-', '?', and ':' correspondingly.
+//
+// The following examples show how the tokens BLOCK-SEQUENCE-START,
+// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
+//
+// 1. Block sequences:
+//
+//      - item 1
+//      - item 2
+//      -
+//        - item 3.1
+//        - item 3.2
+//      -
+//        key 1: value 1
+//        key 2: value 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-ENTRY
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 3.1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 3.2",plain)
+//      BLOCK-END
+//      BLOCK-ENTRY
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// 2. Block mappings:
+//
+//      a simple key: a value   # The KEY token is produced here.
+//      ? a complex key
+//      : another value
+//      a mapping:
+//        key 1: value 1
+//        key 2: value 2
+//      a sequence:
+//        - item 1
+//        - item 2
+//
+// Tokens:
+//
+//      STREAM-START(utf-8)
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("a simple key",plain)
+//      VALUE
+//      SCALAR("a value",plain)
+//      KEY
+//      SCALAR("a complex key",plain)
+//      VALUE
+//      SCALAR("another value",plain)
+//      KEY
+//      SCALAR("a mapping",plain)
+//      VALUE
+//      BLOCK-MAPPING-START
+//      KEY
+//      SCALAR("key 1",plain)
+//      VALUE
+//      SCALAR("value 1",plain)
+//      KEY
+//      SCALAR("key 2",plain)
+//      VALUE
+//      SCALAR("value 2",plain)
+//      BLOCK-END
+//      KEY
+//      SCALAR("a sequence",plain)
+//      VALUE
+//      BLOCK-SEQUENCE-START
+//      BLOCK-ENTRY
+//      SCALAR("item 1",plain)
+//      BLOCK-ENTRY
+//      SCALAR("item 2",plain)
+//      BLOCK-END
+//      BLOCK-END
+//      STREAM-END
+//
+// YAML does not always require starting a new block collection from a new
+// line. If the current line contains only '-', '?', and ':' indicators, a new
+// block collection may start at the current line. The following examples
+// illustrate this case:
+//
+// 1. Collections in a sequence:
+//
+//      - - item 1
+//        - item 2
+//      - key 1: value 1
+//        key 2: value 2
+//      - ? 
complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func cache(parser *yaml_parser_t, length int) bool { + // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B) + return parser.unread >= length || yaml_parser_update_buffer(parser, length) +} + +// Advance the buffer pointer. +func skip(parser *yaml_parser_t) { + parser.mark.index++ + parser.mark.column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func skip_line(parser *yaml_parser_t) { + if is_crlf(parser.buffer, parser.buffer_pos) { + parser.mark.index += 2 + parser.mark.column = 0 + parser.mark.line++ + parser.unread -= 2 + parser.buffer_pos += 2 + } else if is_break(parser.buffer, parser.buffer_pos) { + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + } +} + +// Copy a character to a string buffer and advance pointers. +func read(parser *yaml_parser_t, s []byte) []byte { + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) + parser.buffer_pos += w + } + parser.mark.index++ + parser.mark.column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func read_line(parser *yaml_parser_t, s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . 
LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.index++ + parser.mark.column = 0 + parser.mark.line++ + parser.unread-- + return s +} + +// Get the next token. +func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool { + // Erase the token object. + *token = yaml_token_t{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.error != yaml_NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !yaml_parser_fetch_more_tokens(parser) { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.typ == yaml_STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. +func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool { + parser.error = yaml_SCANNER_ERROR + parser.context = context + parser.context_mark = context_mark + parser.problem = problem + parser.problem_mark = parser.mark + return false +} + +func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return yaml_parser_set_scanner_error(parser, context, context_mark, problem) +} + +func trace(args ...interface{}) func() { + pargs := append([]interface{}{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]interface{}{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool { + // While we need more tokens to fetch, do it. + for { + if parser.tokens_head != len(parser.tokens) { + // If queue is non-empty, check if any potential simple key may + // occupy the head position. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := yaml_simple_key_is_valid(parser, &parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !yaml_parser_fetch_next_token(parser) { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we just started scanning. Fetch STREAM-START then. + if !parser.stream_start_produced { + return yaml_parser_fetch_stream_start(parser) + } + + // Eat whitespaces and comments until we reach the next token. 
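+	// (yaml_parser_scan_to_next_token below also skips a BOM at column 0 and
+	// eats line breaks; a line break in the block context re-allows simple
+	// keys.)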
+ if !yaml_parser_scan_to_next_token(parser) { + return false + } + + // Check the indentation level against the current column. + if !yaml_parser_unroll_indent(parser, parser.mark.column) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + // Is it the end of the stream? + if is_z(parser.buffer, parser.buffer_pos) { + return yaml_parser_fetch_stream_end(parser) + } + + // Is it a directive? + if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return yaml_parser_fetch_directive(parser) + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) { + return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN) + } + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return yaml_parser_fetch_flow_collection_end(parser, + yaml_FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return yaml_parser_fetch_flow_entry(parser) + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) { + return yaml_parser_fetch_block_entry(parser) + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_key(parser) + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_value(parser) + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' { + return yaml_parser_fetch_tag(parser) + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return yaml_parser_fetch_block_scalar(parser, false) + } + + // Is it a single-quoted scalar? 
+ if parser.buffer[parser.buffer_pos] == '\'' { + return yaml_parser_fetch_flow_scalar(parser, true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return yaml_parser_fetch_flow_scalar(parser, false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level == 0 && + (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !is_blankz(parser.buffer, parser.buffer_pos+1)) { + return yaml_parser_fetch_plain_scalar(parser) + } + + // If we don't determine the token type so far, it is an error. + return yaml_parser_set_scanner_error(parser, + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func yaml_simple_key_is_valid(parser *yaml_parser_t, simple_key *yaml_simple_key_t) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, yaml_parser_set_scanner_error(parser, + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. 
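+// For example, while scanning "foo: bar" the scanner saves a potential simple
+// key at 'f'; when the following ':' is reached, yaml_parser_fetch_value turns
+// that saved position into a KEY token.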
+func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
+	// A simple key is required at the current position if the scanner is in
+	// the block context and the current column coincides with the indentation
+	// level.
+
+	required := parser.flow_level == 0 && parser.indent == parser.mark.column
+
+	// If the current position may start a simple key, save it.
+	if parser.simple_key_allowed {
+		simple_key := yaml_simple_key_t{
+			possible:     true,
+			required:     required,
+			token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+			mark:         parser.mark,
+		}
+
+		if !yaml_parser_remove_simple_key(parser) {
+			return false
+		}
+		parser.simple_keys[len(parser.simple_keys)-1] = simple_key
+		parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1
+	}
+	return true
+}
+
+// Remove a potential simple key at the current flow level.
+func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
+	i := len(parser.simple_keys) - 1
+	if parser.simple_keys[i].possible {
+		// If the key is required, it is an error.
+		if parser.simple_keys[i].required {
+			return yaml_parser_set_scanner_error(parser,
+				"while scanning a simple key", parser.simple_keys[i].mark,
+				"could not find expected ':'")
+		}
+		// Remove the key from the stack.
+		parser.simple_keys[i].possible = false
+		delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number)
+	}
+	return true
+}
+
+// max_flow_level limits the flow_level
+const max_flow_level = 10000
+
+// Increase the flow level and resize the simple key list if needed.
+func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
+	// Reset the simple key on the next level.
+	parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{
+		possible:     false,
+		required:     false,
+		token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
+		mark:         parser.mark,
+	})
+
+	// Increase the flow level.
+	parser.flow_level++
+	if parser.flow_level > max_flow_level {
+		return yaml_parser_set_scanner_error(parser,
+			"while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+			fmt.Sprintf("exceeded max depth of %d", max_flow_level))
+	}
+	return true
+}
+
+// Decrease the flow level.
+func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
+	if parser.flow_level > 0 {
+		parser.flow_level--
+		last := len(parser.simple_keys) - 1
+		delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number)
+		parser.simple_keys = parser.simple_keys[:last]
+	}
+	return true
+}
+
+// max_indents limits the indents stack size
+const max_indents = 10000
+
+// Push the current indentation level to the stack and set the new level if
+// the current column is greater than the indentation level. In this case,
+// append or insert the specified token into the token queue.
+func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
+	// In the flow context, do nothing.
+	if parser.flow_level > 0 {
+		return true
+	}
+
+	if parser.indent < column {
+		// Push the current indentation level to the stack and set the new
+		// indentation level.
+		parser.indents = append(parser.indents, parser.indent)
+		parser.indent = column
+		if len(parser.indents) > max_indents {
+			return yaml_parser_set_scanner_error(parser,
+				"while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark,
+				fmt.Sprintf("exceeded max depth of %d", max_indents))
+		}
+
+		// Create a token and insert it into the queue.
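+		// (When called from yaml_parser_fetch_value, number is the simple
+		// key's token number, so the BLOCK-MAPPING-START token is inserted
+		// into the queue right before that KEY token rather than appended.)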
+ token := yaml_token_t{ + typ: typ, + start_mark: mark, + end_mark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + yaml_insert_token(parser, number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + // Loop through the indentation levels in the stack. + for parser.indent > column { + // Create a token and append it to the queue. + token := yaml_token_t{ + typ: yaml_BLOCK_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool { + + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_START_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + encoding: parser.encoding, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool { + + // Force new line. + if parser.mark.column != 0 { + parser.mark.column = 0 + parser.mark.line++ + } + + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := yaml_token_t{ + typ: yaml_STREAM_END_TOKEN, + start_mark: parser.mark, + end_mark: parser.mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func yaml_parser_fetch_directive(parser *yaml_parser_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := yaml_token_t{} + if !yaml_parser_scan_directive(parser, &token) { + return false + } + // Append the token to the queue. + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // Reset the indentation level. + if !yaml_parser_unroll_indent(parser, -1) { + return false + } + + // Reset simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. 
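+	// (A document indicator is exactly three characters long, hence the three
+	// skips below.)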
+	start_mark := parser.mark
+
+	skip(parser)
+	skip(parser)
+	skip(parser)
+
+	end_mark := parser.mark
+
+	// Create the DOCUMENT-START or DOCUMENT-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// The indicators '[' and '{' may start a simple key.
+	if !yaml_parser_save_simple_key(parser) {
+		return false
+	}
+
+	// Increase the flow level.
+	if !yaml_parser_increase_flow_level(parser) {
+		return false
+	}
+
+	// A simple key may follow the indicators '[' and '{'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
+	// Reset any potential simple key on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Decrease the flow level.
+	if !yaml_parser_decrease_flow_level(parser) {
+		return false
+	}
+
+	// No simple keys after the indicators ']' and '}'.
+	parser.simple_key_allowed = false
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
+	token := yaml_token_t{
+		typ:        typ,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	// Append the token to the queue.
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the FLOW-ENTRY token.
+func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after ','.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the FLOW-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_FLOW_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the BLOCK-ENTRY token.
+func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
+	// Check if the scanner is in the block context.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new entry.
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"block sequence entries are not allowed in this context")
+		}
+		// Add the BLOCK-SEQUENCE-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
+			return false
+		}
+	} else {
+		// It is an error for the '-' indicator to occur in the flow context,
+		// but we let the Parser detect and report it, because the Parser
+		// is able to point to the context.
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '-'.
+	parser.simple_key_allowed = true
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the BLOCK-ENTRY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_BLOCK_ENTRY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the KEY token.
+func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
+
+	// In the block context, additional checks are required.
+	if parser.flow_level == 0 {
+		// Check if we are allowed to start a new key (not necessarily simple).
+		if !parser.simple_key_allowed {
+			return yaml_parser_set_scanner_error(parser, "", parser.mark,
+				"mapping keys are not allowed in this context")
+		}
+		// Add the BLOCK-MAPPING-START token if needed.
+		if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+			return false
+		}
+	}
+
+	// Reset any potential simple keys on the current flow level.
+	if !yaml_parser_remove_simple_key(parser) {
+		return false
+	}
+
+	// Simple keys are allowed after '?' in the block context.
+	parser.simple_key_allowed = parser.flow_level == 0
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the KEY token and append it to the queue.
+	token := yaml_token_t{
+		typ:        yaml_KEY_TOKEN,
+		start_mark: start_mark,
+		end_mark:   end_mark,
+	}
+	yaml_insert_token(parser, -1, &token)
+	return true
+}
+
+// Produce the VALUE token.
+func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
+
+	simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
+
+	// Have we found a simple key?
+	if valid, ok := yaml_simple_key_is_valid(parser, simple_key); !ok {
+		return false
+
+	} else if valid {
+
+		// Create the KEY token and insert it into the queue.
+		token := yaml_token_t{
+			typ:        yaml_KEY_TOKEN,
+			start_mark: simple_key.mark,
+			end_mark:   simple_key.mark,
+		}
+		yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
+
+		// In the block context, we may need to add the BLOCK-MAPPING-START token.
+		if !yaml_parser_roll_indent(parser, simple_key.mark.column,
+			simple_key.token_number,
+			yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
+			return false
+		}
+
+		// Remove the simple key.
+		simple_key.possible = false
+		delete(parser.simple_keys_by_tok, simple_key.token_number)
+
+		// A simple key cannot follow another simple key.
+		parser.simple_key_allowed = false
+
+	} else {
+		// The ':' indicator follows a complex key.
+
+		// In the block context, extra checks are required.
+		if parser.flow_level == 0 {
+
+			// Check if we are allowed to start a complex value.
+			if !parser.simple_key_allowed {
+				return yaml_parser_set_scanner_error(parser, "", parser.mark,
+					"mapping values are not allowed in this context")
+			}
+
+			// Add the BLOCK-MAPPING-START token if needed.
+			if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
+				return false
+			}
+		}
+
+		// Simple keys after ':' are allowed in the block context.
+		parser.simple_key_allowed = parser.flow_level == 0
+	}
+
+	// Consume the token.
+	start_mark := parser.mark
+	skip(parser)
+	end_mark := parser.mark
+
+	// Create the VALUE token and append it to the queue.
+ token := yaml_token_t{ + typ: yaml_VALUE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool { + // An anchor or an alias could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_anchor(parser, &token, typ) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the TAG token. +func yaml_parser_fetch_tag(parser *yaml_parser_t) bool { + // A tag could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_tag(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool { + // Remove any potential simple keys. + if !yaml_parser_remove_simple_key(parser) { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_block_scalar(parser, &token, literal) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. +func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_flow_scalar(parser, &token, single) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool { + // A plain scalar could be a simple key. + if !yaml_parser_save_simple_key(parser) { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token yaml_token_t + if !yaml_parser_scan_plain_scalar(parser, &token) { + return false + } + yaml_insert_token(parser, -1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool { + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) { + skip(parser) + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). 
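+		// For example, the tab in the flow sequence "[a,\tb]" is skipped by
+		// the loop below, because parser.flow_level > 0 at that point.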
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // If it is a line break, eat it. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// +func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool { + // Eat '%'. + start_mark := parser.mark + skip(parser) + + // Scan the directive name. + var name []byte + if !yaml_parser_scan_directive_name(parser, start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. + var major, minor int8 + if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_VERSION_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = yaml_token_t{ + typ: yaml_TAG_DIRECTIVE_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + return true +} + +// Scan the directive name. 
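+// Only the names "YAML" and "TAG" are acted on by the caller above; any other
+// name is reported as an unknown directive.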
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//       ^^^^
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//       ^^^
+//
+func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
+	// Consume the directive name.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+
+	var s []byte
+	for is_alpha(parser.buffer, parser.buffer_pos) {
+		s = read(parser, s)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the name is empty.
+	if len(s) == 0 {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "could not find expected directive name")
+		return false
+	}
+
+	// Check for a blank character after the name.
+	if !is_blankz(parser.buffer, parser.buffer_pos) {
+		yaml_parser_set_scanner_error(parser, "while scanning a directive",
+			start_mark, "found unexpected non-alphabetical character")
+		return false
+	}
+	*name = s
+	return true
+}
+
+// Scan the value of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//           ^^^^^^
+func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
+	// Eat whitespaces.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	for is_blank(parser.buffer, parser.buffer_pos) {
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Consume the major version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
+		return false
+	}
+
+	// Eat '.'.
+	if parser.buffer[parser.buffer_pos] != '.' {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected digit or '.' character")
+	}
+
+	skip(parser)
+
+	// Consume the minor version number.
+	if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
+		return false
+	}
+	return true
+}
+
+const max_number_length = 2
+
+// Scan the version number of VERSION-DIRECTIVE.
+//
+// Scope:
+//      %YAML   1.1     # a comment \n
+//              ^
+//      %YAML   1.1     # a comment \n
+//                ^
+func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
+
+	// Repeat while the next character is a digit.
+	if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+		return false
+	}
+	var value, length int8
+	for is_digit(parser.buffer, parser.buffer_pos) {
+		// Check if the number is too long.
+		length++
+		if length > max_number_length {
+			return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+				start_mark, "found extremely long version number")
+		}
+		value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
+		skip(parser)
+		if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
+			return false
+		}
+	}
+
+	// Check if the number was present.
+	if length == 0 {
+		return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
+			start_mark, "did not find expected version number")
+	}
+	*number = value
+	return true
+}
+
+// Scan the value of a TAG-DIRECTIVE token.
+//
+// Scope:
+//      %TAG    !yaml!  tag:yaml.org,2002:  \n
+//          ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
+	var handle_value, prefix_value []byte
+
+	// Eat whitespaces.
+ if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a handle. + if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blank(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Scan a prefix. + if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + skip(parser) + + // Consume the value. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == yaml_ANCHOR_TOKEN { + context = "while scanning an anchor" + } + yaml_parser_set_scanner_error(parser, context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. + *token = yaml_token_t{ + typ: typ, + start_mark: start_mark, + end_mark: end_mark, + value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + skip(parser) + skip(parser) + + // Consume the tag value. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. 
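+		// For a verbatim tag such as !<tag:yaml.org,2002:str>, suffix now
+		// holds "tag:yaml.org,2002:str", so the only valid next character
+		// is the closing '>'.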
+ if parser.buffer[parser.buffer_pos] != '>' { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + skip(parser) + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if !is_blankz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_TAG_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = read(parser, s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_alpha(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = read(parser, s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool { + //size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. 
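+	// For example, in the tag !e!foo%20bar the suffix is scanned here, with
+	// the %20 escape decoded to a single space byte by
+	// yaml_parser_scan_uri_escapes below, yielding "foo bar".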
+ // [Go] Convert this into more reasonable logic. + for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) { + return false + } + } else { + s = read(parser, s) + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + hasTag = true + } + + if !hasTag { + yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool { + + // Decode the required number of characters. + w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + is_hex(parser.buffer, parser.buffer_pos+1) && + is_hex(parser.buffer, parser.buffer_pos+2)) { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return yaml_parser_set_scanner_tag_error(parser, directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + skip(parser) + skip(parser) + skip(parser) + w-- + } + return true +} + +// Scan a block scalar. +func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + skip(parser) + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. 
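+		// Chomping example: for a literal scalar whose raw content ends in
+		// "a\n\n", keep ("|+") preserves "a\n\n", strip ("|-") yields "a",
+		// and the default clip ("|") yields "a\n".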
+ if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + + // Check for an indentation indicator. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if is_digit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + } + + } else if is_digit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = as_digit(parser.buffer, parser.buffer_pos) + skip(parser) + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + skip(parser) + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for is_blank(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + for !is_breakz(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !is_breakz(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if is_break(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + skip_line(parser) + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. + var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. 
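+		// Folding example: in a folded scalar (">") the single break between
+		// "a" and "b" in " a\n b\n" becomes a space, producing "a b\n";
+		// a literal scalar ("|") would keep the break and produce "a\nb\n".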
+ s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = is_blank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !is_breakz(parser.buffer, parser.buffer_pos) { + s = read(parser, s) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + leading_break = read_line(parser, leading_break) + + // Eat the following indentation spaces and line breaks. + if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_LITERAL_SCALAR_STYLE, + } + if !literal { + token.style = yaml_FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) { + skip(parser) + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + if parser.mark.column > max_indent { + max_indent = parser.mark.column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) { + return yaml_parser_set_scanner_error(parser, "while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !is_break(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = read_line(parser, *breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + skip(parser) + + // Consume the content of the quoted scalar. + var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' 
&&
+					parser.buffer[parser.buffer_pos+2] == '.')) &&
+			is_blankz(parser.buffer, parser.buffer_pos+3) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected document indicator")
+			return false
+		}
+
+		// Check for EOF.
+		if is_z(parser.buffer, parser.buffer_pos) {
+			yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
+				start_mark, "found unexpected end of stream")
+			return false
+		}
+
+		// Consume non-blank characters.
+		leading_blanks := false
+		for !is_blankz(parser.buffer, parser.buffer_pos) {
+			if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
+				// It is an escaped single quote.
+				s = append(s, '\'')
+				skip(parser)
+				skip(parser)
+
+			} else if single && parser.buffer[parser.buffer_pos] == '\'' {
+				// It is a right single quote.
+				break
+			} else if !single && parser.buffer[parser.buffer_pos] == '"' {
+				// It is a right double quote.
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
+				// It is an escaped line break.
+				if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
+					return false
+				}
+				skip(parser)
+				skip_line(parser)
+				leading_blanks = true
+				break
+
+			} else if !single && parser.buffer[parser.buffer_pos] == '\\' {
+				// It is an escape sequence.
+				code_length := 0
+
+				// Check the escape character.
+				switch parser.buffer[parser.buffer_pos+1] {
+				case '0':
+					s = append(s, 0)
+				case 'a':
+					s = append(s, '\x07')
+				case 'b':
+					s = append(s, '\x08')
+				case 't', '\t':
+					s = append(s, '\x09')
+				case 'n':
+					s = append(s, '\x0A')
+				case 'v':
+					s = append(s, '\x0B')
+				case 'f':
+					s = append(s, '\x0C')
+				case 'r':
+					s = append(s, '\x0D')
+				case 'e':
+					s = append(s, '\x1B')
+				case ' ':
+					s = append(s, '\x20')
+				case '"':
+					s = append(s, '"')
+				case '\'':
+					s = append(s, '\'')
+				case '\\':
+					s = append(s, '\\')
+				case 'N': // NEL (#x85)
+					s = append(s, '\xC2')
+					s = append(s, '\x85')
+				case '_': // #xA0
+					s = append(s, '\xC2')
+					s = append(s, '\xA0')
+				case 'L': // LS (#x2028)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA8')
+				case 'P': // PS (#x2029)
+					s = append(s, '\xE2')
+					s = append(s, '\x80')
+					s = append(s, '\xA9')
+				case 'x':
+					code_length = 2
+				case 'u':
+					code_length = 4
+				case 'U':
+					code_length = 8
+				default:
+					yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+						start_mark, "found unknown escape character")
+					return false
+				}
+
+				skip(parser)
+				skip(parser)
+
+				// Consume an arbitrary escape code.
+				if code_length > 0 {
+					var value int
+
+					// Scan the character value.
+					if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
+						return false
+					}
+					for k := 0; k < code_length; k++ {
+						if !is_hex(parser.buffer, parser.buffer_pos+k) {
+							yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
+								start_mark, "did not find expected hexadecimal number")
+							return false
+						}
+						value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
+					}
+
+					// Check the value and write the character.
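+					// For example, the escape \u00E9 scans to value == 0xE9,
+					// which is emitted below as the UTF-8 bytes 0xC3 0xA9 ("é").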
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + skip(parser) + } + } + } else { + // It is a non-escaped non-blank character. + s = read(parser, s) + } + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Join the whitespaces or fold line breaks. + if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + skip(parser) + end_mark := parser.mark + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool { + + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + var indent = parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) { + return false + } + if parser.mark.column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' 
&& + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + is_blankz(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !is_blankz(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. + if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = read(parser, s) + + end_mark = parser.mark + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + } + + // Is it the end? + if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + + for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) { + if is_blank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) { + yaml_parser_set_scanner_error(parser, "while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = read(parser, whitespaces) + } else { + skip(parser) + } + } else { + if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = read_line(parser, leading_break) + leading_blanks = true + } else { + trailing_breaks = read_line(parser, trailing_breaks) + } + } + if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.column < indent { + break + } + } + + // Create a token. + *token = yaml_token_t{ + typ: yaml_SCALAR_TOKEN, + start_mark: start_mark, + end_mark: end_mark, + value: s, + style: yaml_PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. 
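+	// If trailing line breaks were consumed (leading_blanks), the scalar
+	// ended at a line break, so the next line may begin a new simple key,
+	// as in "a: 1" followed by "b: 2".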
+ if leading_blanks { + parser.simple_key_allowed = true + } + return true +} diff --git a/vendor/gopkg.in/yaml.v2/sorter.go b/vendor/gopkg.in/yaml.v2/sorter.go new file mode 100644 index 0000000..4c45e66 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/sorter.go @@ -0,0 +1,113 @@ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + return bl + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i-1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. +func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/gopkg.in/yaml.v2/writerc.go b/vendor/gopkg.in/yaml.v2/writerc.go new file mode 100644 index 0000000..a2dde60 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/writerc.go @@ -0,0 +1,26 @@ +package yaml + +// Set the writer error and return false. +func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool { + emitter.error = yaml_WRITER_ERROR + emitter.problem = problem + return false +} + +// Flush the output buffer. 
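+//
+// In outline: emitted bytes accumulate in emitter.buffer, and this function
+// hands buffer[:buffer_pos] to the write handler (installed by one of the
+// emitter output setters elsewhere in this package), resetting buffer_pos
+// once the write succeeds.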
+func yaml_emitter_flush(emitter *yaml_emitter_t) bool { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return true + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error()) + } + emitter.buffer_pos = 0 + return true +} diff --git a/vendor/gopkg.in/yaml.v2/yaml.go b/vendor/gopkg.in/yaml.v2/yaml.go new file mode 100644 index 0000000..89650e2 --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yaml.go @@ -0,0 +1,466 @@ +// Package yaml implements YAML support for the Go language. +// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/go-yaml/yaml +// +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strings" + "sync" +) + +// MapSlice encodes and decodes as a YAML map. +// The order of keys is preserved when encoding and decoding. +type MapSlice []MapItem + +// MapItem is an item in a MapSlice. +type MapItem struct { + Key, Value interface{} +} + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. The UnmarshalYAML +// method receives a function that may be called to unmarshal the original +// YAML value into a field or variable. It is safe to call the unmarshal +// function parameter more than once if necessary. +type Unmarshaler interface { + UnmarshalYAML(unmarshal func(interface{}) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (interface{}, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshalling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. 
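+//
+// A minimal usage sketch (the Config type and its fields are illustrative):
+//
+//	type Config struct {
+//		Name  string `yaml:"name"`
+//		Ports []int  `yaml:"ports"`
+//	}
+//	var c Config
+//	if err := yaml.Unmarshal([]byte("name: web\nports: [80, 443]"), &c); err != nil {
+//		// handle the scan/type error
+//	}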
+// +func Unmarshal(in []byte, out interface{}) (err error) { + return unmarshal(in, out, false) +} + +// UnmarshalStrict is like Unmarshal except that any fields that are found +// in the data that do not have corresponding struct members, or mapping +// keys that are duplicates, will result in +// an error. +func UnmarshalStrict(in []byte, out interface{}) (err error) { + return unmarshal(in, out, true) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + strict bool + parser *parser +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// SetStrict sets whether strict decoding behaviour is enabled when +// decoding items in the data (see UnmarshalStrict). By default, decoding is not strict. +func (dec *Decoder) SetStrict(strict bool) { + dec.strict = strict +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v interface{}) (err error) { + d := newDecoder(dec.strict) + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Ptr && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out interface{}, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder(strict) + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Ptr && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshalled if they are exported (have an upper case +// first letter), and are marshalled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshalling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be included if that method returns true. +// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. 
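+//
+// For example, a struct combining these flags (names are illustrative):
+//
+//	type Address struct {
+//		City string `yaml:"city"`
+//	}
+//	type Person struct {
+//		Name    string `yaml:"name"`
+//		Age     int    `yaml:"age,omitempty"`
+//		Address `yaml:",inline"`
+//	}
+//
+// marshals as "name: ...\ncity: ...", with age omitted while it is zero.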
+//
+// In addition, if the key is "-", the field is ignored.
+//
+// For example:
+//
+//	type T struct {
+//		F int `yaml:"a,omitempty"`
+//		B int
+//	}
+//	yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
+//	yaml.Marshal(&T{F: 1}) // Returns "a: 1\nb: 0\n"
+//
+func Marshal(in interface{}) (out []byte, err error) {
+	defer handleErr(&err)
+	e := newEncoder()
+	defer e.destroy()
+	e.marshalDoc("", reflect.ValueOf(in))
+	e.finish()
+	out = e.out
+	return
+}
+
+// An Encoder writes YAML values to an output stream.
+type Encoder struct {
+	encoder *encoder
+}
+
+// NewEncoder returns a new encoder that writes to w.
+// The Encoder should be closed after use to flush all data
+// to w.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		encoder: newEncoderWithWriter(w),
+	}
+}
+
+// Encode writes the YAML encoding of v to the stream.
+// If multiple items are encoded to the stream, the
+// second and subsequent documents will be preceded
+// with a "---" document separator, but the first will not.
+//
+// See the documentation for Marshal for details about the conversion of Go
+// values to YAML.
+func (e *Encoder) Encode(v interface{}) (err error) {
+	defer handleErr(&err)
+	e.encoder.marshalDoc("", reflect.ValueOf(v))
+	return nil
+}
+
+// Close closes the encoder by writing any remaining data.
+// It does not write a stream terminating string "...".
+func (e *Encoder) Close() (err error) {
+	defer handleErr(&err)
+	e.encoder.finish()
+	return nil
+}
+
+func handleErr(err *error) {
+	if v := recover(); v != nil {
+		if e, ok := v.(yamlError); ok {
+			*err = e.err
+		} else {
+			panic(v)
+		}
+	}
+}
+
+type yamlError struct {
+	err error
+}
+
+func fail(err error) {
+	panic(yamlError{err})
+}
+
+func failf(format string, args ...interface{}) {
+	panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
+}
+
+// A TypeError is returned by Unmarshal when one or more fields in
+// the YAML document cannot be properly decoded into the requested
+// types. When this error is returned, the value is still
+// unmarshaled partially.
+type TypeError struct {
+	Errors []string
+}
+
+func (e *TypeError) Error() string {
+	return fmt.Sprintf("yaml: unmarshal errors:\n  %s", strings.Join(e.Errors, "\n  "))
+}
+
+// --------------------------------------------------------------------------
+// Maintain a mapping of keys to structure field indexes
+
+// The code in this section was copied from mgo/bson.
+
+// structInfo holds details for the serialization of fields of
+// a given struct.
+type structInfo struct {
+	FieldsMap  map[string]fieldInfo
+	FieldsList []fieldInfo
+
+	// InlineMap is the number of the field in the struct that
+	// contains an ,inline map, or -1 if there's none.
+	InlineMap int
+}
+
+type fieldInfo struct {
+	Key       string
+	Num       int
+	OmitEmpty bool
+	Flow      bool
+	// Id holds the unique field identifier, so we can cheaply
+	// check for field duplicates without maintaining an extra map.
+	Id int
+
+	// Inline holds the field index if the field is part of an inlined struct.
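+	// It records the reflect field-index path from the outer struct down to
+	// the field (for example []int{0, 1} for the second field of the first
+	// embedded struct), suitable for reflect.Value.FieldByIndex.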
+ Inline []int +} + +var structMap = make(map[reflect.Type]*structInfo) +var fieldMapMutex sync.RWMutex + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && strings.Index(string(field.Tag), ":") < 0 { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st)) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, errors.New("Multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct: + sinfo, err := getStructInfo(field.Type) + if err != nil { + return nil, err + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) + } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + default: + //return nil, errors.New("Option ,inline needs a struct value or map field") + return nil, errors.New("Option ,inline needs a struct value field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "Duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
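+//
+// A type can opt in explicitly; for example (an illustrative type):
+//
+//	type Window struct{ Start, End time.Time }
+//
+//	func (w Window) IsZero() bool { return w.Start.IsZero() && w.End.IsZero() }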
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Ptr || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} diff --git a/vendor/gopkg.in/yaml.v2/yamlh.go b/vendor/gopkg.in/yaml.v2/yamlh.go new file mode 100644 index 0000000..f6a9c8e --- /dev/null +++ b/vendor/gopkg.in/yaml.v2/yamlh.go @@ -0,0 +1,739 @@ +package yaml + +import ( + "fmt" + "io" +) + +// The version directive data. +type yaml_version_directive_t struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// The tag directive data. +type yaml_tag_directive_t struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type yaml_encoding_t int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + yaml_ANY_ENCODING yaml_encoding_t = iota + + yaml_UTF8_ENCODING // The default UTF-8 encoding. + yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type yaml_break_t int + +// Line break types. +const ( + // Let the parser choose the break type. + yaml_ANY_BREAK yaml_break_t = iota + + yaml_CR_BREAK // Use CR for line breaks (Mac style). + yaml_LN_BREAK // Use LN for line breaks (Unix style). + yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type yaml_error_type_t int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + yaml_NO_ERROR yaml_error_type_t = iota + + yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + yaml_READER_ERROR // Cannot read or decode the input stream. + yaml_SCANNER_ERROR // Cannot scan the input stream. + yaml_PARSER_ERROR // Cannot parse the input stream. + yaml_COMPOSER_ERROR // Cannot compose a YAML document. + yaml_WRITER_ERROR // Cannot write to the output stream. + yaml_EMITTER_ERROR // Cannot emit a YAML stream. +) + +// The pointer position. +type yaml_mark_t struct { + index int // The position index. + line int // The position line. + column int // The position column. +} + +// Node Styles + +type yaml_style_t int8 + +type yaml_scalar_style_t yaml_style_t + +// Scalar styles. +const ( + // Let the emitter choose the style. + yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota + + yaml_PLAIN_SCALAR_STYLE // The plain scalar style. + yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + yaml_LITERAL_SCALAR_STYLE // The literal scalar style. + yaml_FOLDED_SCALAR_STYLE // The folded scalar style. +) + +type yaml_sequence_style_t yaml_style_t + +// Sequence styles. 
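+// Block style lays out one item per line ("- 1" then "- 2"), while flow
+// style renders the same sequence inline ("[1, 2]").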
+const ( + // Let the emitter choose the style. + yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota + + yaml_BLOCK_SEQUENCE_STYLE // The block sequence style. + yaml_FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type yaml_mapping_style_t yaml_style_t + +// Mapping styles. +const ( + // Let the emitter choose the style. + yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota + + yaml_BLOCK_MAPPING_STYLE // The block mapping style. + yaml_FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type yaml_token_type_t int + +// Token types. +const ( + // An empty token. + yaml_NO_TOKEN yaml_token_type_t = iota + + yaml_STREAM_START_TOKEN // A STREAM-START token. + yaml_STREAM_END_TOKEN // A STREAM-END token. + + yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token. + yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + yaml_BLOCK_END_TOKEN // A BLOCK-END token. + + yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. + yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + yaml_KEY_TOKEN // A KEY token. + yaml_VALUE_TOKEN // A VALUE token. + + yaml_ALIAS_TOKEN // An ALIAS token. + yaml_ANCHOR_TOKEN // An ANCHOR token. + yaml_TAG_TOKEN // A TAG token. + yaml_SCALAR_TOKEN // A SCALAR token. +) + +func (tt yaml_token_type_t) String() string { + switch tt { + case yaml_NO_TOKEN: + return "yaml_NO_TOKEN" + case yaml_STREAM_START_TOKEN: + return "yaml_STREAM_START_TOKEN" + case yaml_STREAM_END_TOKEN: + return "yaml_STREAM_END_TOKEN" + case yaml_VERSION_DIRECTIVE_TOKEN: + return "yaml_VERSION_DIRECTIVE_TOKEN" + case yaml_TAG_DIRECTIVE_TOKEN: + return "yaml_TAG_DIRECTIVE_TOKEN" + case yaml_DOCUMENT_START_TOKEN: + return "yaml_DOCUMENT_START_TOKEN" + case yaml_DOCUMENT_END_TOKEN: + return "yaml_DOCUMENT_END_TOKEN" + case yaml_BLOCK_SEQUENCE_START_TOKEN: + return "yaml_BLOCK_SEQUENCE_START_TOKEN" + case yaml_BLOCK_MAPPING_START_TOKEN: + return "yaml_BLOCK_MAPPING_START_TOKEN" + case yaml_BLOCK_END_TOKEN: + return "yaml_BLOCK_END_TOKEN" + case yaml_FLOW_SEQUENCE_START_TOKEN: + return "yaml_FLOW_SEQUENCE_START_TOKEN" + case yaml_FLOW_SEQUENCE_END_TOKEN: + return "yaml_FLOW_SEQUENCE_END_TOKEN" + case yaml_FLOW_MAPPING_START_TOKEN: + return "yaml_FLOW_MAPPING_START_TOKEN" + case yaml_FLOW_MAPPING_END_TOKEN: + return "yaml_FLOW_MAPPING_END_TOKEN" + case yaml_BLOCK_ENTRY_TOKEN: + return "yaml_BLOCK_ENTRY_TOKEN" + case yaml_FLOW_ENTRY_TOKEN: + return "yaml_FLOW_ENTRY_TOKEN" + case yaml_KEY_TOKEN: + return "yaml_KEY_TOKEN" + case yaml_VALUE_TOKEN: + return "yaml_VALUE_TOKEN" + case yaml_ALIAS_TOKEN: + return "yaml_ALIAS_TOKEN" + case yaml_ANCHOR_TOKEN: + return "yaml_ANCHOR_TOKEN" + case yaml_TAG_TOKEN: + return "yaml_TAG_TOKEN" + case yaml_SCALAR_TOKEN: + return "yaml_SCALAR_TOKEN" + } + return "" +} + +// The token structure. +type yaml_token_t struct { + // The token type. + typ yaml_token_type_t + + // The start/end of the token. + start_mark, end_mark yaml_mark_t + + // The stream encoding (for yaml_STREAM_START_TOKEN). 
+ encoding yaml_encoding_t + + // The alias/anchor/scalar value or tag/tag directive handle + // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN). + value []byte + + // The tag suffix (for yaml_TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar style (for yaml_SCALAR_TOKEN). + style yaml_scalar_style_t + + // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type yaml_event_type_t int8 + +// Event types. +const ( + // An empty event. + yaml_NO_EVENT yaml_event_type_t = iota + + yaml_STREAM_START_EVENT // A STREAM-START event. + yaml_STREAM_END_EVENT // A STREAM-END event. + yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event. + yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event. + yaml_ALIAS_EVENT // An ALIAS event. + yaml_SCALAR_EVENT // A SCALAR event. + yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event. + yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event. + yaml_MAPPING_START_EVENT // A MAPPING-START event. + yaml_MAPPING_END_EVENT // A MAPPING-END event. +) + +var eventStrings = []string{ + yaml_NO_EVENT: "none", + yaml_STREAM_START_EVENT: "stream start", + yaml_STREAM_END_EVENT: "stream end", + yaml_DOCUMENT_START_EVENT: "document start", + yaml_DOCUMENT_END_EVENT: "document end", + yaml_ALIAS_EVENT: "alias", + yaml_SCALAR_EVENT: "scalar", + yaml_SEQUENCE_START_EVENT: "sequence start", + yaml_SEQUENCE_END_EVENT: "sequence end", + yaml_MAPPING_START_EVENT: "mapping start", + yaml_MAPPING_END_EVENT: "mapping end", +} + +func (e yaml_event_type_t) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// The event structure. +type yaml_event_t struct { + + // The event type. + typ yaml_event_type_t + + // The start and end of the event. + start_mark, end_mark yaml_mark_t + + // The document encoding (for yaml_STREAM_START_EVENT). + encoding yaml_encoding_t + + // The version directive (for yaml_DOCUMENT_START_EVENT). + version_directive *yaml_version_directive_t + + // The list of tag directives (for yaml_DOCUMENT_START_EVENT). + tag_directives []yaml_tag_directive_t + + // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT). + anchor []byte + + // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + tag []byte + + // The scalar value (for yaml_SCALAR_EVENT). + value []byte + + // Is the document start/end indicator implicit, or the tag optional? + // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT). + implicit bool + + // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT). + quoted_implicit bool + + // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). + style yaml_style_t +} + +func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) } +func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) } +func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) } + +// Nodes + +const ( + yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. 
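+	// Untagged plain scalars resolve to one of these core tags: for example
+	// "true" resolves to !!bool, "42" to !!int, "3.5" to !!float, and other
+	// strings such as "hello" to !!str.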
+ yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + yaml_BINARY_TAG = "tag:yaml.org,2002:binary" + yaml_MERGE_TAG = "tag:yaml.org,2002:merge" + + yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str. + yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq. + yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map. +) + +type yaml_node_type_t int + +// Node types. +const ( + // An empty node. + yaml_NO_NODE yaml_node_type_t = iota + + yaml_SCALAR_NODE // A scalar node. + yaml_SEQUENCE_NODE // A sequence node. + yaml_MAPPING_NODE // A mapping node. +) + +// An element of a sequence node. +type yaml_node_item_t int + +// An element of a mapping node. +type yaml_node_pair_t struct { + key int // The key of the element. + value int // The value of the element. +} + +// The node structure. +type yaml_node_t struct { + typ yaml_node_type_t // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for yaml_SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style yaml_scalar_style_t // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []yaml_node_item_t // The stack of sequence items. + style yaml_sequence_style_t // The sequence style. + } + + // The mapping parameters (for yaml_MAPPING_NODE). + mapping struct { + pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value). + pairs_start *yaml_node_pair_t // The beginning of the stack. + pairs_end *yaml_node_pair_t // The end of the stack. + pairs_top *yaml_node_pair_t // The top of the stack. + style yaml_mapping_style_t // The mapping style. + } + + start_mark yaml_mark_t // The beginning of the node. + end_mark yaml_mark_t // The end of the node. + +} + +// The document structure. +type yaml_document_t struct { + + // The document nodes. + nodes []yaml_node_t + + // The version directive. + version_directive *yaml_version_directive_t + + // The list of tag directives. + tag_directives_data []yaml_tag_directive_t + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? + end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark yaml_mark_t +} + +// The prototype of a read handler. +// +// The read handler is called when the parser needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// yaml_parser_set_input(). +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. 
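+//
+// A sketch of a string-backed handler matching this contract (the name is
+// illustrative; it mirrors the input and input_pos fields of yaml_parser_t
+// below):
+//
+//	func string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
+//		if parser.input_pos == len(parser.input) {
+//			return 0, io.EOF
+//		}
+//		n = copy(buffer, parser.input[parser.input_pos:])
+//		parser.input_pos += n
+//		return n, nil
+//	}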
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0. On EOF, the handler should set the
+// size_read to 0 and return 1.
+type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
+
+// This structure holds information about a potential simple key.
+type yaml_simple_key_t struct {
+	possible     bool        // Is a simple key possible?
+	required     bool        // Is a simple key required?
+	token_number int         // The number of the token.
+	mark         yaml_mark_t // The position mark.
+}
+
+// The states of the parser.
+type yaml_parser_state_t int
+
+const (
+	// Expect STREAM-START.
+	yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
+
+	yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE           // Expect the beginning of an implicit document.
+	yaml_PARSE_DOCUMENT_START_STATE                    // Expect DOCUMENT-START.
+	yaml_PARSE_DOCUMENT_CONTENT_STATE                  // Expect the content of a document.
+	yaml_PARSE_DOCUMENT_END_STATE                      // Expect DOCUMENT-END.
+	yaml_PARSE_BLOCK_NODE_STATE                        // Expect a block node.
+	yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
+	yaml_PARSE_FLOW_NODE_STATE                         // Expect a flow node.
+	yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE        // Expect the first entry of a block sequence.
+	yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE              // Expect an entry of a block sequence.
+	yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE         // Expect an entry of an indentless sequence.
+	yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE           // Expect the first key of a block mapping.
+	yaml_PARSE_BLOCK_MAPPING_KEY_STATE                 // Expect a block mapping key.
+	yaml_PARSE_BLOCK_MAPPING_VALUE_STATE               // Expect a block mapping value.
+	yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE         // Expect the first entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE               // Expect an entry of a flow sequence.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE   // Expect a key of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
+	yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE   // Expect the end of an ordered mapping entry.
+	yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE            // Expect the first key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_KEY_STATE                  // Expect a key of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_VALUE_STATE                // Expect a value of a flow mapping.
+	yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE          // Expect an empty value of a flow mapping.
+	yaml_PARSE_END_STATE                               // Expect nothing.
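+
+	// Illustrative note (not from libyaml): for a small block mapping such as
+	// "a: 1", the parser moves roughly through STREAM_START ->
+	// IMPLICIT_DOCUMENT_START -> BLOCK_NODE -> BLOCK_MAPPING_FIRST_KEY ->
+	// BLOCK_MAPPING_VALUE -> BLOCK_MAPPING_KEY -> DOCUMENT_END -> END.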
+)
+
+func (ps yaml_parser_state_t) String() string {
+	switch ps {
+	case yaml_PARSE_STREAM_START_STATE:
+		return "yaml_PARSE_STREAM_START_STATE"
+	case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
+		return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_START_STATE:
+		return "yaml_PARSE_DOCUMENT_START_STATE"
+	case yaml_PARSE_DOCUMENT_CONTENT_STATE:
+		return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
+	case yaml_PARSE_DOCUMENT_END_STATE:
+		return "yaml_PARSE_DOCUMENT_END_STATE"
+	case yaml_PARSE_BLOCK_NODE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_STATE"
+	case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
+		return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
+	case yaml_PARSE_FLOW_NODE_STATE:
+		return "yaml_PARSE_FLOW_NODE_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
+	case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
+		return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
+	case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
+	case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
+	case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
+		return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
+	case yaml_PARSE_END_STATE:
+		return "yaml_PARSE_END_STATE"
+	}
+	return fmt.Sprintf("unknown parser state %d", int(ps))
+}
+
+// This structure holds alias data.
+type yaml_alias_data_t struct {
+	anchor []byte      // The anchor.
+	index  int         // The node id.
+	mark   yaml_mark_t // The anchor mark.
+}
+
+// The parser structure.
+//
+// All members are internal. Manage the structure using the
+// yaml_parser_ family of functions.
+type yaml_parser_t struct {
+
+	// Error handling
+
+	error yaml_error_type_t // Error type.
+
+	problem string // Error description.
+
+	// The byte about which the problem occurred.
+	problem_offset int
+	problem_value  int
+	problem_mark   yaml_mark_t
+
+	// The error context.
+	context      string
+	context_mark yaml_mark_t
+
+	// Reader stuff
+
+	read_handler yaml_read_handler_t // Read handler.
+
+	input_reader io.Reader // File input data.
+	input        []byte    // String input data.
+	input_pos    int
+
+	eof bool // EOF flag
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	unread int // The number of unread characters in the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the raw buffer.
+
+	encoding yaml_encoding_t // The input encoding.
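+
+	// Note: raw_buffer holds undecoded bytes taken from input or input_reader;
+	// the reader decodes them into buffer as UTF-8, and the scanner consumes
+	// buffer via buffer_pos, with unread counting the characters still pending.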
+
+	offset int         // The offset of the current position (in bytes).
+	mark   yaml_mark_t // The mark of the current position.
+
+	// Scanner stuff
+
+	stream_start_produced bool // Have we started to scan the input stream?
+	stream_end_produced   bool // Have we reached the end of the input stream?
+
+	flow_level int // The number of unclosed '[' and '{' indicators.
+
+	tokens          []yaml_token_t // The tokens queue.
+	tokens_head     int            // The head of the tokens queue.
+	tokens_parsed   int            // The number of tokens fetched from the queue.
+	token_available bool           // Does the tokens queue contain a token ready for dequeueing?
+
+	indent  int   // The current indentation level.
+	indents []int // The indentation levels stack.
+
+	simple_key_allowed bool                // May a simple key occur at the current position?
+	simple_keys        []yaml_simple_key_t // The stack of simple keys.
+	simple_keys_by_tok map[int]int         // Possible simple_key indexes, indexed by token_number.
+
+	// Parser stuff
+
+	state          yaml_parser_state_t    // The current parser state.
+	states         []yaml_parser_state_t  // The parser states stack.
+	marks          []yaml_mark_t          // The stack of marks.
+	tag_directives []yaml_tag_directive_t // The list of TAG directives.
+
+	// Dumper stuff
+
+	aliases []yaml_alias_data_t // The alias data.
+
+	document *yaml_document_t // The currently parsed document.
+}
+
+// Emitter Definitions
+
+// The prototype of a write handler.
+//
+// The write handler is called when the emitter needs to flush the accumulated
+// characters to the output. The handler should write size bytes of the
+// buffer to the output.
+//
+// [in,out] data    A pointer to application data specified by
+//                  yaml_emitter_set_output().
+// [in]     buffer  The buffer with bytes to be written.
+// [in]     size    The size of the buffer.
+//
+// On success, the handler should return 1. If the handler failed,
+// the returned value should be 0.
+//
+type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
+
+type yaml_emitter_state_t int
+
+// The emitter states.
+const (
+	// Expect STREAM-START.
+	yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
+
+	yaml_EMIT_FIRST_DOCUMENT_START_STATE       // Expect the first DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_START_STATE             // Expect DOCUMENT-START or STREAM-END.
+	yaml_EMIT_DOCUMENT_CONTENT_STATE           // Expect the content of a document.
+	yaml_EMIT_DOCUMENT_END_STATE               // Expect DOCUMENT-END.
+	yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE   // Expect the first item of a flow sequence.
+	yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE         // Expect an item of a flow sequence.
+	yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE     // Expect the first key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_KEY_STATE           // Expect a key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE  // Expect a value for a simple key of a flow mapping.
+	yaml_EMIT_FLOW_MAPPING_VALUE_STATE         // Expect a value of a flow mapping.
+	yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE  // Expect the first item of a block sequence.
+	yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE        // Expect an item of a block sequence.
+	yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE    // Expect the first key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_KEY_STATE          // Expect the key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
+	yaml_EMIT_BLOCK_MAPPING_VALUE_STATE        // Expect a value of a block mapping.
+	yaml_EMIT_END_STATE                        // Expect nothing.
+)
+
+// The emitter structure.
+//
+// All members are internal.
+// Manage the structure using the yaml_emitter_ family of functions.
+type yaml_emitter_t struct {
+
+	// Error handling
+
+	error   yaml_error_type_t // Error type.
+	problem string            // Error description.
+
+	// Writer stuff
+
+	write_handler yaml_write_handler_t // Write handler.
+
+	output_buffer *[]byte   // String output data.
+	output_writer io.Writer // File output data.
+
+	buffer     []byte // The working buffer.
+	buffer_pos int    // The current position of the buffer.
+
+	raw_buffer     []byte // The raw buffer.
+	raw_buffer_pos int    // The current position of the raw buffer.
+
+	encoding yaml_encoding_t // The stream encoding.
+
+	// Emitter stuff
+
+	canonical   bool         // Is the output in the canonical style?
+	best_indent int          // The number of indentation spaces.
+	best_width  int          // The preferred width of the output lines.
+	unicode     bool         // Allow unescaped non-ASCII characters?
+	line_break  yaml_break_t // The preferred line break.
+
+	state  yaml_emitter_state_t   // The current emitter state.
+	states []yaml_emitter_state_t // The stack of states.
+
+	events      []yaml_event_t // The event queue.
+	events_head int            // The head of the event queue.
+
+	indents []int // The stack of indentation levels.
+
+	tag_directives []yaml_tag_directive_t // The list of tag directives.
+
+	indent int // The current indentation level.
+
+	flow_level int // The current flow level.
+
+	root_context       bool // Is it the document root context?
+	sequence_context   bool // Is it a sequence context?
+	mapping_context    bool // Is it a mapping context?
+	simple_key_context bool // Is it a simple mapping key context?
+
+	line       int  // The current line.
+	column     int  // The current column.
+	whitespace bool // Was the last character a whitespace?
+	indention  bool // Was the last character an indentation character (' ', '-', '?', ':')?
+	open_ended bool // Is an explicit document end required?
+
+	// Anchor analysis.
+	anchor_data struct {
+		anchor []byte // The anchor value.
+		alias  bool   // Is it an alias?
+	}
+
+	// Tag analysis.
+	tag_data struct {
+		handle []byte // The tag handle.
+		suffix []byte // The tag suffix.
+	}
+
+	// Scalar analysis.
+	scalar_data struct {
+		value                 []byte              // The scalar value.
+		multiline             bool                // Does the scalar contain line breaks?
+		flow_plain_allowed    bool                // Can the scalar be expressed in the flow plain style?
+		block_plain_allowed   bool                // Can the scalar be expressed in the block plain style?
+		single_quoted_allowed bool                // Can the scalar be expressed in the single quoted style?
+		block_allowed         bool                // Can the scalar be expressed in the literal or folded styles?
+		style                 yaml_scalar_style_t // The output style.
+	}
+
+	// Dumper stuff
+
+	opened bool // Has the stream been opened?
+	closed bool // Has the stream been closed?
+
+	// The information associated with the document nodes.
+	anchors *struct {
+		references int  // The number of references.
+		anchor     int  // The anchor id.
+		serialized bool // Has the node been emitted?
+	}
+
+	last_anchor_id int // The last assigned anchor id.
+
+	document *yaml_document_t // The currently emitted document.
+}
diff --git a/vendor/gopkg.in/yaml.v2/yamlprivateh.go b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
new file mode 100644
index 0000000..8110ce3
--- /dev/null
+++ b/vendor/gopkg.in/yaml.v2/yamlprivateh.go
@@ -0,0 +1,173 @@
+package yaml
+
+const (
+	// The size of the input raw buffer.
+	input_raw_buffer_size = 512
+
+	// The size of the input buffer.
+	// It should be possible to decode the whole raw buffer.
+	input_buffer_size = input_raw_buffer_size * 3
+
+	// The size of the output buffer.
+	output_buffer_size = 128
+
+	// The size of the output raw buffer.
+	// It should be possible to encode the whole output buffer.
+	output_raw_buffer_size = (output_buffer_size*2 + 2)
+
+	// The size of other stacks and queues.
+	initial_stack_size  = 16
+	initial_queue_size  = 16
+	initial_string_size = 16
+)
+
+// Check if the character at the specified position is an alphabetical
+// character, a digit, '_', or '-'.
+func is_alpha(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
+}
+
+// Check if the character at the specified position is a digit.
+func is_digit(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9'
+}
+
+// Get the value of a digit.
+func as_digit(b []byte, i int) int {
+	return int(b[i]) - '0'
+}
+
+// Check if the character at the specified position is a hex-digit.
+func is_hex(b []byte, i int) bool {
+	return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
+}
+
+// Get the value of a hex-digit.
+func as_hex(b []byte, i int) int {
+	bi := b[i]
+	if bi >= 'A' && bi <= 'F' {
+		return int(bi) - 'A' + 10
+	}
+	if bi >= 'a' && bi <= 'f' {
+		return int(bi) - 'a' + 10
+	}
+	return int(bi) - '0'
+}
+
+// Check if the character is ASCII.
+func is_ascii(b []byte, i int) bool {
+	return b[i] <= 0x7F
+}
+
+// Check if the character at the specified position can be printed unescaped.
+func is_printable(b []byte, i int) bool {
+	return ((b[i] == 0x0A) || // . == #x0A
+		(b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
+		(b[i] == 0xC2 && b[i+1] >= 0xA0) || // #xA0 <= . <= #xD7FF
+		(b[i] > 0xC2 && b[i] < 0xED) ||
+		(b[i] == 0xED && b[i+1] < 0xA0) ||
+		(b[i] == 0xEE) ||
+		(b[i] == 0xEF && // #xE000 <= . <= #xFFFD
+			!(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
+			!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
+}
+
+// Check if the character at the specified position is NUL.
+func is_z(b []byte, i int) bool {
+	return b[i] == 0x00
+}
+
+// Check if the beginning of the buffer is a BOM.
+func is_bom(b []byte, i int) bool {
+	return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
+}
+
+// Check if the character at the specified position is space.
+func is_space(b []byte, i int) bool {
+	return b[i] == ' '
+}
+
+// Check if the character at the specified position is tab.
+func is_tab(b []byte, i int) bool {
+	return b[i] == '\t'
+}
+
+// Check if the character at the specified position is blank (space or tab).
+func is_blank(b []byte, i int) bool {
+	//return is_space(b, i) || is_tab(b, i)
+	return b[i] == ' ' || b[i] == '\t'
+}
+
+// Check if the character at the specified position is a line break.
+func is_break(b []byte, i int) bool {
+	return (b[i] == '\r' || // CR (#xD)
+		b[i] == '\n' || // LF (#xA)
+		b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
+		b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
+}
+
+func is_crlf(b []byte, i int) bool {
+	return b[i] == '\r' && b[i+1] == '\n'
+}
+
+// Check if the character is a line break or NUL.
+func is_breakz(b []byte, i int) bool { + //return is_break(b, i) || is_z(b, i) + return ( // is_break: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // is_z: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func is_spacez(b []byte, i int) bool { + //return is_space(b, i) || is_breakz(b, i) + return ( // is_space: + b[i] == ' ' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. +func is_blankz(b []byte, i int) bool { + //return is_blank(b, i) || is_breakz(b, i) + return ( // is_blank: + b[i] == ' ' || b[i] == '\t' || + // is_breakz: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 + +} diff --git a/vendor/modules.txt b/vendor/modules.txt index a764d51..f3bfad7 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,10 +1,33 @@ -# github.com/openfaas-incubator/connector-sdk v0.0.0-20190125151851-d722c9f72ad0 +# github.com/drone/envsubst v1.0.2 +github.com/drone/envsubst +github.com/drone/envsubst/parse +github.com/drone/envsubst/path +# github.com/mitchellh/go-homedir v1.1.0 +github.com/mitchellh/go-homedir +# github.com/openfaas-incubator/connector-sdk v0.0.0-20200902074656-7f648543d4aa +## explicit github.com/openfaas-incubator/connector-sdk/types -# github.com/openfaas/faas v0.0.0-20190104165101-a65df4795bc6 +# github.com/openfaas/faas v0.0.0-20200422142642-18f6c720b50d github.com/openfaas/faas/gateway/requests -# github.com/openfaas/faas-provider v0.0.0-20181216160432-220324e98f5d +# github.com/openfaas/faas-cli v0.0.0-20201119120128-c9d284d0c5bd +## explicit +github.com/openfaas/faas-cli/config +github.com/openfaas/faas-cli/proxy +github.com/openfaas/faas-cli/schema/store/v2 +github.com/openfaas/faas-cli/stack +# github.com/openfaas/faas-provider v0.15.1 +## explicit github.com/openfaas/faas-provider/auth -# github.com/pkg/errors v0.8.1 +github.com/openfaas/faas-provider/httputil +github.com/openfaas/faas-provider/logs +github.com/openfaas/faas-provider/types +# github.com/pkg/errors v0.9.1 +## explicit github.com/pkg/errors # github.com/robfig/cron/v3 v3.0.0 +## explicit github.com/robfig/cron/v3 +# github.com/ryanuber/go-glob v1.0.0 +github.com/ryanuber/go-glob +# gopkg.in/yaml.v2 v2.2.8 +gopkg.in/yaml.v2
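
As an aside on the vendored yamlprivateh.go above: its width helper encodes the
standard UTF-8 leading-byte rules. Because the helper is unexported, the
following minimal standalone sketch (the utf8Width name is hypothetical and not
part of this diff) exercises the same logic:

package main

import "fmt"

// utf8Width mirrors the vendored width(): it returns the byte length of the
// UTF-8 sequence introduced by b, or 0 for a continuation or invalid byte.
func utf8Width(b byte) int {
	// Same if-chain as the vendored code; its comment warns against a switch
	// so the function stays inlinable on hot paths.
	if b&0x80 == 0x00 { // 0xxxxxxx: single-byte (ASCII)
		return 1
	}
	if b&0xE0 == 0xC0 { // 110xxxxx: start of a 2-byte sequence
		return 2
	}
	if b&0xF0 == 0xE0 { // 1110xxxx: start of a 3-byte sequence
		return 3
	}
	if b&0xF8 == 0xF0 { // 11110xxx: start of a 4-byte sequence
		return 4
	}
	return 0 // continuation byte or invalid leading byte
}

func main() {
	// 'a' is 1 byte, 'é' 2 bytes, '€' 3 bytes, '😀' 4 bytes; the
	// continuation bytes within each sequence report 0.
	for _, b := range []byte("aé€😀") {
		fmt.Printf("0x%02X -> %d\n", b, utf8Width(b))
	}
}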