diff --git a/go.mod b/go.mod index 35c2c69ff..709c0b8a0 100644 --- a/go.mod +++ b/go.mod @@ -50,11 +50,9 @@ require ( github.com/evanphx/json-patch v5.6.0+incompatible // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/fsnotify/fsnotify v1.5.4 // indirect - github.com/go-bindata/go-bindata v3.1.2+incompatible // indirect github.com/go-openapi/jsonpointer v0.19.5 // indirect github.com/go-openapi/jsonreference v0.19.5 // indirect github.com/go-openapi/swag v0.19.14 // indirect - github.com/gobuffalo/flect v0.2.5 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt v3.2.2+incompatible // indirect @@ -99,14 +97,12 @@ require ( github.com/spf13/cast v1.3.1 // indirect github.com/spf13/pflag v1.0.5 // indirect golang.org/x/crypto v0.9.0 // indirect - golang.org/x/mod v0.8.0 // indirect golang.org/x/net v0.10.0 // indirect golang.org/x/oauth2 v0.7.0 // indirect golang.org/x/sys v0.8.0 // indirect golang.org/x/term v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.6.0 // indirect gomodules.xyz/jsonpatch/v2 v2.2.0 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/protobuf v1.28.0 // indirect @@ -115,13 +111,10 @@ require ( gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.25.0 // indirect - k8s.io/code-generator v0.25.0 // indirect k8s.io/component-base v0.25.0 // indirect - k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 // indirect k8s.io/klog/v2 v2.70.1 // indirect k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect - sigs.k8s.io/controller-tools v0.10.0 // indirect sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect sigs.k8s.io/yaml v1.3.0 // indirect diff --git a/go.sum b/go.sum index 
0f48e8833..53aa3b9fa 100644 --- a/go.sum +++ b/go.sum @@ -82,7 +82,6 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -124,8 +123,6 @@ github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:z github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= -github.com/codeready-toolchain/api v0.0.0-20230823083409-fe9ca973d9a9 h1:ytFqNSSEvgevqvwMilmmqlrrDH1O/qUwzg8bO3CxCiY= -github.com/codeready-toolchain/api v0.0.0-20230823083409-fe9ca973d9a9/go.mod h1:nn3W6eKb9PFIVwSwZW7wDeLACMBOwAV+4kddGuN+ARM= github.com/codeready-toolchain/api v0.0.0-20230918195153-739e8fb09a33 h1:hxXfcFq2JgFISVxrkISg8m9DZMzpcPWRjPspx3M3Sxo= github.com/codeready-toolchain/api v0.0.0-20230918195153-739e8fb09a33/go.mod h1:nn3W6eKb9PFIVwSwZW7wDeLACMBOwAV+4kddGuN+ARM= github.com/codeready-toolchain/toolchain-common v0.0.0-20230823084119-693476668406 h1:AkBhFV2tJhQaejTjntIhFC7r1FeLlQEiAL2gcn5/oW0= @@ -156,7 
+153,6 @@ github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -195,12 +191,9 @@ github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4 github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0= -github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/go-bindata/go-bindata v3.1.2+incompatible h1:5vjJMVhowQdPzjE1LdxyFF7YFTXg5IgGVW4gBr5IbvE= -github.com/go-bindata/go-bindata v3.1.2+incompatible/go.mod h1:xK8Dsgwmeed+BBsSy2XTopBn/8uK2HWuGSnA11C3Joo= github.com/go-bindata/go-bindata/v3 v3.1.3/go.mod h1:1/zrpXsLD8YDIbhZRqXzm1Ghc7NhEvIN9+Z6R5/xH4I= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-gl/glfw 
v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= @@ -241,8 +234,6 @@ github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/gobuffalo/flect v0.2.3/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc= -github.com/gobuffalo/flect v0.2.5 h1:H6vvsv2an0lalEaCDRThvtBfmg44W/QHXBCYUXf/6S4= -github.com/gobuffalo/flect v0.2.5/go.mod h1:1ZyCLIbg0YD7sDkzvFdPoOydPtD8y9JQnrOROolUcM8= github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/goccy/go-yaml v1.8.1/go.mod h1:wS4gNoLalDSJxo/SpngzPQ2BN4uuZVLCmbM4S3vd4+Y= @@ -503,7 +494,6 @@ github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= -github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6UYnnDB0qG5Nq/1tD9acaOpo6xmt0Kw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -774,7 +764,6 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod 
v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1032,7 +1021,6 @@ golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1224,8 +1212,6 @@ k8s.io/client-go v0.25.0 h1:CVWIaCETLMBNiTUta3d5nzRbXvY5Hy9Dpl+VvREpu5E= k8s.io/client-go v0.25.0/go.mod h1:lxykvypVfKilxhTklov0wz1FoaUZ8X4EwbhS6rpRfN8= k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= k8s.io/code-generator v0.22.1/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= -k8s.io/code-generator v0.25.0 h1:QP8fJuXu882ztf6dsqJsso/Btm94pMd68TAZC1rE6KI= -k8s.io/code-generator v0.25.0/go.mod h1:B6jZgI3DvDFAualltPitbYMQ74NjaCFxum3YeKZZ+3w= k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= k8s.io/component-base v0.22.1/go.mod h1:0D+Bl8rrnsPN9v0dyYvkqFfBeAd4u7n77ze+p8CMiPo= k8s.io/component-base v0.25.0 
h1:haVKlLkPCFZhkcqB6WCvpVxftrg6+FK5x1ZuaIDaQ5Y= @@ -1233,8 +1219,6 @@ k8s.io/component-base v0.25.0/go.mod h1:F2Sumv9CnbBlqrpdf7rKZTmmd2meJq0HizeyY/yA k8s.io/component-helpers v0.22.1/go.mod h1:QvBcDbX+qU5I2tMZABBF5fRwAlQwiv771IGBHK9WYh4= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185 h1:TT1WdmqqXareKxZ/oNXEUSwKlLiHzPMyB0t8BaFeBYI= -k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= @@ -1265,8 +1249,6 @@ sigs.k8s.io/controller-runtime v0.10.0/go.mod h1:GCdh6kqV6IY4LK0JLwX0Zm6g233RtVG sigs.k8s.io/controller-runtime v0.13.0 h1:iqa5RNciy7ADWnIc8QxCbOX5FEKVR3uxVxKHRMc2WIQ= sigs.k8s.io/controller-runtime v0.13.0/go.mod h1:Zbz+el8Yg31jubvAEyglRZGdLAjplZl+PgtYNI6WNTI= sigs.k8s.io/controller-tools v0.6.2/go.mod h1:oaeGpjXn6+ZSEIQkUe/+3I40PNiDYp9aeawbt3xTgJ8= -sigs.k8s.io/controller-tools v0.10.0 h1:0L5DTDTFB67jm9DkfrONgTGmfc/zYow0ZaHyppizU2U= -sigs.k8s.io/controller-tools v0.10.0/go.mod h1:uvr0EW6IsprfB0jpQq6evtKy+hHyHCXNfdWI5ONPx94= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 h1:iXTIw73aPyC+oRdyqqvVJuloN1p0AC/kzH07hu3NE+k= sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= sigs.k8s.io/kustomize/api v0.8.11/go.mod h1:a77Ls36JdfCWojpUqR6m60pdGY1AYFix4AH83nJtY1g= diff --git a/test/e2e/proxy/applications_test.go b/test/e2e/proxy/applications_test.go new file mode 100644 index 000000000..031a0eecf --- /dev/null +++ b/test/e2e/proxy/applications_test.go @@ -0,0 +1,351 @@ +package proxy + +import ( + "context" + "crypto/tls" + 
"encoding/json" + "fmt" + "io" + "net/http" + "strings" + "testing" + "time" + + . "github.com/codeready-toolchain/toolchain-e2e/testsupport" + . "github.com/codeready-toolchain/toolchain-e2e/testsupport/appstudio" + appstudiov1 "github.com/codeready-toolchain/toolchain-e2e/testsupport/appstudio/api/v1alpha1" + "github.com/codeready-toolchain/toolchain-e2e/testsupport/wait" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + k8serr "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" +) + +// full flow from usersignup with approval down to namespaces creation and cleanup +// +// !!! Additional context !!! +// The test uses a dummy HAS API type called Application. The reason is that the regular +// user doesn't have full permission for the standard types like ConfigMap. This means +// that we could do create/read operations on that resource from this test. +// To work around this limitation, we created a dummy HAS API type that has the same name +// and the same group as the actual one. The CRD is created as part of the test setup +// and since the CRD name & group name matches, then RBAC allow us to execute create/read +// operations on that resource using the user permissions. 
+func TestProxyApplicationsFlow(t *testing.T) { + // given + awaitilities := WaitForDeployments(t) + hostAwait := awaitilities.Host() + memberAwait := awaitilities.Member1() + + SetAppstudioConfig(t, hostAwait, memberAwait) + + t.Logf("Proxy URL: %s", hostAwait.APIProxyURL) + + waitForWatcher := RunWatcher(t, awaitilities) + defer func() { + t.Log("wait until the watcher is stopped") + waitForWatcher.Wait() + }() + users := CreateProxyUsersForTest(t, awaitilities) + for index, user := range users { + t.Run(user.Username, func(t *testing.T) { + // Start a new websocket watcher + w := NewWsWatcher(t, *user, user.CompliantUsername, hostAwait.APIProxyURL) + closeConnection := w.Start() + defer closeConnection() + proxyCl := user.CreateProxyClient(t, hostAwait) + applicationList := &appstudiov1.ApplicationList{} + + t.Run("use proxy to create a HAS Application CR in the user appstudio namespace via proxy API and use websocket to watch it created", func(t *testing.T) { + // Create and retrieve the application resources multiple times for the same user to make sure the proxy cache kicks in. 
+ for i := 0; i < 2; i++ { + // given + applicationName := user.GetApplicationName(i) + expectedApp := NewApplication(applicationName, TenantNsName(user.CompliantUsername)) + + // when + err := proxyCl.Create(context.TODO(), expectedApp) + require.NoError(t, err) + + // then + // wait for the websocket watcher which uses the proxy to receive the Application CR + found, err := w.WaitForApplication( + expectedApp.Name, + ) + require.NoError(t, err) + assert.NotEmpty(t, found) + assert.Equal(t, expectedApp.Spec, found.Spec) + + proxyApp := user.GetApplication(t, proxyCl, applicationName) + assert.NotEmpty(t, proxyApp) + + // Double check that the Application does exist using a regular client (non-proxy) + noProxyApp := user.GetApplicationWithoutProxy(t, applicationName) + assert.Equal(t, expectedApp.Spec, noProxyApp.Spec) + } + + t.Run("use proxy to update a HAS Application CR in the user appstudio namespace via proxy API", func(t *testing.T) { + // Update application + applicationName := user.GetApplicationName(0) + // Get application + proxyApp := user.GetApplication(t, proxyCl, applicationName) + // Update DisplayName + changedDisplayName := fmt.Sprintf("Proxy test for user %s - updated application", TenantNsName(user.CompliantUsername)) + proxyApp.Spec.DisplayName = changedDisplayName + err := proxyCl.Update(context.TODO(), proxyApp) + require.NoError(t, err) + + // Find application and check, if it is updated + updatedApp := user.GetApplication(t, proxyCl, applicationName) + assert.Equal(t, proxyApp.Spec.DisplayName, updatedApp.Spec.DisplayName) + + // Check that the Application is updated using a regular client (non-proxy) + noProxyUpdatedApp := user.GetApplicationWithoutProxy(t, applicationName) + assert.Equal(t, proxyApp.Spec.DisplayName, noProxyUpdatedApp.Spec.DisplayName) + }) + + t.Run("use proxy to list a HAS Application CR in the user appstudio namespace", func(t *testing.T) { + // Get List of applications. 
+ err := proxyCl.List(context.TODO(), applicationList, &client.ListOptions{Namespace: TenantNsName(user.CompliantUsername)}) + // User should be able to list applications + require.NoError(t, err) + assert.NotEmpty(t, applicationList.Items) + + // Check that the applicationList using a regular client (non-proxy) + applicationListWS := &appstudiov1.ApplicationList{} + err = user.ExpectedMemberCluster.Client.List(context.TODO(), applicationListWS, &client.ListOptions{Namespace: TenantNsName(user.CompliantUsername)}) + require.NoError(t, err) + require.Len(t, applicationListWS.Items, 2) + assert.Equal(t, applicationListWS.Items, applicationList.Items) + }) + + t.Run("use proxy to patch a HAS Application CR in the user appstudio namespace via proxy API", func(t *testing.T) { + // Patch application + applicationName := user.GetApplicationName(1) + patchString := "Patched application for proxy test" + // Get application + proxyApp := user.GetApplication(t, proxyCl, applicationName) + // Patch for DisplayName + patchPayload := []PatchStringValue{{ + Op: "replace", + Path: "/spec/displayName", + Value: patchString, + }} + patchPayloadBytes, err := json.Marshal(patchPayload) + require.NoError(t, err) + + // Appply Patch + err = proxyCl.Patch(context.TODO(), proxyApp, client.RawPatch(types.JSONPatchType, patchPayloadBytes)) + require.NoError(t, err) + + // Get patched app and verify patched DisplayName + patchedApp := user.GetApplication(t, proxyCl, applicationName) + assert.Equal(t, patchString, patchedApp.Spec.DisplayName) + + // Double check that the Application is patched using a regular client (non-proxy) + noProxyApp := user.GetApplicationWithoutProxy(t, applicationName) + assert.Equal(t, patchString, noProxyApp.Spec.DisplayName) + }) + + t.Run("use proxy to delete a HAS Application CR in the user appstudio namespace via proxy API and use websocket to watch it deleted", func(t *testing.T) { + // Delete applications + for i := 0; i < len(applicationList.Items); i++ { + 
// Get application + proxyApp := applicationList.Items[i].DeepCopy() + // Delete + err := proxyCl.Delete(context.TODO(), proxyApp) + require.NoError(t, err) + err = w.WaitForApplicationDeletion( + proxyApp.Name, + ) + require.NoError(t, err) + + // Check that the Application is deleted using a regular client (non-proxy) + namespacedName := types.NamespacedName{Namespace: TenantNsName(user.CompliantUsername), Name: proxyApp.Name} + originalApp := &appstudiov1.Application{} + err = user.ExpectedMemberCluster.Client.Get(context.TODO(), namespacedName, originalApp) + require.Error(t, err) //not found + require.True(t, k8serr.IsNotFound(err)) + } + }) + }) + + t.Run("try to create a resource in an unauthorized namespace", func(t *testing.T) { + // given + appName := fmt.Sprintf("%s-proxy-test-app", user.Username) + expectedApp := &appstudiov1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: hostAwait.Namespace, // user should not be allowed to create a resource in the host operator namespace + }, + Spec: appstudiov1.ApplicationSpec{ + DisplayName: "Should be forbidden", + }, + } + + // when + proxyCl = user.CreateProxyClient(t, hostAwait) + + // then + err := proxyCl.Create(context.TODO(), expectedApp) + require.EqualError(t, err, fmt.Sprintf(`invalid workspace request: access to namespace '%s' in workspace '%s' is forbidden (post applications.appstudio.redhat.com)`, hostAwait.Namespace, user.CompliantUsername)) + }) + + t.Run("unable to create a resource in the other users namespace because the workspace is not shared", func(t *testing.T) { + // given + otherUser := users[(index+1)%len(users)] + t.Log("other user: ", otherUser.Username) + // verify other user's namespace still exists + ns := &corev1.Namespace{} + namespaceName := TenantNsName(otherUser.CompliantUsername) + err := hostAwait.Client.Get(context.TODO(), types.NamespacedName{Name: namespaceName}, ns) + require.NoError(t, err, "the other user's namespace should still exist") + + // 
when + appName := fmt.Sprintf("%s-proxy-test-app", user.CompliantUsername) + appToCreate := &appstudiov1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: appName, + Namespace: namespaceName, // user should not be allowed to create a resource in the other user's namespace + }, + Spec: appstudiov1.ApplicationSpec{ + DisplayName: "Should be forbidden", + }, + } + workspaceName := otherUser.CompliantUsername + proxyWorkspaceURL := hostAwait.ProxyURLWithWorkspaceContext(workspaceName) // set workspace context to other user's workspace + proxyCl, err := hostAwait.CreateAPIProxyClient(t, user.Token, proxyWorkspaceURL) + require.NoError(t, err) + err = proxyCl.Create(context.TODO(), appToCreate) + + // then + // note: the actual error message is "invalid workspace request: access to workspace '%s' is forbidden" but clients make api discovery calls + // that fail because in this case the api discovery calls go through the proxyWorkspaceURL which is invalid. If using oc or kubectl and you + // enable verbose logging you would see Response Body: invalid workspace request: access to workspace 'proxymember2' is forbidden + require.EqualError(t, err, `no matches for kind "Application" in version "appstudio.redhat.com/v1alpha1"`) + }) + + t.Run("successful workspace context request", func(t *testing.T) { + proxyWorkspaceURL := hostAwait.ProxyURLWithWorkspaceContext(user.CompliantUsername) + // Start a new websocket watcher which watches for Application CRs in the user's namespace + w := NewWsWatcher(t, *user, user.CompliantUsername, proxyWorkspaceURL) + closeConnection := w.Start() + defer closeConnection() + workspaceCl, err := hostAwait.CreateAPIProxyClient(t, user.Token, proxyWorkspaceURL) // proxy client with workspace context + require.NoError(t, err) + + // given + applicationName := fmt.Sprintf("%s-workspace-context", user.CompliantUsername) + namespaceName := TenantNsName(user.CompliantUsername) + expectedApp := NewApplication(applicationName, namespaceName) + + // 
when + err = workspaceCl.Create(context.TODO(), expectedApp) + require.NoError(t, err) + + // then + // wait for the websocket watcher which uses the proxy to receive the Application CR + found, err := w.WaitForApplication( + expectedApp.Name, + ) + require.NoError(t, err) + assert.NotEmpty(t, found) + + // Double check that the Application does exist using a regular client (non-proxy) + createdApp := &appstudiov1.Application{} + err = user.ExpectedMemberCluster.Client.Get(context.TODO(), types.NamespacedName{Namespace: namespaceName, Name: applicationName}, createdApp) + require.NoError(t, err) + require.NotEmpty(t, createdApp) + assert.Equal(t, expectedApp.Spec.DisplayName, createdApp.Spec.DisplayName) + }) // end of successful workspace context request + + t.Run("successful workspace context request with proxy plugin", func(t *testing.T) { + // we are going to repurpose a well known, always running route as a proxy plugin to contact through the registration service + CreateProxyPluginWithCleanup(t, hostAwait, "openshift-console", "openshift-console", "console") + VerifyProxyPlugin(t, hostAwait, "openshift-console") + proxyPluginWorkspaceURL := hostAwait.PluginProxyURLWithWorkspaceContext("openshift-console", user.CompliantUsername) + client := http.Client{ + Timeout: 30 * time.Second, + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec + }, + } + request, err := http.NewRequest("GET", proxyPluginWorkspaceURL, nil) + require.NoError(t, err) + + request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", user.Token)) + var resp *http.Response + resp, err = client.Do(request) + require.NoError(t, err) + defer resp.Body.Close() + var body []byte + body, err = io.ReadAll(resp.Body) + require.NoError(t, err) + bodyStr := string(body) + if resp.StatusCode != http.StatusOK { + t.Errorf("unexpected http return code of %d with body text %s", resp.StatusCode, bodyStr) + } + if !strings.Contains(bodyStr, "Red") || 
!strings.Contains(bodyStr, "Open") { + t.Errorf("unexpected http response body %s", bodyStr) + } + }) // end of successful workspace context request with proxy plugin + + t.Run("invalid workspace context request", func(t *testing.T) { + proxyWorkspaceURL := hostAwait.ProxyURLWithWorkspaceContext("notexist") + hostAwaitWithShorterTimeout := hostAwait.WithRetryOptions(wait.TimeoutOption(time.Second * 3)) // we expect an error so we can use a shorter timeout + _, err := hostAwaitWithShorterTimeout.CreateAPIProxyClient(t, user.Token, proxyWorkspaceURL) + require.EqualError(t, err, `an error on the server ("unable to get target cluster: the requested space is not available") has prevented the request from succeeding`) + }) + + t.Run("invalid request headers", func(t *testing.T) { + // given + proxyWorkspaceURL := hostAwait.ProxyURLWithWorkspaceContext(user.CompliantUsername) + rejectedHeaders := []headerKeyValue{ + {"Impersonate-Group", "system:cluster-admins"}, + {"Impersonate-Group", "system:node-admins"}, + } + client := http.Client{ + Timeout: time.Duration(5 * time.Second), // because sometimes the network connection may be a bit slow + } + client.Transport = &http.Transport{ + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, // nolint:gosec + }, + } + t.Logf("proxyWorkspaceURL: %s", proxyWorkspaceURL) + nodesURL := fmt.Sprintf("%s/api/v1/nodes", proxyWorkspaceURL) + t.Logf("nodesURL: %s", nodesURL) + + for _, header := range rejectedHeaders { + t.Run(fmt.Sprintf("k=%s,v=%s", header.Key, header.Value), func(t *testing.T) { + // given + request, err := http.NewRequest("GET", nodesURL, nil) + request.Header.Add(header.Key, header.Value) + require.NoError(t, err) + request.Header.Add("Authorization", fmt.Sprintf("Bearer %s", user.Token)) // uses the user's token with the impersonation headers + + // when + resp, err := client.Do(request) + + // then + require.NoError(t, err) + require.NotNil(t, resp) + defer resp.Body.Close() + require.Equal(t, 403, 
resp.StatusCode) // should be forbidden + r, _ := io.ReadAll(resp.Body) + assert.Contains(t, string(r), fmt.Sprintf(`nodes is forbidden: User \"%s\" cannot list resource \"nodes\" in API group \"\" at the cluster scope`, user.CompliantUsername)) + }) + } + + }) // end of invalid request headers + }) + } // end users loop +} + +type headerKeyValue struct { + Key, Value string +} diff --git a/test/e2e/proxy/shared_workspaces_test.go b/test/e2e/proxy/shared_workspaces_test.go new file mode 100644 index 000000000..cbeee1289 --- /dev/null +++ b/test/e2e/proxy/shared_workspaces_test.go @@ -0,0 +1,152 @@ +package proxy + +import ( + "context" + "fmt" + "testing" + + . "github.com/codeready-toolchain/toolchain-e2e/testsupport" + . "github.com/codeready-toolchain/toolchain-e2e/testsupport/appstudio" + appstudiov1 "github.com/codeready-toolchain/toolchain-e2e/testsupport/appstudio/api/v1alpha1" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "k8s.io/apimachinery/pkg/types" +) + +// full flow from usersignup with approval down to namespaces creation and cleanup +// +// !!! Additional context !!! +// The test uses a dummy HAS API type called Application. The reason is that the regular +// user doesn't have full permission for the standard types like ConfigMap. This means +// that we could do create/read operations on that resource from this test. +// To work around this limitation, we created a dummy HAS API type that has the same name +// and the same group as the actual one. The CRD is created as part of the test setup +// and since the CRD name & group name matches, then RBAC allow us to execute create/read +// operations on that resource using the user permissions. 
// TestSharedWorkspaces verifies workspace sharing through the proxy:
// a guest user is first denied access to another user's workspace, is granted
// access once the space is shared, and is still denied when using the wrong
// workspace context for a namespace. It also checks that pre-existing
// user/identity resources (without the "owner" label) survive the flow.
func TestSharedWorkspaces(t *testing.T) {
	t.Parallel()
	// given
	awaitilities := WaitForDeployments(t)
	hostAwait := awaitilities.Host()
	memberAwait := awaitilities.Member1()

	SetAppstudioConfig(t, hostAwait, memberAwait)

	t.Logf("Proxy URL: %s", hostAwait.APIProxyURL)

	waitForWatcher := RunWatcher(t, awaitilities)
	defer func() {
		t.Log("wait until the watcher is stopped")
		waitForWatcher.Wait()
	}()

	users := CreateProxyUsersForTest(t, awaitilities)

	// if identity & user resources are already present but don't contain the "owner" label, they shouldn't be deleted
	preexistingUser, preexistingIdentity := CreatePreexistingUserAndIdentity(t, *users[0])

	t.Run("proxy with shared workspace use cases", func(t *testing.T) {
		// given
		guestUser := users[0]
		primaryUser := users[1]
		applicationName := fmt.Sprintf("%s-share-workspace-context", primaryUser.CompliantUsername)
		workspaceName := primaryUser.CompliantUsername
		primaryUserWorkspaceURL := hostAwait.ProxyURLWithWorkspaceContext(workspaceName) // set workspace context using primaryUser's workspace

		// ensure the app exists in primaryUser's space (created with a direct member-cluster client, not the proxy)
		primaryUserNamespace := TenantNsName(primaryUser.CompliantUsername)
		expectedApp := NewApplication(applicationName, primaryUserNamespace)
		err := primaryUser.ExpectedMemberCluster.Client.Create(context.TODO(), expectedApp)
		require.NoError(t, err)

		t.Run("guestUser request to unauthorized workspace", func(t *testing.T) {
			// proxy client authenticated as guestUser but targeting primaryUser's (not yet shared) workspace
			proxyCl, err := hostAwait.CreateAPIProxyClient(t, guestUser.Token, primaryUserWorkspaceURL)
			require.NoError(t, err)

			// when
			actualApp := &appstudiov1.Application{}
			err = proxyCl.Get(context.TODO(), types.NamespacedName{Name: applicationName, Namespace: primaryUserNamespace}, actualApp)

			// then
			// note: the actual error message is "invalid workspace request: access to workspace '%s' is forbidden" but clients make api discovery calls
			// that fail because in this case the api discovery calls go through the proxyWorkspaceURL which is invalid. If using oc or kubectl and you
			// enable verbose logging you would see Response Body: invalid workspace request: access to workspace 'proxymember2' is forbidden
			require.EqualError(t, err, `no matches for kind "Application" in version "appstudio.redhat.com/v1alpha1"`)

			// Double check that the Application does exist using a regular client (non-proxy)
			createdApp := &appstudiov1.Application{}
			err = guestUser.ExpectedMemberCluster.Client.Get(context.TODO(), types.NamespacedName{Namespace: primaryUserNamespace, Name: applicationName}, createdApp)
			require.NoError(t, err)
			require.NotEmpty(t, createdApp)
			assert.Equal(t, expectedApp.Spec.DisplayName, createdApp.Spec.DisplayName)
		})

		t.Run("share primaryUser workspace with guestUser", func(t *testing.T) {
			// given

			// share primaryUser space with guestUser
			primaryUser.ShareSpaceWith(t, hostAwait, guestUser)

			// VerifySpaceRelatedResources will verify the roles and rolebindings are updated to include guestUser's SpaceBinding
			VerifySpaceRelatedResources(t, awaitilities, primaryUser.Signup, "appstudio")

			// Start a new websocket watcher which watches for Application CRs in the user's namespace
			w := NewWsWatcher(t, *guestUser, primaryUser.CompliantUsername, primaryUserWorkspaceURL)
			closeConnection := w.Start()
			defer closeConnection()
			guestUserPrimaryWsCl, err := hostAwait.CreateAPIProxyClient(t, guestUser.Token, primaryUserWorkspaceURL)
			require.NoError(t, err)

			// when
			// guestUser requests the Application CR in primaryUser's namespace using the proxy
			actualApp := &appstudiov1.Application{}
			err = guestUserPrimaryWsCl.Get(context.TODO(), types.NamespacedName{Name: applicationName, Namespace: primaryUserNamespace}, actualApp)

			// then
			require.NoError(t, err) // allowed since guestUser has access to primaryUser's space

			// wait for the websocket watcher which uses the proxy to receive the Application CR
			found, err := w.WaitForApplication(
				expectedApp.Name,
			)
			require.NoError(t, err)
			assert.NotEmpty(t, found)

			// Double check that the Application does exist using a regular client (non-proxy)
			createdApp := &appstudiov1.Application{}
			err = guestUser.ExpectedMemberCluster.Client.Get(context.TODO(), types.NamespacedName{Namespace: primaryUserNamespace, Name: applicationName}, createdApp)
			require.NoError(t, err)
			require.NotEmpty(t, createdApp)
			assert.Equal(t, expectedApp.Spec.DisplayName, createdApp.Spec.DisplayName)

			t.Run("request for namespace that doesn't belong to workspace context should fail", func(t *testing.T) {
				// In this test the guest user has access to the primary user's namespace since the primary user's workspace has been shared, but if they specify the wrong
				// workspace context (guest user's workspace context) the request should fail. In order for proxy requests to succeed the namespace must belong to the workspace.

				// given
				workspaceName := guestUser.CompliantUsername // guestUser's workspace
				guestUserWorkspaceURL := hostAwait.ProxyURLWithWorkspaceContext(workspaceName)
				guestUserGuestWsCl, err := hostAwait.CreateAPIProxyClient(t, guestUser.Token, guestUserWorkspaceURL)
				require.NoError(t, err)

				// when
				actualApp := &appstudiov1.Application{}
				err = guestUserGuestWsCl.Get(context.TODO(), types.NamespacedName{Name: applicationName, Namespace: primaryUserNamespace}, actualApp) // primaryUser's namespace

				// then
				require.EqualError(t, err, fmt.Sprintf(`invalid workspace request: access to namespace '%s' in workspace '%s' is forbidden (get applications.appstudio.redhat.com %s)`, primaryUserNamespace, workspaceName, applicationName))
			})
		})

	})

	// preexisting user & identity are still there
	// Verify provisioned User
	_, err := memberAwait.WaitForUser(t, preexistingUser.Name)
	assert.NoError(t, err)

	// Verify provisioned Identity
	_, err = memberAwait.WaitForIdentity(t, preexistingIdentity.Name)
	assert.NoError(t, err)
}
that is valid in the Username but should not be in the impersonation header since it should use the compliant Username + IdentityID: uuid.Must(uuid.NewV4()), + }, + } + + // create the users before the subtests, so they exist for the duration of the whole test + for _, user := range users { + CreateAppStudioUser(t, awaitilities, user) + } + + users["car"].ShareSpaceWith(t, hostAwait, users["bus"]) + users["car"].ShareSpaceWith(t, hostAwait, users["bicycle"]) + users["bus"].ShareSpaceWith(t, hostAwait, users["bicycle"]) + + t.Run("car lists workspaces", func(t *testing.T) { + // when + workspaces := users["car"].ListWorkspaces(t, hostAwait) + + // then + // car should see only car's workspace + require.Len(t, workspaces, 1) + VerifyHasExpectedWorkspace(t, ExpectedWorkspaceFor(t, awaitilities.Host(), users["car"], true), workspaces...) + }) + + t.Run("car gets workspaces", func(t *testing.T) { + t.Run("can get car workspace", func(t *testing.T) { + // when + workspace, err := users["car"].GetWorkspace(t, hostAwait, users["car"].CompliantUsername) + + // then + require.NoError(t, err) + VerifyHasExpectedWorkspace(t, ExpectedWorkspaceFor(t, awaitilities.Host(), users["car"], true), *workspace) + }) + + t.Run("cannot get bus workspace", func(t *testing.T) { + // when + workspace, err := users["car"].GetWorkspace(t, hostAwait, users["bus"].CompliantUsername) + + // then + require.EqualError(t, err, "the server could not find the requested resource (get workspaces.toolchain.dev.openshift.com bus)") + assert.Empty(t, workspace) + }) + }) + + t.Run("bus lists workspaces", func(t *testing.T) { + // when + workspaces := users["bus"].ListWorkspaces(t, hostAwait) + + // then + // bus should see both its own and car's workspace + require.Len(t, workspaces, 2) + VerifyHasExpectedWorkspace(t, ExpectedWorkspaceFor(t, awaitilities.Host(), users["bus"], true), workspaces...) 
+ VerifyHasExpectedWorkspace(t, ExpectedWorkspaceFor(t, awaitilities.Host(), users["car"], false), workspaces...) + }) + + t.Run("bus gets workspaces", func(t *testing.T) { + t.Run("can get bus workspace", func(t *testing.T) { + // when + busWS, err := users["bus"].GetWorkspace(t, hostAwait, users["bus"].CompliantUsername) + + // then + require.NoError(t, err) + VerifyHasExpectedWorkspace(t, ExpectedWorkspaceFor(t, awaitilities.Host(), users["bus"], true), *busWS) + }) + + t.Run("can get car workspace", func(t *testing.T) { + // when + carWS, err := users["bus"].GetWorkspace(t, hostAwait, users["car"].CompliantUsername) + + // then + require.NoError(t, err) + VerifyHasExpectedWorkspace(t, ExpectedWorkspaceFor(t, awaitilities.Host(), users["car"], false), *carWS) + }) + }) + + t.Run("bicycle lists workspaces", func(t *testing.T) { + // when + workspaces := users["bicycle"].ListWorkspaces(t, hostAwait) + + // then + // car should see only car's workspace + require.Len(t, workspaces, 3) + VerifyHasExpectedWorkspace(t, ExpectedWorkspaceFor(t, awaitilities.Host(), users["bicycle"], true), workspaces...) + VerifyHasExpectedWorkspace(t, ExpectedWorkspaceFor(t, awaitilities.Host(), users["car"], false), workspaces...) + VerifyHasExpectedWorkspace(t, ExpectedWorkspaceFor(t, awaitilities.Host(), users["bus"], false), workspaces...) 
+ }) + + t.Run("bicycle gets workspaces", func(t *testing.T) { + t.Run("can get bus workspace", func(t *testing.T) { + // when + busWS, err := users["bicycle"].GetWorkspace(t, hostAwait, users["bus"].CompliantUsername) + + // then + require.NoError(t, err) + VerifyHasExpectedWorkspace(t, ExpectedWorkspaceFor(t, awaitilities.Host(), users["bus"], false), *busWS) + }) + + t.Run("can get car workspace", func(t *testing.T) { + // when + carWS, err := users["bicycle"].GetWorkspace(t, hostAwait, users["car"].CompliantUsername) + + // then + require.NoError(t, err) + VerifyHasExpectedWorkspace(t, ExpectedWorkspaceFor(t, awaitilities.Host(), users["car"], false), *carWS) + }) + + t.Run("can get bicycle workspace", func(t *testing.T) { + // when + bicycleWS, err := users["bicycle"].GetWorkspace(t, hostAwait, users["bicycle"].CompliantUsername) + + // then + require.NoError(t, err) + VerifyHasExpectedWorkspace(t, ExpectedWorkspaceFor(t, awaitilities.Host(), users["bicycle"], true), *bicycleWS) + }) + }) + + t.Run("other workspace actions not permitted", func(t *testing.T) { + t.Run("create not allowed", func(t *testing.T) { + // given + workspaceToCreate := ExpectedWorkspaceFor(t, awaitilities.Host(), users["bus"], false) + bicycleCl, err := hostAwait.CreateAPIProxyClient(t, users["bicycle"].Token, hostAwait.APIProxyURL) + require.NoError(t, err) + + // when + // bicycle user tries to create a workspace + err = bicycleCl.Create(context.TODO(), &workspaceToCreate) + + // then + require.EqualError(t, err, fmt.Sprintf("workspaces.toolchain.dev.openshift.com is forbidden: User \"%s\" cannot create resource \"workspaces\" in API group \"toolchain.dev.openshift.com\" at the cluster scope", users["bicycle"].CompliantUsername)) + }) + + t.Run("delete not allowed", func(t *testing.T) { + // given + workspaceToDelete, err := users["bicycle"].GetWorkspace(t, hostAwait, users["bicycle"].CompliantUsername) + require.NoError(t, err) + bicycleCl, err := hostAwait.CreateAPIProxyClient(t, 
users["bicycle"].Token, hostAwait.APIProxyURL) + require.NoError(t, err) + + // bicycle user tries to delete a workspace + err = bicycleCl.Delete(context.TODO(), workspaceToDelete) + + // then + require.EqualError(t, err, fmt.Sprintf("workspaces.toolchain.dev.openshift.com \"%[1]s\" is forbidden: User \"%[1]s\" cannot delete resource \"workspaces\" in API group \"toolchain.dev.openshift.com\" at the cluster scope", users["bicycle"].CompliantUsername)) + }) + + t.Run("update not allowed", func(t *testing.T) { + // when + workspaceToUpdate := ExpectedWorkspaceFor(t, awaitilities.Host(), users["bicycle"], true) + bicycleCl, err := hostAwait.CreateAPIProxyClient(t, users["bicycle"].Token, hostAwait.APIProxyURL) + require.NoError(t, err) + + // bicycle user tries to update a workspace + err = bicycleCl.Update(context.TODO(), &workspaceToUpdate) + + // then + require.EqualError(t, err, fmt.Sprintf("workspaces.toolchain.dev.openshift.com \"%[1]s\" is forbidden: User \"%[1]s\" cannot update resource \"workspaces\" in API group \"toolchain.dev.openshift.com\" at the cluster scope", users["bicycle"].CompliantUsername)) + }) + }) +} diff --git a/test/e2e/proxy_test.go b/test/e2e/proxy_test.go deleted file mode 100644 index db1e7130c..000000000 --- a/test/e2e/proxy_test.go +++ /dev/null @@ -1,1082 +0,0 @@ -package e2e - -import ( - "context" - "crypto/tls" - "encoding/base64" - "encoding/json" - "errors" - "fmt" - "io" - "net/http" - "net/url" - "os" - "os/signal" - "strings" - "sync" - "testing" - "time" - - toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" - identitypkg "github.com/codeready-toolchain/toolchain-common/pkg/identity" - commonproxy "github.com/codeready-toolchain/toolchain-common/pkg/proxy" - testconfig "github.com/codeready-toolchain/toolchain-common/pkg/test/config" - . "github.com/codeready-toolchain/toolchain-e2e/testsupport" - appstudiov1 "github.com/codeready-toolchain/toolchain-e2e/testsupport/appstudio/api/v1alpha1" - . 
"github.com/codeready-toolchain/toolchain-e2e/testsupport/spacebinding" - "github.com/codeready-toolchain/toolchain-e2e/testsupport/wait" - "k8s.io/client-go/kubernetes" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/gofrs/uuid" - "github.com/gorilla/websocket" - userv1 "github.com/openshift/api/user/v1" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" - k8serr "k8s.io/apimachinery/pkg/api/errors" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" - kubewait "k8s.io/apimachinery/pkg/util/wait" -) - -type proxyUser struct { - expectedMemberCluster *wait.MemberAwaitility - username string - token string - identityID uuid.UUID - signup *toolchainv1alpha1.UserSignup - compliantUsername string -} - -type patchStringValue struct { - Op string `json:"op"` - Path string `json:"path"` - Value string `json:"value"` -} - -func (u *proxyUser) shareSpaceWith(t *testing.T, hostAwait *wait.HostAwaitility, guestUser *proxyUser) { - // share primaryUser space with guestUser - guestUserMur, err := hostAwait.GetMasterUserRecord(guestUser.compliantUsername) - require.NoError(t, err) - primaryUserSpace, err := hostAwait.WaitForSpace(t, u.compliantUsername, wait.UntilSpaceHasAnyTargetClusterSet(), wait.UntilSpaceHasAnyTierNameSet()) - require.NoError(t, err) - CreateSpaceBinding(t, hostAwait, guestUserMur, primaryUserSpace, "admin") // creating a spacebinding gives guestUser access to primaryUser's space -} - -func (u *proxyUser) listWorkspaces(t *testing.T, hostAwait *wait.HostAwaitility) []toolchainv1alpha1.Workspace { - proxyCl := u.createProxyClient(t, hostAwait) - - workspaces := &toolchainv1alpha1.WorkspaceList{} - err := proxyCl.List(context.TODO(), workspaces) - require.NoError(t, err) - return workspaces.Items -} - -func (u *proxyUser) createProxyClient(t *testing.T, hostAwait *wait.HostAwaitility) client.Client { - proxyCl, err := hostAwait.CreateAPIProxyClient(t, u.token, 
hostAwait.APIProxyURL) - require.NoError(t, err) - return proxyCl -} - -func (u *proxyUser) getWorkspace(t *testing.T, hostAwait *wait.HostAwaitility, workspaceName string) (*toolchainv1alpha1.Workspace, error) { - proxyCl := u.createProxyClient(t, hostAwait) - - workspace := &toolchainv1alpha1.Workspace{} - var cause error - // only wait up to 5 seconds because in some test cases the workspace is not expected to be found - _ = kubewait.Poll(wait.DefaultRetryInterval, 5*time.Second, func() (bool, error) { - cause = proxyCl.Get(context.TODO(), types.NamespacedName{Name: workspaceName}, workspace) - return cause == nil, nil - }) - - // do not assert error before returning because in some test cases the workspace is not expected to be found - return workspace, cause -} - -func (u *proxyUser) getApplication(t *testing.T, proxyClient client.Client, applicationName string) *appstudiov1.Application { - app := &appstudiov1.Application{} - namespacedName := types.NamespacedName{Namespace: tenantNsName(u.compliantUsername), Name: applicationName} - // Get Application - err := proxyClient.Get(context.TODO(), namespacedName, app) - require.NoError(t, err) - require.NotEmpty(t, app) - return app -} - -func (u *proxyUser) getApplicationWithoutProxy(t *testing.T, applicationName string) *appstudiov1.Application { - namespacedName := types.NamespacedName{Namespace: tenantNsName(u.compliantUsername), Name: applicationName} - app := &appstudiov1.Application{} - err := u.expectedMemberCluster.Client.Get(context.TODO(), namespacedName, app) - require.NoError(t, err) - require.NotEmpty(t, app) - return app -} - -func (u *proxyUser) getApplicationName(i int) string { - return fmt.Sprintf("%s-test-app-%d", u.compliantUsername, i) -} - -// full flow from usersignup with approval down to namespaces creation and cleanup -// -// !!! Additional context !!! -// The test uses a dummy HAS API type called Application. 
The reason is that the regular -// user doesn't have full permission for the standard types like ConfigMap. This means -// that we could do create/read operations on that resource from this test. -// To work around this limitation, we created a dummy HAS API type that has the same name -// and the same group as the actual one. The CRD is created as part of the test setup -// and since the CRD name & group name matches, then RBAC allow us to execute create/read -// operations on that resource using the user permissions. -func TestProxyFlow(t *testing.T) { - // given - awaitilities := WaitForDeployments(t) - hostAwait := awaitilities.Host() - memberAwait := awaitilities.Member1() - memberAwait2 := awaitilities.Member2() - - setStoneSoupConfig(t, hostAwait, memberAwait) - - t.Logf("Proxy URL: %s", hostAwait.APIProxyURL) - - waitForWatcher := runWatcher(t, awaitilities) - defer func() { - t.Log("wait until the watcher is stopped") - waitForWatcher.Wait() - }() - - users := []*proxyUser{ - { - expectedMemberCluster: memberAwait, - username: "proxymember1", - identityID: uuid.Must(uuid.NewV4()), - }, - { - expectedMemberCluster: memberAwait2, - username: "proxymember2", - identityID: uuid.Must(uuid.NewV4()), - }, - { - expectedMemberCluster: memberAwait, - username: "compliant.username", // contains a '.' 
that is valid in the username but should not be in the impersonation header since it should use the compliant username - identityID: uuid.Must(uuid.NewV4()), - }, - } - //create the users before the subtests, so they exist for the duration of the whole "ProxyFlow" test ;) - for _, user := range users { - createAppStudioUser(t, awaitilities, user) - } - - // if there is an identity & user resources already present, but don't contain "owner" label, then they shouldn't be deleted - preexistingUser, preexistingIdentity := createPreexistingUserAndIdentity(t, *users[0]) - - for index, user := range users { - t.Run(user.username, func(t *testing.T) { - // Start a new websocket watcher - w := newWsWatcher(t, *user, user.compliantUsername, hostAwait.APIProxyURL) - closeConnection := w.Start() - defer closeConnection() - proxyCl := user.createProxyClient(t, hostAwait) - applicationList := &appstudiov1.ApplicationList{} - - t.Run("use proxy to create a HAS Application CR in the user appstudio namespace via proxy API and use websocket to watch it created", func(t *testing.T) { - // Create and retrieve the application resources multiple times for the same user to make sure the proxy cache kicks in. 
- for i := 0; i < 2; i++ { - // given - applicationName := user.getApplicationName(i) - expectedApp := newApplication(applicationName, tenantNsName(user.compliantUsername)) - - // when - err := proxyCl.Create(context.TODO(), expectedApp) - require.NoError(t, err) - - // then - // wait for the websocket watcher which uses the proxy to receive the Application CR - found, err := w.WaitForApplication( - expectedApp.Name, - ) - require.NoError(t, err) - assert.NotEmpty(t, found) - assert.Equal(t, expectedApp.Spec, found.Spec) - - proxyApp := user.getApplication(t, proxyCl, applicationName) - assert.NotEmpty(t, proxyApp) - - // Double check that the Application does exist using a regular client (non-proxy) - noProxyApp := user.getApplicationWithoutProxy(t, applicationName) - assert.Equal(t, expectedApp.Spec, noProxyApp.Spec) - } - - t.Run("use proxy to update a HAS Application CR in the user appstudio namespace via proxy API", func(t *testing.T) { - // Update application - applicationName := user.getApplicationName(0) - // Get application - proxyApp := user.getApplication(t, proxyCl, applicationName) - // Update DisplayName - changedDisplayName := fmt.Sprintf("Proxy test for user %s - updated application", tenantNsName(user.compliantUsername)) - proxyApp.Spec.DisplayName = changedDisplayName - err := proxyCl.Update(context.TODO(), proxyApp) - require.NoError(t, err) - - // Find application and check, if it is updated - updatedApp := user.getApplication(t, proxyCl, applicationName) - assert.Equal(t, proxyApp.Spec.DisplayName, updatedApp.Spec.DisplayName) - - // Check that the Application is updated using a regular client (non-proxy) - noProxyUpdatedApp := user.getApplicationWithoutProxy(t, applicationName) - assert.Equal(t, proxyApp.Spec.DisplayName, noProxyUpdatedApp.Spec.DisplayName) - }) - - t.Run("use proxy to list a HAS Application CR in the user appstudio namespace", func(t *testing.T) { - // Get List of applications. 
- err := proxyCl.List(context.TODO(), applicationList, &client.ListOptions{Namespace: tenantNsName(user.compliantUsername)}) - // User should be able to list applications - require.NoError(t, err) - assert.NotEmpty(t, applicationList.Items) - - // Check that the applicationList using a regular client (non-proxy) - applicationListWS := &appstudiov1.ApplicationList{} - err = user.expectedMemberCluster.Client.List(context.TODO(), applicationListWS, &client.ListOptions{Namespace: tenantNsName(user.compliantUsername)}) - require.NoError(t, err) - require.Len(t, applicationListWS.Items, 2) - assert.Equal(t, applicationListWS.Items, applicationList.Items) - }) - - t.Run("use proxy to patch a HAS Application CR in the user appstudio namespace via proxy API", func(t *testing.T) { - // Patch application - applicationName := user.getApplicationName(1) - patchString := "Patched application for proxy test" - // Get application - proxyApp := user.getApplication(t, proxyCl, applicationName) - // Patch for DisplayName - patchPayload := []patchStringValue{{ - Op: "replace", - Path: "/spec/displayName", - Value: patchString, - }} - patchPayloadBytes, err := json.Marshal(patchPayload) - require.NoError(t, err) - - // Appply Patch - err = proxyCl.Patch(context.TODO(), proxyApp, client.RawPatch(types.JSONPatchType, patchPayloadBytes)) - require.NoError(t, err) - - // Get patched app and verify patched DisplayName - patchedApp := user.getApplication(t, proxyCl, applicationName) - assert.Equal(t, patchString, patchedApp.Spec.DisplayName) - - // Double check that the Application is patched using a regular client (non-proxy) - noProxyApp := user.getApplicationWithoutProxy(t, applicationName) - assert.Equal(t, patchString, noProxyApp.Spec.DisplayName) - }) - - t.Run("use proxy to delete a HAS Application CR in the user appstudio namespace via proxy API and use websocket to watch it deleted", func(t *testing.T) { - // Delete applications - for i := 0; i < len(applicationList.Items); i++ { - 
// Get application - proxyApp := applicationList.Items[i].DeepCopy() - // Delete - err := proxyCl.Delete(context.TODO(), proxyApp) - require.NoError(t, err) - err = w.WaitForApplicationDeletion( - proxyApp.Name, - ) - require.NoError(t, err) - - // Check that the Application is deleted using a regular client (non-proxy) - namespacedName := types.NamespacedName{Namespace: tenantNsName(user.compliantUsername), Name: proxyApp.Name} - originalApp := &appstudiov1.Application{} - err = user.expectedMemberCluster.Client.Get(context.TODO(), namespacedName, originalApp) - require.Error(t, err) //not found - require.True(t, k8serr.IsNotFound(err)) - } - }) - }) - - t.Run("try to create a resource in an unauthorized namespace", func(t *testing.T) { - // given - appName := fmt.Sprintf("%s-proxy-test-app", user.username) - expectedApp := &appstudiov1.Application{ - ObjectMeta: metav1.ObjectMeta{ - Name: appName, - Namespace: hostAwait.Namespace, // user should not be allowed to create a resource in the host operator namespace - }, - Spec: appstudiov1.ApplicationSpec{ - DisplayName: "Should be forbidden", - }, - } - - // when - proxyCl = user.createProxyClient(t, hostAwait) - - // then - err := proxyCl.Create(context.TODO(), expectedApp) - require.EqualError(t, err, fmt.Sprintf(`invalid workspace request: access to namespace '%s' in workspace '%s' is forbidden (post applications.appstudio.redhat.com)`, hostAwait.Namespace, user.compliantUsername)) - }) - - t.Run("unable to create a resource in the other users namespace because the workspace is not shared", func(t *testing.T) { - // given - otherUser := users[(index+1)%len(users)] - t.Log("other user: ", otherUser.username) - // verify other user's namespace still exists - ns := &corev1.Namespace{} - namespaceName := tenantNsName(otherUser.compliantUsername) - err := hostAwait.Client.Get(context.TODO(), types.NamespacedName{Name: namespaceName}, ns) - require.NoError(t, err, "the other user's namespace should still exist") - - // 
when - appName := fmt.Sprintf("%s-proxy-test-app", user.compliantUsername) - appToCreate := &appstudiov1.Application{ - ObjectMeta: metav1.ObjectMeta{ - Name: appName, - Namespace: namespaceName, // user should not be allowed to create a resource in the other user's namespace - }, - Spec: appstudiov1.ApplicationSpec{ - DisplayName: "Should be forbidden", - }, - } - workspaceName := otherUser.compliantUsername - proxyWorkspaceURL := hostAwait.ProxyURLWithWorkspaceContext(workspaceName) // set workspace context to other user's workspace - proxyCl, err := hostAwait.CreateAPIProxyClient(t, user.token, proxyWorkspaceURL) - require.NoError(t, err) - err = proxyCl.Create(context.TODO(), appToCreate) - - // then - // note: the actual error message is "invalid workspace request: access to workspace '%s' is forbidden" but clients make api discovery calls - // that fail because in this case the api discovery calls go through the proxyWorkspaceURL which is invalid. If using oc or kubectl and you - // enable verbose logging you would see Response Body: invalid workspace request: access to workspace 'proxymember2' is forbidden - require.EqualError(t, err, `no matches for kind "Application" in version "appstudio.redhat.com/v1alpha1"`) - }) - - t.Run("successful workspace context request", func(t *testing.T) { - proxyWorkspaceURL := hostAwait.ProxyURLWithWorkspaceContext(user.compliantUsername) - // Start a new websocket watcher which watches for Application CRs in the user's namespace - w := newWsWatcher(t, *user, user.compliantUsername, proxyWorkspaceURL) - closeConnection := w.Start() - defer closeConnection() - workspaceCl, err := hostAwait.CreateAPIProxyClient(t, user.token, proxyWorkspaceURL) // proxy client with workspace context - require.NoError(t, err) - - // given - applicationName := fmt.Sprintf("%s-workspace-context", user.compliantUsername) - namespaceName := tenantNsName(user.compliantUsername) - expectedApp := newApplication(applicationName, namespaceName) - - // 
when - err = workspaceCl.Create(context.TODO(), expectedApp) - require.NoError(t, err) - - // then - // wait for the websocket watcher which uses the proxy to receive the Application CR - found, err := w.WaitForApplication( - expectedApp.Name, - ) - require.NoError(t, err) - assert.NotEmpty(t, found) - - // Double check that the Application does exist using a regular client (non-proxy) - createdApp := &appstudiov1.Application{} - err = user.expectedMemberCluster.Client.Get(context.TODO(), types.NamespacedName{Namespace: namespaceName, Name: applicationName}, createdApp) - require.NoError(t, err) - require.NotEmpty(t, createdApp) - assert.Equal(t, expectedApp.Spec.DisplayName, createdApp.Spec.DisplayName) - }) // end of successful workspace context request - - t.Run("successful workspace context request with proxy plugin", func(t *testing.T) { - // we are going to repurpose a well known, always running route as a proxy plugin to contact through the registration service - CreateProxyPluginWithCleanup(t, hostAwait, "openshift-console", "openshift-console", "console") - VerifyProxyPlugin(t, hostAwait, "openshift-console") - proxyPluginWorkspaceURL := hostAwait.PluginProxyURLWithWorkspaceContext("openshift-console", user.compliantUsername) - client := http.Client{ - Timeout: 30 * time.Second, - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, //nolint:gosec - }, - } - request, err := http.NewRequest("GET", proxyPluginWorkspaceURL, nil) - require.NoError(t, err) - - request.Header.Set("Authorization", fmt.Sprintf("Bearer %s", user.token)) - var resp *http.Response - resp, err = client.Do(request) - require.NoError(t, err) - defer resp.Body.Close() - var body []byte - body, err = io.ReadAll(resp.Body) - require.NoError(t, err) - bodyStr := string(body) - if resp.StatusCode != http.StatusOK { - t.Errorf("unexpected http return code of %d with body text %s", resp.StatusCode, bodyStr) - } - if !strings.Contains(bodyStr, "Red") || 
!strings.Contains(bodyStr, "Open") { - t.Errorf("unexpected http response body %s", bodyStr) - } - }) // end of successful workspace context request with proxy plugin - - t.Run("invalid workspace context request", func(t *testing.T) { - proxyWorkspaceURL := hostAwait.ProxyURLWithWorkspaceContext("notexist") - hostAwaitWithShorterTimeout := hostAwait.WithRetryOptions(wait.TimeoutOption(time.Second * 3)) // we expect an error so we can use a shorter timeout - _, err := hostAwaitWithShorterTimeout.CreateAPIProxyClient(t, user.token, proxyWorkspaceURL) - require.EqualError(t, err, `an error on the server ("unable to get target cluster: the requested space is not available") has prevented the request from succeeding`) - }) - - t.Run("invalid request headers", func(t *testing.T) { - // given - proxyWorkspaceURL := hostAwait.ProxyURLWithWorkspaceContext(user.compliantUsername) - rejectedHeaders := []headerKeyValue{ - {"Impersonate-Group", "system:cluster-admins"}, - {"Impersonate-Group", "system:node-admins"}, - } - client := http.Client{ - Timeout: time.Duration(5 * time.Second), // because sometimes the network connection may be a bit slow - } - client.Transport = &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, // nolint:gosec - }, - } - t.Logf("proxyWorkspaceURL: %s", proxyWorkspaceURL) - nodesURL := fmt.Sprintf("%s/api/v1/nodes", proxyWorkspaceURL) - t.Logf("nodesURL: %s", nodesURL) - - for _, header := range rejectedHeaders { - t.Run(fmt.Sprintf("k=%s,v=%s", header.key, header.value), func(t *testing.T) { - // given - request, err := http.NewRequest("GET", nodesURL, nil) - request.Header.Add(header.key, header.value) - require.NoError(t, err) - request.Header.Add("Authorization", fmt.Sprintf("Bearer %s", user.token)) // uses the user's token with the impersonation headers - - // when - resp, err := client.Do(request) - - // then - require.NoError(t, err) - require.NotNil(t, resp) - defer resp.Body.Close() - require.Equal(t, 403, 
resp.StatusCode) // should be forbidden - r, _ := io.ReadAll(resp.Body) - assert.Contains(t, string(r), fmt.Sprintf(`nodes is forbidden: User \"%s\" cannot list resource \"nodes\" in API group \"\" at the cluster scope`, user.compliantUsername)) - }) - } - - }) // end of invalid request headers - }) - } // end users loop - - t.Run("proxy with shared workspace use cases", func(t *testing.T) { - // given - guestUser := users[0] - primaryUser := users[1] - applicationName := fmt.Sprintf("%s-share-workspace-context", primaryUser.compliantUsername) - workspaceName := primaryUser.compliantUsername - primaryUserWorkspaceURL := hostAwait.ProxyURLWithWorkspaceContext(workspaceName) // set workspace context using primaryUser's workspace - - // ensure the app exists in primaryUser's space - primaryUserNamespace := tenantNsName(primaryUser.compliantUsername) - expectedApp := newApplication(applicationName, primaryUserNamespace) - err := primaryUser.expectedMemberCluster.Client.Create(context.TODO(), expectedApp) - require.NoError(t, err) - - t.Run("guestUser request to unauthorized workspace", func(t *testing.T) { - proxyCl, err := hostAwait.CreateAPIProxyClient(t, guestUser.token, primaryUserWorkspaceURL) - require.NoError(t, err) - - // when - actualApp := &appstudiov1.Application{} - err = proxyCl.Get(context.TODO(), types.NamespacedName{Name: applicationName, Namespace: primaryUserNamespace}, actualApp) - - // then - // note: the actual error message is "invalid workspace request: access to workspace '%s' is forbidden" but clients make api discovery calls - // that fail because in this case the api discovery calls go through the proxyWorkspaceURL which is invalid. 
If using oc or kubectl and you - // enable verbose logging you would see Response Body: invalid workspace request: access to workspace 'proxymember2' is forbidden - require.EqualError(t, err, `no matches for kind "Application" in version "appstudio.redhat.com/v1alpha1"`) - - // Double check that the Application does exist using a regular client (non-proxy) - createdApp := &appstudiov1.Application{} - err = guestUser.expectedMemberCluster.Client.Get(context.TODO(), types.NamespacedName{Namespace: primaryUserNamespace, Name: applicationName}, createdApp) - require.NoError(t, err) - require.NotEmpty(t, createdApp) - assert.Equal(t, expectedApp.Spec.DisplayName, createdApp.Spec.DisplayName) - }) - - t.Run("share primaryUser workspace with guestUser", func(t *testing.T) { - // given - - // share primaryUser space with guestUser - primaryUser.shareSpaceWith(t, hostAwait, guestUser) - - // VerifySpaceRelatedResources will verify the roles and rolebindings are updated to include guestUser's SpaceBinding - VerifySpaceRelatedResources(t, awaitilities, primaryUser.signup, "appstudio") - - // Start a new websocket watcher which watches for Application CRs in the user's namespace - w := newWsWatcher(t, *guestUser, primaryUser.compliantUsername, primaryUserWorkspaceURL) - closeConnection := w.Start() - defer closeConnection() - guestUserPrimaryWsCl, err := hostAwait.CreateAPIProxyClient(t, guestUser.token, primaryUserWorkspaceURL) - require.NoError(t, err) - - // when - // user A requests the Application CR in primaryUser's namespace using the proxy - actualApp := &appstudiov1.Application{} - err = guestUserPrimaryWsCl.Get(context.TODO(), types.NamespacedName{Name: applicationName, Namespace: primaryUserNamespace}, actualApp) - - // then - require.NoError(t, err) // allowed since guestUser has access to primaryUser's space - - // wait for the websocket watcher which uses the proxy to receive the Application CR - found, err := w.WaitForApplication( - expectedApp.Name, - ) - 
require.NoError(t, err) - assert.NotEmpty(t, found) - - // Double check that the Application does exist using a regular client (non-proxy) - createdApp := &appstudiov1.Application{} - err = guestUser.expectedMemberCluster.Client.Get(context.TODO(), types.NamespacedName{Namespace: primaryUserNamespace, Name: applicationName}, createdApp) - require.NoError(t, err) - require.NotEmpty(t, createdApp) - assert.Equal(t, expectedApp.Spec.DisplayName, createdApp.Spec.DisplayName) - - t.Run("request for namespace that doesn't belong to workspace context should fail", func(t *testing.T) { - // In this test the guest user has access to the primary user's namespace since the primary user's workspace has been shared, but if they specify the wrong - // workspace context (guest user's workspace context) the request should fail. In order for proxy requests to succeed the namespace must belong to the workspace. - - // given - workspaceName := guestUser.compliantUsername // guestUser's workspace - guestUserWorkspaceURL := hostAwait.ProxyURLWithWorkspaceContext(workspaceName) - guestUserGuestWsCl, err := hostAwait.CreateAPIProxyClient(t, guestUser.token, guestUserWorkspaceURL) - require.NoError(t, err) - - // when - actualApp := &appstudiov1.Application{} - err = guestUserGuestWsCl.Get(context.TODO(), types.NamespacedName{Name: applicationName, Namespace: primaryUserNamespace}, actualApp) // primaryUser's namespace - - // then - require.EqualError(t, err, fmt.Sprintf(`invalid workspace request: access to namespace '%s' in workspace '%s' is forbidden (get applications.appstudio.redhat.com %s)`, primaryUserNamespace, workspaceName, applicationName)) - }) - }) - - }) - - // preexisting user & identity are still there - // Verify provisioned User - _, err := memberAwait.WaitForUser(t, preexistingUser.Name) - assert.NoError(t, err) - - // Verify provisioned Identity - _, err = memberAwait.WaitForIdentity(t, preexistingIdentity.Name) - assert.NoError(t, err) -} - -// this test will: -// 1. 
provision a watcher user -// 2. run a goroutine which will: -// I. create a long-running GET call with a watch=true parameter -// II. the call will be terminated via a context timeout -// III. check the expected error that it was terminated via a context and not on the server side -func runWatcher(t *testing.T, awaitilities wait.Awaitilities) *sync.WaitGroup { - - // ====================================================== - // let's define two timeouts - - // contextTimeout defines the time after which the GET (watch) call will be terminated (via the context) - // this one is the expected timeout and should be bigger than the default one that was originally set - // for OpenShift Route and the RoundTripper inside proxy to make sure that the call is terminated - // via the context and not by the server. - contextTimeout := 40 * time.Second - - // this timeout will be set when initializing the go client - just to be sure that - // there is no other value set by default and is bigger than the contextTimeout. - clientConfigTimeout := 50 * time.Second - // ====================================================== - - t.Log("provisioning the watcher") - watchUser := &proxyUser{ - expectedMemberCluster: awaitilities.Member1(), - username: "watcher", - identityID: uuid.Must(uuid.NewV4()), - } - createAppStudioUser(t, awaitilities, watchUser) - - proxyConfig := awaitilities.Host().CreateAPIProxyConfig(t, watchUser.token, awaitilities.Host().APIProxyURL) - proxyConfig.Timeout = clientConfigTimeout - watcherClient, err := kubernetes.NewForConfig(proxyConfig) - require.NoError(t, err) - - // we need to get a list of ConfigMaps, so we can use the resourceVersion - // of the list resource in the watch call - t.Log("getting the first list of ConfigMaps") - list, err := watcherClient.CoreV1(). - ConfigMaps(tenantNsName(watchUser.compliantUsername)). 
- List(context.Background(), metav1.ListOptions{}) - require.NoError(t, err) - - var waitForWatcher sync.WaitGroup - waitForWatcher.Add(1) - // run the watch in a goroutine because it will take 40 seconds until the call is terminated - go func() { - t.Run("running the watcher", func(t *testing.T) { - defer waitForWatcher.Done() - withTimeout, cancelFunc := context.WithTimeout(context.Background(), contextTimeout) - defer cancelFunc() - - started := time.Now() - t.Log("starting the watch call") - _, err := watcherClient.RESTClient().Get(). - AbsPath(fmt.Sprintf("/api/v1/namespaces/%s/configmaps", tenantNsName(watchUser.compliantUsername))). - Param("resourceVersion", list.GetResourceVersion()). - Param("watch", "true"). - Do(withTimeout). - Get() - t.Logf("stopping the watch after %s", time.Since(started)) - - assert.EqualError(t, err, "unexpected error when reading response body. Please retry. Original error: context deadline exceeded", "The call should be terminated by the context timeout") - assert.NotContains(t, err.Error(), "unexpected EOF", "If it contains 'unexpected EOF' then the call was terminated on the server side, which is not expected.") - }) - }() - return &waitForWatcher -} - -func TestSpaceLister(t *testing.T) { - // given - awaitilities := WaitForDeployments(t) - hostAwait := awaitilities.Host() - memberAwait := awaitilities.Member1() - memberAwait2 := awaitilities.Member2() - - setStoneSoupConfig(t, hostAwait, memberAwait) - - t.Logf("Proxy URL: %s", hostAwait.APIProxyURL) - - users := map[string]*proxyUser{ - "car": { - expectedMemberCluster: memberAwait, - username: "car", - identityID: uuid.Must(uuid.NewV4()), - }, - "bus": { - expectedMemberCluster: memberAwait2, - username: "bus", - identityID: uuid.Must(uuid.NewV4()), - }, - "bicycle": { - expectedMemberCluster: memberAwait, - username: "road.bicycle", // contains a '.' 
that is valid in the username but should not be in the impersonation header since it should use the compliant username - identityID: uuid.Must(uuid.NewV4()), - }, - } - - // create the users before the subtests, so they exist for the duration of the whole test - for _, user := range users { - createAppStudioUser(t, awaitilities, user) - } - - users["car"].shareSpaceWith(t, hostAwait, users["bus"]) - users["car"].shareSpaceWith(t, hostAwait, users["bicycle"]) - users["bus"].shareSpaceWith(t, hostAwait, users["bicycle"]) - - t.Run("car lists workspaces", func(t *testing.T) { - // when - workspaces := users["car"].listWorkspaces(t, hostAwait) - - // then - // car should see only car's workspace - require.Len(t, workspaces, 1) - verifyHasExpectedWorkspace(t, expectedWorkspaceFor(t, awaitilities.Host(), users["car"], true), workspaces...) - }) - - t.Run("car gets workspaces", func(t *testing.T) { - t.Run("can get car workspace", func(t *testing.T) { - // when - workspace, err := users["car"].getWorkspace(t, hostAwait, users["car"].compliantUsername) - - // then - require.NoError(t, err) - verifyHasExpectedWorkspace(t, expectedWorkspaceFor(t, awaitilities.Host(), users["car"], true), *workspace) - }) - - t.Run("cannot get bus workspace", func(t *testing.T) { - // when - workspace, err := users["car"].getWorkspace(t, hostAwait, users["bus"].compliantUsername) - - // then - require.EqualError(t, err, "the server could not find the requested resource (get workspaces.toolchain.dev.openshift.com bus)") - assert.Empty(t, workspace) - }) - }) - - t.Run("bus lists workspaces", func(t *testing.T) { - // when - workspaces := users["bus"].listWorkspaces(t, hostAwait) - - // then - // bus should see both its own and car's workspace - require.Len(t, workspaces, 2) - verifyHasExpectedWorkspace(t, expectedWorkspaceFor(t, awaitilities.Host(), users["bus"], true), workspaces...) 
- verifyHasExpectedWorkspace(t, expectedWorkspaceFor(t, awaitilities.Host(), users["car"], false), workspaces...) - }) - - t.Run("bus gets workspaces", func(t *testing.T) { - t.Run("can get bus workspace", func(t *testing.T) { - // when - busWS, err := users["bus"].getWorkspace(t, hostAwait, users["bus"].compliantUsername) - - // then - require.NoError(t, err) - verifyHasExpectedWorkspace(t, expectedWorkspaceFor(t, awaitilities.Host(), users["bus"], true), *busWS) - }) - - t.Run("can get car workspace", func(t *testing.T) { - // when - carWS, err := users["bus"].getWorkspace(t, hostAwait, users["car"].compliantUsername) - - // then - require.NoError(t, err) - verifyHasExpectedWorkspace(t, expectedWorkspaceFor(t, awaitilities.Host(), users["car"], false), *carWS) - }) - }) - - t.Run("bicycle lists workspaces", func(t *testing.T) { - // when - workspaces := users["bicycle"].listWorkspaces(t, hostAwait) - - // then - // car should see only car's workspace - require.Len(t, workspaces, 3) - verifyHasExpectedWorkspace(t, expectedWorkspaceFor(t, awaitilities.Host(), users["bicycle"], true), workspaces...) - verifyHasExpectedWorkspace(t, expectedWorkspaceFor(t, awaitilities.Host(), users["car"], false), workspaces...) - verifyHasExpectedWorkspace(t, expectedWorkspaceFor(t, awaitilities.Host(), users["bus"], false), workspaces...) 
- }) - - t.Run("bicycle gets workspaces", func(t *testing.T) { - t.Run("can get bus workspace", func(t *testing.T) { - // when - busWS, err := users["bicycle"].getWorkspace(t, hostAwait, users["bus"].compliantUsername) - - // then - require.NoError(t, err) - verifyHasExpectedWorkspace(t, expectedWorkspaceFor(t, awaitilities.Host(), users["bus"], false), *busWS) - }) - - t.Run("can get car workspace", func(t *testing.T) { - // when - carWS, err := users["bicycle"].getWorkspace(t, hostAwait, users["car"].compliantUsername) - - // then - require.NoError(t, err) - verifyHasExpectedWorkspace(t, expectedWorkspaceFor(t, awaitilities.Host(), users["car"], false), *carWS) - }) - - t.Run("can get bicycle workspace", func(t *testing.T) { - // when - bicycleWS, err := users["bicycle"].getWorkspace(t, hostAwait, users["bicycle"].compliantUsername) - - // then - require.NoError(t, err) - verifyHasExpectedWorkspace(t, expectedWorkspaceFor(t, awaitilities.Host(), users["bicycle"], true), *bicycleWS) - }) - }) - - t.Run("other workspace actions not permitted", func(t *testing.T) { - t.Run("create not allowed", func(t *testing.T) { - // given - workspaceToCreate := expectedWorkspaceFor(t, awaitilities.Host(), users["bus"], false) - bicycleCl, err := hostAwait.CreateAPIProxyClient(t, users["bicycle"].token, hostAwait.APIProxyURL) - require.NoError(t, err) - - // when - // bicycle user tries to create a workspace - err = bicycleCl.Create(context.TODO(), &workspaceToCreate) - - // then - require.EqualError(t, err, fmt.Sprintf("workspaces.toolchain.dev.openshift.com is forbidden: User \"%s\" cannot create resource \"workspaces\" in API group \"toolchain.dev.openshift.com\" at the cluster scope", users["bicycle"].compliantUsername)) - }) - - t.Run("delete not allowed", func(t *testing.T) { - // given - workspaceToDelete, err := users["bicycle"].getWorkspace(t, hostAwait, users["bicycle"].compliantUsername) - require.NoError(t, err) - bicycleCl, err := hostAwait.CreateAPIProxyClient(t, 
users["bicycle"].token, hostAwait.APIProxyURL) - require.NoError(t, err) - - // bicycle user tries to delete a workspace - err = bicycleCl.Delete(context.TODO(), workspaceToDelete) - - // then - require.EqualError(t, err, fmt.Sprintf("workspaces.toolchain.dev.openshift.com \"%[1]s\" is forbidden: User \"%[1]s\" cannot delete resource \"workspaces\" in API group \"toolchain.dev.openshift.com\" at the cluster scope", users["bicycle"].compliantUsername)) - }) - - t.Run("update not allowed", func(t *testing.T) { - // when - workspaceToUpdate := expectedWorkspaceFor(t, awaitilities.Host(), users["bicycle"], true) - bicycleCl, err := hostAwait.CreateAPIProxyClient(t, users["bicycle"].token, hostAwait.APIProxyURL) - require.NoError(t, err) - - // bicycle user tries to update a workspace - err = bicycleCl.Update(context.TODO(), &workspaceToUpdate) - - // then - require.EqualError(t, err, fmt.Sprintf("workspaces.toolchain.dev.openshift.com \"%[1]s\" is forbidden: User \"%[1]s\" cannot update resource \"workspaces\" in API group \"toolchain.dev.openshift.com\" at the cluster scope", users["bicycle"].compliantUsername)) - }) - }) -} - -func tenantNsName(username string) string { - return fmt.Sprintf("%s-tenant", username) -} - -func createAppStudioUser(t *testing.T, awaitilities wait.Awaitilities, user *proxyUser) { - // Create and approve signup - req := NewSignupRequest(awaitilities). - Username(user.username). - IdentityID(user.identityID). - ManuallyApprove(). - TargetCluster(user.expectedMemberCluster). - EnsureMUR(). - RequireConditions(wait.ConditionSet(wait.Default(), wait.ApprovedByAdmin())...). 
- Execute(t) - user.signup, _ = req.Resources() - user.token = req.GetToken() - VerifyResourcesProvisionedForSignup(t, awaitilities, user.signup, "deactivate30", "appstudio") - user.compliantUsername = user.signup.Status.CompliantUsername - _, err := awaitilities.Host().WaitForMasterUserRecord(t, user.compliantUsername, wait.UntilMasterUserRecordHasCondition(wait.Provisioned())) - require.NoError(t, err) -} - -func createPreexistingUserAndIdentity(t *testing.T, user proxyUser) (*userv1.User, *userv1.Identity) { - preexistingUser := &userv1.User{ - ObjectMeta: metav1.ObjectMeta{ - Name: user.username, - }, - Identities: []string{ - identitypkg.NewIdentityNamingStandard(user.identityID.String(), "rhd").IdentityName(), - }, - } - require.NoError(t, user.expectedMemberCluster.CreateWithCleanup(t, preexistingUser)) - - preexistingIdentity := &userv1.Identity{ - ObjectMeta: metav1.ObjectMeta{ - Name: identitypkg.NewIdentityNamingStandard(user.identityID.String(), "rhd").IdentityName(), - }, - ProviderName: "rhd", - ProviderUserName: user.username, - User: corev1.ObjectReference{ - Name: preexistingUser.Name, - UID: preexistingUser.UID, - }, - } - require.NoError(t, user.expectedMemberCluster.CreateWithCleanup(t, preexistingIdentity)) - return preexistingUser, preexistingIdentity -} - -func newWsWatcher(t *testing.T, user proxyUser, namespace, proxyURL string) *wsWatcher { - _, err := url.Parse(proxyURL) - require.NoError(t, err) - return &wsWatcher{ - t: t, - namespace: namespace, - user: user, - proxyBaseURL: proxyURL, - } -} - -// wsWatcher represents a watcher which leverages a WebSocket connection to watch for Applications in the user's namespace. -// The connection is established with the reg-service proxy instead of direct connection to the API server. 
-type wsWatcher struct { - done chan interface{} - interrupt chan os.Signal - t *testing.T - user proxyUser - namespace string - connection *websocket.Conn - proxyBaseURL string - - mu sync.RWMutex - receivedApps map[string]*appstudiov1.Application -} - -// start creates a new WebSocket connection. The method returns a function which is to be used to close the connection when done. -func (w *wsWatcher) Start() func() { - w.done = make(chan interface{}) // Channel to indicate that the receiverHandler is done - w.interrupt = make(chan os.Signal) // Channel to listen for interrupt signal to terminate gracefully - - signal.Notify(w.interrupt, os.Interrupt) // Notify the interrupt channel for SIGINT - - encodedToken := base64.RawURLEncoding.EncodeToString([]byte(w.user.token)) - protocol := fmt.Sprintf("base64url.bearer.authorization.k8s.io.%s", encodedToken) - - trimmedProxyURL := strings.TrimPrefix(w.proxyBaseURL, "https://") - socketURL := fmt.Sprintf("wss://%s/apis/appstudio.redhat.com/v1alpha1/namespaces/%s/applications?watch=true", trimmedProxyURL, tenantNsName(w.namespace)) - w.t.Logf("opening connection to '%s'", socketURL) - dialer := &websocket.Dialer{ - Subprotocols: []string{protocol, "base64.binary.k8s.io"}, - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, // nolint:gosec - }, - } - - extraHeaders := make(http.Header, 1) - extraHeaders.Add("Origin", "http://localhost") - - conn, resp, err := dialer.Dial(socketURL, extraHeaders) // nolint:bodyclose // see `return func() {...}` - if errors.Is(err, websocket.ErrBadHandshake) { - r, _ := io.ReadAll(resp.Body) - defer resp.Body.Close() - w.t.Logf("handshake failed with status %d / response %s", resp.StatusCode, string(r)) - } - require.NoError(w.t, err) - w.connection = conn - w.receivedApps = make(map[string]*appstudiov1.Application) - - go w.receiveHandler() - go w.startMainLoop() - - return func() { - _ = w.connection.Close() - if resp != nil { - _, _ = io.Copy(io.Discard, resp.Body) - _ = 
resp.Body.Close() - } - } -} - -// startMainLoop starts the main loop for the client. Packets are sent here. -func (w *wsWatcher) startMainLoop() { - for { - select { - case <-time.After(time.Duration(1) * time.Millisecond * 1000): - // Send an echo packet every second - err := w.connection.WriteMessage(websocket.TextMessage, []byte("Hello from e2e tests!")) - if err != nil { - w.t.Logf("Exiting main loop. It's normal if the connection has been closed. Reason: %s\n", err.Error()) - return - } - case <-w.interrupt: - // Received a SIGINT (Ctrl + C). Terminate gracefully... - // Close the websocket connection - err := w.connection.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) - if err != nil { - w.t.Logf("Error during closing websocket: %s", err.Error()) - return - } - - select { - case <-w.done: - w.t.Log("Receiver Channel Closed! Exiting...") - case <-time.After(time.Duration(1) * time.Second): - w.t.Log("Timeout in closing receiving channel. Exiting...") - } - return - } - } -} - -type message struct { - MessageType string `json:"type"` - Application appstudiov1.Application `json:"object"` -} - -// receiveHandler listens to the incoming messages and stores them as Applications objects -func (w *wsWatcher) receiveHandler() { - defer close(w.done) - for { - _, msg, err := w.connection.ReadMessage() - if err != nil { - w.t.Logf("Exiting message receiving loop. It's normal if the connection has been closed. 
Reason: %s\n", err.Error()) - return - } - w.t.Logf("Received: %s", msg) - message := message{} - err = json.Unmarshal(msg, &message) - require.NoError(w.t, err) - copyApp := message.Application - w.mu.Lock() - if message.MessageType == "DELETED" { - delete(w.receivedApps, copyApp.Name) - } else { - w.receivedApps[copyApp.Name] = ©App - } - w.mu.Unlock() - } -} - -func (w *wsWatcher) WaitForApplication(expectedAppName string) (*appstudiov1.Application, error) { - var foundApp *appstudiov1.Application - err := kubewait.Poll(wait.DefaultRetryInterval, wait.DefaultTimeout, func() (bool, error) { - defer w.mu.RUnlock() - w.mu.RLock() - foundApp = w.receivedApps[expectedAppName] - return foundApp != nil, nil - }) - return foundApp, err -} - -func (w *wsWatcher) WaitForApplicationDeletion(expectedAppName string) error { - err := kubewait.PollImmediate(wait.DefaultRetryInterval, wait.DefaultTimeout, func() (bool, error) { - defer w.mu.RUnlock() - w.mu.RLock() - _, present := w.receivedApps[expectedAppName] - return !present, nil - }) - return err -} - -func newApplication(applicationName, namespace string) *appstudiov1.Application { - return &appstudiov1.Application{ - ObjectMeta: metav1.ObjectMeta{ - Name: applicationName, - Namespace: namespace, - }, - Spec: appstudiov1.ApplicationSpec{ - DisplayName: fmt.Sprintf("Proxy test for user %s", namespace), - }, - } -} - -// setStoneSoupConfig applies toolchain configuration for stone soup scenarios -func setStoneSoupConfig(t *testing.T, hostAwait *wait.HostAwaitility, memberAwait *wait.MemberAwaitility) { - // member cluster configured to skip user creation to mimic stonesoup configuration where user & identity resources are not created - memberConfigurationWithSkipUserCreation := testconfig.ModifyMemberOperatorConfigObj(memberAwait.GetMemberOperatorConfig(t), testconfig.SkipUserCreation(true)) - // configure default space tier to appstudio - hostAwait.UpdateToolchainConfig(t, 
testconfig.Tiers().DefaultUserTier("deactivate30").DefaultSpaceTier("appstudio"), testconfig.Members().Default(memberConfigurationWithSkipUserCreation.Spec)) -} - -func verifyHasExpectedWorkspace(t *testing.T, expectedWorkspace toolchainv1alpha1.Workspace, actualWorkspaces ...toolchainv1alpha1.Workspace) { - for _, actualWorkspace := range actualWorkspaces { - if actualWorkspace.Name == expectedWorkspace.Name { - assert.Equal(t, expectedWorkspace.Status, actualWorkspace.Status) - assert.NotEmpty(t, actualWorkspace.ObjectMeta.ResourceVersion, "Workspace.ObjectMeta.ResourceVersion field is empty: %#v", actualWorkspace) - assert.NotEmpty(t, actualWorkspace.ObjectMeta.Generation, "Workspace.ObjectMeta.Generation field is empty: %#v", actualWorkspace) - assert.NotEmpty(t, actualWorkspace.ObjectMeta.CreationTimestamp, "Workspace.ObjectMeta.CreationTimestamp field is empty: %#v", actualWorkspace) - assert.NotEmpty(t, actualWorkspace.ObjectMeta.UID, "Workspace.ObjectMeta.UID field is empty: %#v", actualWorkspace) - return - } - } - t.Errorf("expected workspace %s not found", expectedWorkspace.Name) -} - -func expectedWorkspaceFor(t *testing.T, hostAwait *wait.HostAwaitility, user *proxyUser, isHomeWorkspace bool) toolchainv1alpha1.Workspace { - space, err := hostAwait.WaitForSpace(t, user.compliantUsername, wait.UntilSpaceHasAnyTargetClusterSet(), wait.UntilSpaceHasAnyTierNameSet()) - require.NoError(t, err) - - ws := commonproxy.NewWorkspace(user.compliantUsername, - commonproxy.WithObjectMetaFrom(space.ObjectMeta), - commonproxy.WithNamespaces([]toolchainv1alpha1.SpaceNamespace{ - { - Name: user.compliantUsername + "-tenant", - Type: "default", - }, - }), - commonproxy.WithOwner(user.signup.Name), - commonproxy.WithRole("admin"), - ) - // if the user is the same as the one who created the workspace, then expect type should be "home" - if isHomeWorkspace { - ws.Status.Type = "home" - } - return *ws -} - -type headerKeyValue struct { - key, value string -} diff --git 
a/testsupport/appstudio/proxy_user.go b/testsupport/appstudio/proxy_user.go new file mode 100644 index 000000000..893996b48 --- /dev/null +++ b/testsupport/appstudio/proxy_user.go @@ -0,0 +1,170 @@ +package appstudio + +import ( + "context" + "fmt" + "testing" + "time" + + toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" + identitypkg "github.com/codeready-toolchain/toolchain-common/pkg/identity" + testsupport "github.com/codeready-toolchain/toolchain-e2e/testsupport" + appstudiov1 "github.com/codeready-toolchain/toolchain-e2e/testsupport/appstudio/api/v1alpha1" + spacebinding "github.com/codeready-toolchain/toolchain-e2e/testsupport/spacebinding" + "github.com/codeready-toolchain/toolchain-e2e/testsupport/wait" + userv1 "github.com/openshift/api/user/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/gofrs/uuid" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + kubewait "k8s.io/apimachinery/pkg/util/wait" +) + +type ProxyUser struct { + ExpectedMemberCluster *wait.MemberAwaitility + Username string + Token string + IdentityID uuid.UUID + Signup *toolchainv1alpha1.UserSignup + CompliantUsername string +} + +func CreateAppStudioUser(t *testing.T, awaitilities wait.Awaitilities, user *ProxyUser) { + // Create and approve signup + req := testsupport.NewSignupRequest(awaitilities). + Username(user.Username). + IdentityID(user.IdentityID). + ManuallyApprove(). + TargetCluster(user.ExpectedMemberCluster). + EnsureMUR(). + RequireConditions(wait.ConditionSet(wait.Default(), wait.ApprovedByAdmin())...). 
+ Execute(t) + user.Signup, _ = req.Resources() + user.Token = req.GetToken() + testsupport.VerifyResourcesProvisionedForSignup(t, awaitilities, user.Signup, "deactivate30", "appstudio") + user.CompliantUsername = user.Signup.Status.CompliantUsername + _, err := awaitilities.Host().WaitForMasterUserRecord(t, user.CompliantUsername, wait.UntilMasterUserRecordHasCondition(wait.Provisioned())) + require.NoError(t, err) +} + +func CreatePreexistingUserAndIdentity(t *testing.T, user ProxyUser) (*userv1.User, *userv1.Identity) { + preexistingUser := &userv1.User{ + ObjectMeta: metav1.ObjectMeta{ + Name: user.Username, + }, + Identities: []string{ + identitypkg.NewIdentityNamingStandard(user.IdentityID.String(), "rhd").IdentityName(), + }, + } + require.NoError(t, user.ExpectedMemberCluster.CreateWithCleanup(t, preexistingUser)) + + preexistingIdentity := &userv1.Identity{ + ObjectMeta: metav1.ObjectMeta{ + Name: identitypkg.NewIdentityNamingStandard(user.IdentityID.String(), "rhd").IdentityName(), + }, + ProviderName: "rhd", + ProviderUserName: user.Username, + User: corev1.ObjectReference{ + Name: preexistingUser.Name, + UID: preexistingUser.UID, + }, + } + require.NoError(t, user.ExpectedMemberCluster.CreateWithCleanup(t, preexistingIdentity)) + return preexistingUser, preexistingIdentity +} + +func (u *ProxyUser) ShareSpaceWith(t *testing.T, hostAwait *wait.HostAwaitility, guestUser *ProxyUser) { + // share primaryUser space with guestUser + guestUserMur, err := hostAwait.GetMasterUserRecord(guestUser.CompliantUsername) + require.NoError(t, err) + primaryUserSpace, err := hostAwait.WaitForSpace(t, u.CompliantUsername, wait.UntilSpaceHasAnyTargetClusterSet(), wait.UntilSpaceHasAnyTierNameSet()) + require.NoError(t, err) + spacebinding.CreateSpaceBinding(t, hostAwait, guestUserMur, primaryUserSpace, "admin") // creating a spacebinding gives guestUser access to primaryUser's space +} + +func (u *ProxyUser) ListWorkspaces(t *testing.T, hostAwait *wait.HostAwaitility) 
[]toolchainv1alpha1.Workspace { + proxyCl := u.createProxyClient(t, hostAwait) + + workspaces := &toolchainv1alpha1.WorkspaceList{} + err := proxyCl.List(context.TODO(), workspaces) + require.NoError(t, err) + return workspaces.Items +} + +func (u *ProxyUser) createProxyClient(t *testing.T, hostAwait *wait.HostAwaitility) client.Client { + proxyCl, err := hostAwait.CreateAPIProxyClient(t, u.Token, hostAwait.APIProxyURL) + require.NoError(t, err) + return proxyCl +} + +func (u *ProxyUser) GetWorkspace(t *testing.T, hostAwait *wait.HostAwaitility, workspaceName string) (*toolchainv1alpha1.Workspace, error) { + proxyCl := u.createProxyClient(t, hostAwait) + + workspace := &toolchainv1alpha1.Workspace{} + var cause error + // only wait up to 5 seconds because in some test cases the workspace is not expected to be found + _ = kubewait.Poll(wait.DefaultRetryInterval, 5*time.Second, func() (bool, error) { + cause = proxyCl.Get(context.TODO(), types.NamespacedName{Name: workspaceName}, workspace) + return cause == nil, nil + }) + + // do not assert error before returning because in some test cases the workspace is not expected to be found + return workspace, cause +} + +func (u *ProxyUser) GetApplication(t *testing.T, proxyClient client.Client, applicationName string) *appstudiov1.Application { + app := &appstudiov1.Application{} + namespacedName := types.NamespacedName{Namespace: TenantNsName(u.CompliantUsername), Name: applicationName} + // Get Application + err := proxyClient.Get(context.TODO(), namespacedName, app) + require.NoError(t, err) + require.NotEmpty(t, app) + return app +} + +func (u *ProxyUser) GetApplicationWithoutProxy(t *testing.T, applicationName string) *appstudiov1.Application { + namespacedName := types.NamespacedName{Namespace: TenantNsName(u.CompliantUsername), Name: applicationName} + app := &appstudiov1.Application{} + err := u.ExpectedMemberCluster.Client.Get(context.TODO(), namespacedName, app) + require.NoError(t, err) + require.NotEmpty(t, 
app) + return app +} + +func (u *ProxyUser) CreateProxyClient(t *testing.T, hostAwait *wait.HostAwaitility) client.Client { + proxyCl, err := hostAwait.CreateAPIProxyClient(t, u.Token, hostAwait.APIProxyURL) + require.NoError(t, err) + return proxyCl +} + +func (u *ProxyUser) GetApplicationName(i int) string { + return fmt.Sprintf("%s-test-app-%d", u.CompliantUsername, i) +} + +func CreateProxyUsersForTest(t *testing.T, awaitilities wait.Awaitilities) []*ProxyUser { + users := []*ProxyUser{ + { + ExpectedMemberCluster: awaitilities.Member1(), + Username: "proxymember1", + IdentityID: uuid.Must(uuid.NewV4()), + }, + { + ExpectedMemberCluster: awaitilities.Member2(), + Username: "proxymember2", + IdentityID: uuid.Must(uuid.NewV4()), + }, + { + ExpectedMemberCluster: awaitilities.Member1(), + Username: "compliant.username", // contains a '.' that is valid in the username but should not be in the impersonation header since it should use the compliant username + IdentityID: uuid.Must(uuid.NewV4()), + }, + } + //create the users before the subtests, so they exist for the duration of the whole "ProxyFlow" test ;) + for _, user := range users { + CreateAppStudioUser(t, awaitilities, user) + } + return users +} diff --git a/testsupport/appstudio/utils.go b/testsupport/appstudio/utils.go new file mode 100644 index 000000000..8515269a8 --- /dev/null +++ b/testsupport/appstudio/utils.go @@ -0,0 +1,82 @@ +package appstudio + +import ( + "fmt" + "testing" + + toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" + commonproxy "github.com/codeready-toolchain/toolchain-common/pkg/proxy" + testconfig "github.com/codeready-toolchain/toolchain-common/pkg/test/config" + appstudiov1 "github.com/codeready-toolchain/toolchain-e2e/testsupport/appstudio/api/v1alpha1" + "github.com/codeready-toolchain/toolchain-e2e/testsupport/wait" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +type 
PatchStringValue struct { + Op string `json:"op"` + Path string `json:"path"` + Value string `json:"value"` +} + +func TenantNsName(username string) string { + return fmt.Sprintf("%s-tenant", username) +} + +func NewApplication(applicationName, namespace string) *appstudiov1.Application { + return &appstudiov1.Application{ + ObjectMeta: metav1.ObjectMeta{ + Name: applicationName, + Namespace: namespace, + }, + Spec: appstudiov1.ApplicationSpec{ + DisplayName: fmt.Sprintf("Proxy test for user %s", namespace), + }, + } +} + +// SetAppstudioConfig applies toolchain configuration for stone soup scenarios +func SetAppstudioConfig(t *testing.T, hostAwait *wait.HostAwaitility, memberAwait *wait.MemberAwaitility) { + // member cluster configured to skip user creation to mimic stonesoup configuration where user & identity resources are not created + memberConfigurationWithSkipUserCreation := testconfig.ModifyMemberOperatorConfigObj(memberAwait.GetMemberOperatorConfig(t), testconfig.SkipUserCreation(true)) + // configure default space tier to appstudio + hostAwait.UpdateToolchainConfig(t, testconfig.Tiers().DefaultUserTier("deactivate30").DefaultSpaceTier("appstudio"), testconfig.Members().Default(memberConfigurationWithSkipUserCreation.Spec)) +} + +func VerifyHasExpectedWorkspace(t *testing.T, expectedWorkspace toolchainv1alpha1.Workspace, actualWorkspaces ...toolchainv1alpha1.Workspace) { + for _, actualWorkspace := range actualWorkspaces { + if actualWorkspace.Name == expectedWorkspace.Name { + assert.Equal(t, expectedWorkspace.Status, actualWorkspace.Status) + assert.NotEmpty(t, actualWorkspace.ObjectMeta.ResourceVersion, "Workspace.ObjectMeta.ResourceVersion field is empty: %#v", actualWorkspace) + assert.NotEmpty(t, actualWorkspace.ObjectMeta.Generation, "Workspace.ObjectMeta.Generation field is empty: %#v", actualWorkspace) + assert.NotEmpty(t, actualWorkspace.ObjectMeta.CreationTimestamp, "Workspace.ObjectMeta.CreationTimestamp field is empty: %#v", actualWorkspace) 
+ assert.NotEmpty(t, actualWorkspace.ObjectMeta.UID, "Workspace.ObjectMeta.UID field is empty: %#v", actualWorkspace) + return + } + } + t.Errorf("expected workspace %s not found", expectedWorkspace.Name) +} + +func ExpectedWorkspaceFor(t *testing.T, hostAwait *wait.HostAwaitility, user *ProxyUser, isHomeWorkspace bool) toolchainv1alpha1.Workspace { + space, err := hostAwait.WaitForSpace(t, user.CompliantUsername, wait.UntilSpaceHasAnyTargetClusterSet(), wait.UntilSpaceHasAnyTierNameSet()) + require.NoError(t, err) + + ws := commonproxy.NewWorkspace(user.CompliantUsername, + commonproxy.WithObjectMetaFrom(space.ObjectMeta), + commonproxy.WithNamespaces([]toolchainv1alpha1.SpaceNamespace{ + { + Name: user.CompliantUsername + "-tenant", + Type: "default", + }, + }), + commonproxy.WithOwner(user.Signup.Name), + commonproxy.WithRole("admin"), + ) + // if the user is the same as the one who created the workspace, then expect type should be "home" + if isHomeWorkspace { + ws.Status.Type = "home" + } + return *ws +} diff --git a/testsupport/appstudio/watcher.go b/testsupport/appstudio/watcher.go new file mode 100644 index 000000000..73f633044 --- /dev/null +++ b/testsupport/appstudio/watcher.go @@ -0,0 +1,251 @@ +package appstudio + +import ( + "context" + "crypto/tls" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io" + "net/http" + "net/url" + "os" + "os/signal" + "strings" + "sync" + "testing" + "time" + + appstudiov1 "github.com/codeready-toolchain/toolchain-e2e/testsupport/appstudio/api/v1alpha1" + "github.com/codeready-toolchain/toolchain-e2e/testsupport/wait" + "k8s.io/client-go/kubernetes" + + "github.com/gofrs/uuid" + "github.com/gorilla/websocket" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + kubewait "k8s.io/apimachinery/pkg/util/wait" +) + +// WsWatcher represents a watcher which leverages a WebSocket connection to watch for Applications in the user's namespace.
+// The connection is established with the reg-service proxy instead of direct connection to the API server. +type WsWatcher struct { + done chan interface{} + interrupt chan os.Signal + t *testing.T + user ProxyUser + namespace string + connection *websocket.Conn + proxyBaseURL string + + mu sync.RWMutex + receivedApps map[string]*appstudiov1.Application +} + +// this test will: +// 1. provision a watcher user +// 2. run a goroutine which will: +// I. create a long-running GET call with a watch=true parameter +// II. the call will be terminated via a context timeout +// III. check the expected error that it was terminated via a context and not on the server side +func RunWatcher(t *testing.T, awaitilities wait.Awaitilities) *sync.WaitGroup { + + // ====================================================== + // let's define two timeouts + + // contextTimeout defines the time after which the GET (watch) call will be terminated (via the context) + // this one is the expected timeout and should be bigger than the default one that was originally set + // for OpenShift Route and the RoundTripper inside proxy to make sure that the call is terminated + // via the context and not by the server. + contextTimeout := 40 * time.Second + + // this timeout will be set when initializing the go client - just to be sure that + // there is no other value set by default and is bigger than the contextTimeout. 
+ clientConfigTimeout := 50 * time.Second + // ====================================================== + + t.Log("provisioning the watcher") + watchUser := &ProxyUser{ + ExpectedMemberCluster: awaitilities.Member1(), + Username: "watcher", + IdentityID: uuid.Must(uuid.NewV4()), + } + CreateAppStudioUser(t, awaitilities, watchUser) + + proxyConfig := awaitilities.Host().CreateAPIProxyConfig(t, watchUser.Token, awaitilities.Host().APIProxyURL) + proxyConfig.Timeout = clientConfigTimeout + watcherClient, err := kubernetes.NewForConfig(proxyConfig) + require.NoError(t, err) + + // we need to get a list of ConfigMaps, so we can use the resourceVersion + // of the list resource in the watch call + t.Log("getting the first list of ConfigMaps") + list, err := watcherClient.CoreV1(). + ConfigMaps(TenantNsName(watchUser.CompliantUsername)). + List(context.Background(), metav1.ListOptions{}) + require.NoError(t, err) + + var waitForWatcher sync.WaitGroup + waitForWatcher.Add(1) + // run the watch in a goroutine because it will take 40 seconds until the call is terminated + go func() { + t.Run("running the watcher", func(t *testing.T) { + defer waitForWatcher.Done() + withTimeout, cancelFunc := context.WithTimeout(context.Background(), contextTimeout) + defer cancelFunc() + + started := time.Now() + t.Log("starting the watch call") + _, err := watcherClient.RESTClient().Get(). + AbsPath(fmt.Sprintf("/api/v1/namespaces/%s/configmaps", TenantNsName(watchUser.CompliantUsername))). + Param("resourceVersion", list.GetResourceVersion()). + Param("watch", "true"). + Do(withTimeout). + Get() + t.Logf("stopping the watch after %s", time.Since(started)) + + assert.EqualError(t, err, "unexpected error when reading response body. Please retry. 
Original error: context deadline exceeded", "The call should be terminated by the context timeout") + assert.NotContains(t, err.Error(), "unexpected EOF", "If it contains 'unexpected EOF' then the call was terminated on the server side, which is not expected.") + }) + }() + return &waitForWatcher +} + +func NewWsWatcher(t *testing.T, user ProxyUser, namespace, proxyURL string) *WsWatcher { + _, err := url.Parse(proxyURL) + require.NoError(t, err) + return &WsWatcher{ + t: t, + namespace: namespace, + user: user, + proxyBaseURL: proxyURL, + } +} + +// start creates a new WebSocket connection. The method returns a function which is to be used to close the connection when done. +func (w *WsWatcher) Start() func() { + w.done = make(chan interface{}) // Channel to indicate that the receiverHandler is done + w.interrupt = make(chan os.Signal) // Channel to listen for interrupt signal to terminate gracefully + + signal.Notify(w.interrupt, os.Interrupt) // Notify the interrupt channel for SIGINT + + encodedToken := base64.RawURLEncoding.EncodeToString([]byte(w.user.Token)) + protocol := fmt.Sprintf("base64url.bearer.authorization.k8s.io.%s", encodedToken) + + trimmedProxyURL := strings.TrimPrefix(w.proxyBaseURL, "https://") + socketURL := fmt.Sprintf("wss://%s/apis/appstudio.redhat.com/v1alpha1/namespaces/%s/applications?watch=true", trimmedProxyURL, TenantNsName(w.namespace)) + w.t.Logf("opening connection to '%s'", socketURL) + dialer := &websocket.Dialer{ + Subprotocols: []string{protocol, "base64.binary.k8s.io"}, + TLSClientConfig: &tls.Config{ + InsecureSkipVerify: true, // nolint:gosec + }, + } + + extraHeaders := make(http.Header, 1) + extraHeaders.Add("Origin", "http://localhost") + + conn, resp, err := dialer.Dial(socketURL, extraHeaders) // nolint:bodyclose // see `return func() {...}` + if errors.Is(err, websocket.ErrBadHandshake) { + r, _ := io.ReadAll(resp.Body) + defer resp.Body.Close() + w.t.Logf("handshake failed with status %d / response %s", 
resp.StatusCode, string(r)) + } + require.NoError(w.t, err) + w.connection = conn + w.receivedApps = make(map[string]*appstudiov1.Application) + + go w.receiveHandler() + go w.startMainLoop() + + return func() { + _ = w.connection.Close() + if resp != nil { + _, _ = io.Copy(io.Discard, resp.Body) + _ = resp.Body.Close() + } + } +} + +// startMainLoop starts the main loop for the client. Packets are sent here. +func (w *WsWatcher) startMainLoop() { + for { + select { + case <-time.After(time.Duration(1) * time.Millisecond * 1000): + // Send an echo packet every second + err := w.connection.WriteMessage(websocket.TextMessage, []byte("Hello from e2e tests!")) + if err != nil { + w.t.Logf("Exiting main loop. It's normal if the connection has been closed. Reason: %s\n", err.Error()) + return + } + case <-w.interrupt: + // Received a SIGINT (Ctrl + C). Terminate gracefully... + // Close the websocket connection + err := w.connection.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")) + if err != nil { + w.t.Logf("Error during closing websocket: %s", err.Error()) + return + } + + select { + case <-w.done: + w.t.Log("Receiver Channel Closed! Exiting...") + case <-time.After(time.Duration(1) * time.Second): + w.t.Log("Timeout in closing receiving channel. Exiting...") + } + return + } + } +} + +type message struct { + MessageType string `json:"type"` + Application appstudiov1.Application `json:"object"` +} + +// receiveHandler listens to the incoming messages and stores them as Applications objects +func (w *WsWatcher) receiveHandler() { + defer close(w.done) + for { + _, msg, err := w.connection.ReadMessage() + if err != nil { + w.t.Logf("Exiting message receiving loop. It's normal if the connection has been closed. 
Reason: %s\n", err.Error()) + return + } + w.t.Logf("Received: %s", msg) + message := message{} + err = json.Unmarshal(msg, &message) + require.NoError(w.t, err) + copyApp := message.Application + w.mu.Lock() + if message.MessageType == "DELETED" { + delete(w.receivedApps, copyApp.Name) + } else { + w.receivedApps[copyApp.Name] = ©App + } + w.mu.Unlock() + } +} + +func (w *WsWatcher) WaitForApplication(expectedAppName string) (*appstudiov1.Application, error) { + var foundApp *appstudiov1.Application + err := kubewait.Poll(wait.DefaultRetryInterval, wait.DefaultTimeout, func() (bool, error) { + defer w.mu.RUnlock() + w.mu.RLock() + foundApp = w.receivedApps[expectedAppName] + return foundApp != nil, nil + }) + return foundApp, err +} + +func (w *WsWatcher) WaitForApplicationDeletion(expectedAppName string) error { + err := kubewait.PollImmediate(wait.DefaultRetryInterval, wait.DefaultTimeout, func() (bool, error) { + defer w.mu.RUnlock() + w.mu.RLock() + _, present := w.receivedApps[expectedAppName] + return !present, nil + }) + return err +}