diff --git a/setup/README.adoc b/setup/README.adoc index b4e27632d..9174a5dc8 100644 --- a/setup/README.adoc +++ b/setup/README.adoc @@ -4,7 +4,7 @@ This document describes how to use the setup tool to set up a Dev Sandbox enviro == Prereqs -. Provision the *latest available* GA version of *OCP 4.12.x* on AWS with sufficient resources: 3 `m5.12xlarge` master nodes and 3 `m5.2xlarge` worker nodes. +. Provision the *latest available* GA version of *OCP 4.13.x* on AWS with sufficient resources: 3 `m5.8xlarge` master nodes and 3 `m5.2xlarge` worker nodes. + The latest version of openshift-install can be downloaded from https://mirror.openshift.com/pub/openshift-v4/clients/ocp/ + @@ -18,7 +18,7 @@ controlPlane: name: master platform: aws: - type: "m5.12xlarge" + type: "m5.8xlarge" replicas: 3 compute: - hyperthreading: Enabled @@ -90,24 +90,16 @@ toolchain-status 0 True 2021-03-24T22:39:36Z go run setup/main.go --users 1 --default 1 --custom 0 --username setup -b 1 ``` + -After the command completes it will print performance metrics that can be used for comparison against the baseline metrics. +After the command completes it will print performance metrics that can be used for comparison against the baseline metrics. The results are saved to a .csv file to make it easier to copy the results into the spreadsheet. + Add the results to the Onboarding Performance Checklist spreadsheet in the `Onboarding Operator 1 user` column. + . Populate the cluster with 2000 users along with default and custom resources for each user. + -Creating 2000 users can take a long time so it's better to run the setup tool concurrently to speed up the process. Run the following command in 2 separate terminals using a unique username prefix in each terminal. 
-+ -Terminal 1: -+ -``` -go run setup/main.go --template= --users 1000 --default 1000 --custom 1000 --username cupcake --workloads namespace:deploymentName -``` -+ -Terminal 2: +Run the following command to create 2000 users + ``` -go run setup/main.go --template= --users 1000 --default 1000 --custom 1000 --username cheesecake --workloads namespace:deploymentName +go run setup/main.go --template= --users 2000 --default 2000 --custom 2000 --username cupcake --workloads namespace:deploymentName ``` + Note 1: You do not need to add the default template (https://raw.githubusercontent.com/codeready-toolchain/toolchain-e2e/master/setup/resources/user-workloads.yaml[setup/resources/user-workloads.yaml]), it is automatically added when you run the setup. You can control how many users will have the default template applied using the `--default` flag. @@ -117,7 +109,7 @@ Note 2: The `--workloads` flag tells the tool to capture the CPU and memory of a Note 3: CSV resources are automatically created for each default user as well. An all-namespaces scoped operator will be installed as part of the 'preparing' step. This operator will create a CSV resource in each namespace to mimic the behaviour observed in the production cluster. This operator install step can be skipped with the `--skip-csvgen` flag but should not be skipped without good reason. + Use `go run setup/main.go --help` to see the full set of options. + -. Grab some coffee ā˜•ļø, populating the cluster with 2000 users can take over 4 hours depending on network latency + +. Grab some coffee ā˜•ļø, populating the cluster with 2000 users usually takes about an hour but can take longer depending on network latency + Note: If for some reason the provisioning users step does not complete (eg. timeout), note down how many users were created and rerun the command with the remaining number of users to be created and a different username prefix. eg. 
`go run setup/main.go --template= --username zorro --users --default --custom ` + . After the command completes it will print performance metrics that can be used for comparison against the baseline metrics. @@ -130,7 +122,7 @@ With the cluster now under load, it's time to evaluate the environment. 1. Use your operators as a user would and evaluate the performance. 2. Monitor the cluster's performance using the Monitoring view in the OpenShift Console. -3. Monitor the memory usage of operators. There are many more resources created on this cluster than most operators have been tested with so it's important to look for any possible areas for concern. +3. Monitor the memory usage of operators. There are many more resources created on this cluster than most operators have been tested with so it's important to look for any possible areas of concern. 4. Compare the Results summary to the Baseline metrics provided in the onboarding doc. == Clean up @@ -155,12 +147,7 @@ make clean-e2e-resources go run setup/main.go --users 1 --default 1 --custom 0 --username setup -b 1 ``` -2. Run setup for 1000 users in terminal 1 -``` -go run setup/main.go --users 1000 --default 1000 --custom 0 --username pizza -``` - -3. Run setup for 1000 users in terminal 2 +2. 
Run setup for 2000 users ``` -go run setup/main.go --users 1000 --default 1000 --custom 0 --username pasta +go run setup/main.go --users 2000 --default 2000 --custom 0 --username pizza ``` diff --git a/setup/cmd/root.go b/setup/cmd/root.go index f79a86cc6..d845cc8a0 100644 --- a/setup/cmd/root.go +++ b/setup/cmd/root.go @@ -22,6 +22,7 @@ import ( "github.com/codeready-toolchain/toolchain-e2e/setup/terminal" "github.com/codeready-toolchain/toolchain-e2e/setup/users" "github.com/codeready-toolchain/toolchain-e2e/setup/wait" + "sigs.k8s.io/controller-runtime/pkg/client" appsv1 "k8s.io/api/apps/v1" "k8s.io/apimachinery/pkg/types" @@ -37,7 +38,6 @@ var ( verbose bool customTemplatePaths []string numberOfUsers int - userBatches int defaultTemplateUsers int customTemplateUsers int skipAdditionalWait bool @@ -51,9 +51,9 @@ var ( ) var ( - AverageIdlerUpdateTime time.Duration - AverageDefaultApplyTimePerUser time.Duration - AverageCustomApplyTimePerUser time.Duration + IdlerUpdateTime time.Duration + DefaultApplyTimePerUser time.Duration + CustomApplyTimePerUser time.Duration ) // Execute the setup command to fill a cluster with as many users as requested. 
@@ -72,7 +72,6 @@ func Execute() { cmd.Flags().StringVar(&kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file") cmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "if 'debug' traces should be displayed in the console") cmd.Flags().IntVarP(&numberOfUsers, "users", "u", 2000, "the number of user accounts to provision") - cmd.Flags().IntVarP(&userBatches, "batch", "b", 25, "create user accounts in batches of N, increasing batch size may cause performance problems") cmd.Flags().StringVar(&cfg.HostOperatorNamespace, "host-ns", cfg.DefaultHostNS, "the namespace of Host operator") cmd.Flags().StringVar(&cfg.MemberOperatorNamespace, "member-ns", cfg.DefaultMemberNS, "the namespace of the Member operator") cmd.Flags().StringSliceVar(&customTemplatePaths, "template", []string{}, "the path to the OpenShift template to apply for each custom user") @@ -99,27 +98,18 @@ func setup(cmd *cobra.Command, _ []string) { // nolint:gocyclo term := terminal.New(cmd.InOrStdin, cmd.OutOrStdout, verbose) // call cfg.Init() to initialize variables that are dependent on any flags eg. 
testname - cfg.Init() + cfg.Init(term) term.Infof("Number of Users: '%d'", numberOfUsers) term.Infof("Default Template Users: '%d'", defaultTemplateUsers) term.Infof("Custom Template Users: '%d'", customTemplateUsers) - term.Infof("User Batch Size: '%d'", userBatches) term.Infof("Host Operator Namespace: '%s'", cfg.HostOperatorNamespace) term.Infof("Member Operator Namespace: '%s'\n", cfg.MemberOperatorNamespace) - var generalResultsInfo = func() [][]string { - return [][]string{ - {"Number of Users", strconv.Itoa(numberOfUsers)}, - {"Number of Default Template Users", strconv.Itoa(defaultTemplateUsers)}, - {"Number of Custom Template Users", strconv.Itoa(customTemplateUsers)}, - {"User Batch Size", strconv.Itoa(userBatches)}, - {"Host Operator Namespace", cfg.HostOperatorNamespace}, - {"Member Operator Namespace", cfg.MemberOperatorNamespace}, - {"Average Idler Update Time (s)", fmt.Sprintf("%.2f", AverageIdlerUpdateTime.Seconds()/float64(numberOfUsers))}, - {"Average Time Per User - default (s)", fmt.Sprintf("%.2f", AverageDefaultApplyTimePerUser.Seconds()/float64(numberOfUsers))}, - {"Average Time Per User - custom (s)", fmt.Sprintf("%.2f", AverageCustomApplyTimePerUser.Seconds()/float64(numberOfUsers))}, - } + generalResultsInfo := [][]string{ + {"Number of Users", strconv.Itoa(numberOfUsers)}, + {"Number of Default Template Users", strconv.Itoa(defaultTemplateUsers)}, + {"Number of Custom Template Users", strconv.Itoa(customTemplateUsers)}, } // validate params @@ -130,9 +120,6 @@ func setup(cmd *cobra.Command, _ []string) { // nolint:gocyclo usersWithinBounds(term, defaultTemplateUsers, cfg.DefaultTemplateUsersParam) usersWithinBounds(term, customTemplateUsers, cfg.CustomTemplateUsersParam) - if numberOfUsers%userBatches != 0 { - term.Fatalf(fmt.Errorf("users value must be a multiple of the batch size '%d'", userBatches), "invalid users value '%d'", numberOfUsers) - } if operatorsLimit > len(operators.Templates) { term.Fatalf(fmt.Errorf("the operators limit 
value must be less than or equal to '%d'", len(operators.Templates)), "invalid operators limit value '%d'", operatorsLimit) } @@ -188,7 +175,7 @@ func setup(cmd *cobra.Command, _ []string) { // nolint:gocyclo } term.Infof("šŸ“‹ template list: %s\n", templateListStr) - if interactive && !term.PromptBoolf("šŸ‘¤ provision %d users in batches of %d on %s using the templates listed above", numberOfUsers, userBatches, config.Host) { + if interactive && !term.PromptBoolf("šŸ‘¤ provision %d users on %s using the templates listed above", numberOfUsers, config.Host) { return } @@ -196,6 +183,9 @@ func setup(cmd *cobra.Command, _ []string) { // nolint:gocyclo term.Fatalf(err, "ensure the sandbox host and member operators are installed successfully before running the setup") } + // ===================== + // begin configuration + // ===================== term.Infof("Configuring default space tier...") if err := cfg.ConfigureDefaultSpaceTier(cl); err != nil { term.Fatalf(err, "unable to set default space tier") @@ -205,6 +195,14 @@ func setup(cmd *cobra.Command, _ []string) { // nolint:gocyclo if err := cfg.DisableCopiedCSVs(cl); err != nil { term.Fatalf(err, "unable to disable OLM copy CSVs feature") } + // ===================== + // end configuration + // ===================== + + // ===================== + // begin setup + // ===================== + setupStartTime := time.Now() if !skipInstallOperators { term.Infof("ā³ installing operators...") @@ -266,110 +264,73 @@ func setup(cmd *cobra.Command, _ []string) { // nolint:gocyclo // gather and write results resultsWriter := results.New(term) + outputResults := func() { + addAndOutputResults(term, resultsWriter, func() [][]string { return generalResultsInfo }, metricsInstance.ComputeResults) + } // ensure metrics are dumped even if there's a fatal error - term.AddPreFatalExitHook(func() { - addAndOutputResults(term, resultsWriter, metricsInstance.ComputeResults, generalResultsInfo) - }) + 
term.AddPreFatalExitHook(outputResults) uip := uiprogress.New() uip.Start() - // start the progress bars in go routines + // start the progress bars and work in go routines var wg sync.WaitGroup - usersignupBar := uip.AddBar(numberOfUsers).AppendCompleted().PrependFunc(func(b *uiprogress.Bar) string { - return strutil.PadLeft(fmt.Sprintf("user signups (%d/%d)", b.Current(), numberOfUsers), 25, ' ') - }) - wg.Add(1) - go func() { - defer wg.Done() - for usersignupBar.Incr() { - username := fmt.Sprintf("%s-%04d", usernamePrefix, usersignupBar.Current()) - if err := users.Create(cl, username, cfg.HostOperatorNamespace, cfg.MemberOperatorNamespace); err != nil { - term.Fatalf(err, "failed to provision user '%s'", username) - } - time.Sleep(time.Millisecond * 20) - - // when the batch is done, wait for the user's namespaces to exist before proceeding - if usersignupBar.Current()%userBatches == 0 { - for i := usersignupBar.Current() - userBatches + 1; i < usersignupBar.Current(); i++ { - userToCheck := fmt.Sprintf("%s-%04d", usernamePrefix, i) - userNS := fmt.Sprintf("%s-dev", userToCheck) - if err := wait.ForNamespace(cl, userNS); err != nil { - term.Fatalf(err, "failed to find namespace '%s'", userNS) - } - } - } + + concurrentUserSignups := 10 + usersignupBar := addProgressBar(uip, "user signups", numberOfUsers) + signupUserFunc := func(cl client.Client, curUserNum int, username string) { + if err := users.Create(cl, username, cfg.HostOperatorNamespace, cfg.MemberOperatorNamespace); err != nil { + term.Fatalf(err, "failed to provision user '%s'", username) } - }() - if !skipIdlerSetup { - idlerBar := uip.AddBar(numberOfUsers).AppendCompleted().PrependFunc(func(b *uiprogress.Bar) string { - return strutil.PadLeft(fmt.Sprintf("idler setup (%d/%d)", b.Current(), numberOfUsers), 25, ' ') - }) - wg.Add(1) - go func() { - defer wg.Done() - for idlerBar.Incr() { - username := fmt.Sprintf("%s-%04d", usernamePrefix, idlerBar.Current()) - - startTime := time.Now() - // update 
Idlers timeout to kill workloads faster to reduce impact of memory/cpu usage during testing - if err := idlers.UpdateTimeout(cl, username, idlerDuration); err != nil { - term.Fatalf(err, "failed to update idlers for user '%s'", username) - } + if err := wait.ForSpace(cl, username); err != nil { + term.Fatalf(err, "space '%s' was not ready or not found", username) + } + } + userSignupRoutine := userRoutine(term, usersignupBar, signupUserFunc) + splitToMultipleRoutines(&wg, concurrentUserSignups, userSignupRoutine) - idlerTime := time.Since(startTime) - AverageIdlerUpdateTime += idlerTime + var idlerBar *userProgressBar + if !skipIdlerSetup { + concurrentIdlerSetups := 3 + idlerBar = addProgressBar(uip, "idler setup", numberOfUsers) + updateIdlerFunc := func(cl client.Client, curUserNum int, username string) { + // update Idlers timeout to kill workloads faster to reduce impact of memory/cpu usage during testing + if err := idlers.UpdateTimeout(cl, username, idlerDuration); err != nil { + term.Fatalf(err, "failed to update idlers for user '%s'", username) } - }() + } + ur := userRoutine(term, idlerBar, updateIdlerFunc) + splitToMultipleRoutines(&wg, concurrentIdlerSetups, ur) } + var defaultUserSetupBar *userProgressBar + concurrentUserSetups := 5 if defaultTemplateUsers > 0 { - defaultUserSetupBar := uip.AddBar(numberOfUsers).AppendCompleted().PrependFunc(func(b *uiprogress.Bar) string { - return strutil.PadLeft(fmt.Sprintf("setup users with default template (%d/%d)", b.Current(), numberOfUsers), 25, ' ') - }) - wg.Add(1) - go func() { - defer wg.Done() - for defaultUserSetupBar.Incr() { - username := fmt.Sprintf("%s-%04d", usernamePrefix, defaultUserSetupBar.Current()) - - startTime := time.Now() - - // create resources for every nth user - if defaultUserSetupBar.Current() <= defaultTemplateUsers { - if err := resources.CreateUserResourcesFromTemplateFiles(cl, scheme, username, []string{defaultTemplatePath}); err != nil { - term.Fatalf(err, "failed to create 
resources for user '%s'", username) - } + defaultUserSetupBar = addProgressBar(uip, "setup default template users", defaultTemplateUsers) + setupDefaultUsersFunc := func(cl client.Client, curUserNum int, username string) { + if curUserNum <= defaultTemplateUsers { + if err := resources.CreateUserResourcesFromTemplateFiles(cl, scheme, username, []string{defaultTemplatePath}); err != nil { + term.Fatalf(err, "failed to create default template resources for user '%s'", username) } - userTime := time.Since(startTime) - AverageDefaultApplyTimePerUser += userTime } - }() + } + ur := userRoutine(term, defaultUserSetupBar, setupDefaultUsersFunc) + splitToMultipleRoutines(&wg, concurrentUserSetups, ur) } + var customUserSetupBar *userProgressBar if customTemplateUsers > 0 && len(customTemplatePaths) > 0 { - customUserSetupBar := uip.AddBar(numberOfUsers).AppendCompleted().PrependFunc(func(b *uiprogress.Bar) string { - return strutil.PadLeft(fmt.Sprintf("setup users with custom templates (%d/%d)", b.Current(), numberOfUsers), 25, ' ') - }) - wg.Add(1) - go func() { - defer wg.Done() - for customUserSetupBar.Incr() { - username := fmt.Sprintf("%s-%04d", usernamePrefix, customUserSetupBar.Current()) - - startTime := time.Now() - - // create resources for users that should have the custom template applied - if customUserSetupBar.Current() <= customTemplateUsers { - if err := resources.CreateUserResourcesFromTemplateFiles(cl, scheme, username, customTemplatePaths); err != nil { - term.Fatalf(err, "failed to create resources for user '%s'", username) - } + customUserSetupBar = addProgressBar(uip, "setup custom template users", customTemplateUsers) + setupCustomUsersFunc := func(cl client.Client, curUserNum int, username string) { + if curUserNum <= customTemplateUsers { + if err := resources.CreateUserResourcesFromTemplateFiles(cl, scheme, username, customTemplatePaths); err != nil { + term.Fatalf(err, "failed to create custom template resources for user '%s'", username) } - 
userTime := time.Since(startTime) - AverageCustomApplyTimePerUser += userTime } - }() + } + ur := userRoutine(term, customUserSetupBar, setupCustomUsersFunc) + splitToMultipleRoutines(&wg, concurrentUserSetups, ur) } defer close(stopMetrics) @@ -389,7 +350,29 @@ func setup(cmd *cobra.Command, _ []string) { // nolint:gocyclo time.Sleep(additionalMetricsDuration) } - addAndOutputResults(term, resultsWriter, metricsInstance.ComputeResults, generalResultsInfo) + // ===================== + // end of setup + // ===================== + + totalRunningTime := time.Since(setupStartTime) + if idlerBar != nil { + IdlerUpdateTime = idlerBar.timeSpent + } + if defaultUserSetupBar != nil { + DefaultApplyTimePerUser = defaultUserSetupBar.timeSpent + } + if customUserSetupBar != nil { + CustomApplyTimePerUser = customUserSetupBar.timeSpent + } + + generalResultsInfo = append(generalResultsInfo, + []string{"Average Idler Update Time (s)", fmt.Sprintf("%.2f", IdlerUpdateTime.Seconds()/float64(numberOfUsers))}, + []string{"Average Time Per User - default (s)", fmt.Sprintf("%.2f", DefaultApplyTimePerUser.Seconds()/float64(numberOfUsers))}, + []string{"Average Time Per User - custom (s)", fmt.Sprintf("%.2f", CustomApplyTimePerUser.Seconds()/float64(numberOfUsers))}, + []string{"Total Running Time (m)", fmt.Sprintf("%f", totalRunningTime.Minutes())}, + ) + + outputResults() term.Infof("šŸ‘‹ have fun!") } @@ -400,6 +383,11 @@ func usersWithinBounds(term terminal.Terminal, value int, templateType string) { } func addAndOutputResults(term terminal.Terminal, resultsWriter *results.Results, r ...func() [][]string) { + // add header row + resultsWriter.AddResults([][]string{ + {"Item", "Value"}, + }) + // add results for _, result := range r { resultsWriter.AddResults(result()) } @@ -407,3 +395,70 @@ func addAndOutputResults(term terminal.Terminal, resultsWriter *results.Results, term.Infof("\nšŸ“ˆ Results šŸ“‰") resultsWriter.OutputResults() } + +type userProgressBar struct { + mu sync.Mutex 
+ timeSpent time.Duration + bar *uiprogress.Bar +} + +func addProgressBar(uip *uiprogress.Progress, description string, total int) *userProgressBar { + + bar := uip.AddBar(total).AppendCompleted().PrependFunc(func(b *uiprogress.Bar) string { + return strutil.PadLeft(fmt.Sprintf("%s (%d/%d)", description, b.Current(), total), 40, ' ') + }) + + return &userProgressBar{ + bar: bar, + } +} + +func (b *userProgressBar) Incr() (bool, int) { + b.mu.Lock() + defer b.mu.Unlock() + return b.bar.Incr(), b.bar.Current() +} + +func (b *userProgressBar) AddTimeSpent(d time.Duration) { + b.mu.Lock() + b.timeSpent += d + b.mu.Unlock() +} + +func splitToMultipleRoutines(parent *sync.WaitGroup, concurrentRoutinesCount int, routine func(*sync.WaitGroup)) { + parent.Add(1) + go func() { + defer parent.Done() + var subgroup sync.WaitGroup + subgroup.Add(concurrentRoutinesCount) + for i := 0; i < concurrentRoutinesCount; i++ { + go routine(&subgroup) + } + subgroup.Wait() + }() +} + +func userRoutine(term terminal.Terminal, progressBar *userProgressBar, ua userAction) func(wg *sync.WaitGroup) { + return func(subgroup *sync.WaitGroup) { + aCl, _, _, err := cfg.NewClient(term, kubeconfig) + if err != nil { + term.Fatalf(err, "cannot create client") + } + + hasMore, curUserNum := progressBar.Incr() + for hasMore { + username := fmt.Sprintf("%s-%04d", usernamePrefix, curUserNum) + + startTime := time.Now() + + ua(aCl, curUserNum, username) + + timeSpent := time.Since(startTime) + progressBar.AddTimeSpent(timeSpent) + hasMore, curUserNum = progressBar.Incr() + } + subgroup.Done() + } +} + +type userAction func(cl client.Client, curUserNum int, username string) diff --git a/setup/configuration/configuration.go b/setup/configuration/configuration.go index febcacd6f..22d1d88e0 100644 --- a/setup/configuration/configuration.go +++ b/setup/configuration/configuration.go @@ -55,16 +55,14 @@ var ( startedTimestamp = time.Now().Format("2006-01-02_15:04:05") ) -func Init() { +func Init(term 
terminal.Terminal) { pwd, err := os.Getwd() if err != nil { - fmt.Printf("error getting current working directory %s", err) - os.Exit(1) + term.Fatalf(err, "error getting current working directory") } resultsDir = pwd + "/tmp/results/" if err := os.MkdirAll(resultsDir, os.ModePerm); err != nil { - fmt.Println("error creating results directory ", resultsDir, err) - os.Exit(1) + term.Fatalf(err, "error creating results directory %s", resultsDir) } if len(Testname) > 0 && Testname[0] != '-' { Testname = "-" + Testname @@ -96,6 +94,11 @@ func NewClient(term terminal.Terminal, kubeconfigPath string) (client.Client, *r term.Fatalf(err, "cannot create client config") } + // Set QPS and Burst to higher values to avoid client-side throttling issues + // prometheus uses these QPS and Burst values so it shouldn't be an issue, see https://github.com/prometheus-operator/prometheus-operator/blob/9d68ecf289d711c66bef39d2f83429265abc6986/pkg/k8sutil/k8sutil.go#L96-L97 + clientConfig.QPS = 100 + clientConfig.Burst = 100 + cl, err := client.New(clientConfig, client.Options{Scheme: s}) term.Infof("API endpoint: %s", clientConfig.Host) return cl, clientConfig, s, err diff --git a/setup/resources/create_resources.go b/setup/resources/create_resources.go index 5c4579b7a..775ad679b 100644 --- a/setup/resources/create_resources.go +++ b/setup/resources/create_resources.go @@ -31,7 +31,7 @@ func CreateUserResourcesFromTemplateFiles(cl runtimeclient.Client, s *runtime.Sc tmpl := tmpls[templatePath] // waiting for each namespace here prevents some edge cases where the setup job can progress beyond the usersignup job and fail with a timeout - if err := wait.ForNamespace(cl, userNS); err != nil { + if err := wait.ForSpace(cl, username); err != nil { return err } processor := ctemplate.NewProcessor(s) diff --git a/setup/wait/wait.go b/setup/wait/wait.go index 5d12c9456..58d3fabd1 100644 --- a/setup/wait/wait.go +++ b/setup/wait/wait.go @@ -4,8 +4,10 @@ import ( "context" "time" + 
"github.com/codeready-toolchain/toolchain-common/pkg/test" "github.com/codeready-toolchain/toolchain-e2e/setup/configuration" + toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" "github.com/operator-framework/api/pkg/operators/v1alpha1" "github.com/pkg/errors" corev1 "k8s.io/api/core/v1" @@ -15,20 +17,31 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" ) -func ForNamespace(cl client.Client, namespace string) error { - ns := &corev1.Namespace{} +func ForSpace(cl client.Client, space string) error { + sp := &toolchainv1alpha1.Space{} + expectedConditions := []toolchainv1alpha1.Condition{ + { + Type: toolchainv1alpha1.ConditionReady, + Status: corev1.ConditionTrue, + Reason: "Provisioned", + }, + } + if err := k8swait.Poll(configuration.DefaultRetryInterval, configuration.DefaultTimeout, func() (bool, error) { err := cl.Get(context.TODO(), types.NamespacedName{ - Name: namespace, - }, ns) + Name: space, + Namespace: configuration.HostOperatorNamespace, + }, sp) if k8serrors.IsNotFound(err) { return false, nil } else if err != nil { return false, err + } else if !test.ConditionsMatch(sp.Status.Conditions, expectedConditions...) 
{ + return false, nil } return true, nil }); err != nil { - return errors.Wrapf(err, "namespace '%s' does not exist", namespace) + return errors.Wrapf(err, "space '%s' is not ready yet", space) } return nil } diff --git a/setup/wait/wait_test.go b/setup/wait/wait_test.go index 9dbf178e1..4589f781d 100644 --- a/setup/wait/wait_test.go +++ b/setup/wait/wait_test.go @@ -6,6 +6,7 @@ import ( "testing" "time" + toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" "github.com/codeready-toolchain/toolchain-e2e/setup/configuration" "github.com/codeready-toolchain/toolchain-e2e/setup/test" "github.com/codeready-toolchain/toolchain-e2e/setup/wait" @@ -14,7 +15,6 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ) @@ -23,15 +23,25 @@ func TestForNamespace(t *testing.T) { configuration.DefaultTimeout = time.Millisecond * 1 t.Run("success", func(t *testing.T) { // given - ns := &corev1.Namespace{ + ns := &toolchainv1alpha1.Space{ ObjectMeta: metav1.ObjectMeta{ - Name: "user0001-stage", + Name: "user0001", + Namespace: configuration.HostOperatorNamespace, }, + Status: toolchainv1alpha1.SpaceStatus{ + Conditions: []toolchainv1alpha1.Condition{ + { + Type: toolchainv1alpha1.ConditionReady, + Status: "True", + Reason: "Provisioned", + }, + }, + }, } - cl := test.NewFakeClient(t, ns) // ns exists + cl := test.NewFakeClient(t, ns) // space exists // when - err := wait.ForNamespace(cl, "user0001-stage") + err := wait.ForSpace(cl, "user0001") // then require.NoError(t, err) @@ -45,11 +45,11 @@ cl := test.NewFakeClient(t) // ns doesn't exist // when - err := wait.ForNamespace(cl, "user0001-missing") + err := wait.ForSpace(cl, "user0001") // then require.Error(t, err) - assert.EqualError(t, err, "namespace 'user0001-missing' does not exist: timed out waiting for the condition") + assert.EqualError(t, err, "space 'user0001' is not ready yet: timed out waiting for the condition") }) }) diff --git a/test/e2e/nstemplatetier_test.go index adae4c754..c16d5637e 100644 --- a/test/e2e/nstemplatetier_test.go +++ 
b/test/e2e/nstemplatetier_test.go @@ -43,7 +43,7 @@ func TestNSTemplateTiers(t *testing.T) { Resources() // all tiers to check - keep the base as the last one, it will verify downgrade back to the default tier at the end of the test - tiersToCheck := []string{"advanced", "baseextendedidling", "baselarge", "test", "appstudio", "appstudio-env", "base1ns", "base1nsnoidling", "base1ns6didler", "base"} + tiersToCheck := []string{"advanced", "baseextendedidling", "baselarge", "test", "appstudio", "appstudio-redhat", "appstudio-env", "base1ns", "base1nsnoidling", "base1ns6didler", "base"} // when the tiers are created during the startup then we can verify them allTiers := &toolchainv1alpha1.NSTemplateTierList{} diff --git a/testsupport/tiers/checks.go b/testsupport/tiers/checks.go index c1619496c..9d7493f58 100644 --- a/testsupport/tiers/checks.go +++ b/testsupport/tiers/checks.go @@ -8,6 +8,7 @@ import ( "testing" toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" + esv1beta1 "github.com/external-secrets/external-secrets/apis/externalsecrets/v1beta1" "github.com/codeready-toolchain/toolchain-e2e/testsupport/wait" "github.com/davecgh/go-spew/spew" @@ -27,6 +28,7 @@ const ( // tier names advanced = "advanced" appstudio = "appstudio" + appstudioRedhat = "appstudio-redhat" appstudioEnv = "appstudio-env" base = "base" base1ns = "base1ns" @@ -78,6 +80,9 @@ func NewChecksForTier(tier *toolchainv1alpha1.NSTemplateTier) (TierChecks, error case appstudio: return &appstudioTierChecks{tierName: appstudio}, nil + case appstudioRedhat: + return &appstudioRedhatTierChecks{tierName: appstudioRedhat}, nil + case appstudioEnv: return &appstudioEnvTierChecks{tierName: appstudioEnv}, nil @@ -382,6 +387,12 @@ func commonNetworkPolicyChecks() []namespaceObjectsCheck { } } +func externalSecretChecks() []namespaceObjectsCheck { + return []namespaceObjectsCheck{ + externalSecretSnykSharedToken(), + } +} + type advancedTierChecks struct { baseTierChecks } @@ -531,6 +542,107 @@ 
func (a *appstudioTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { pipelineRunnerClusterRole()) } +type appstudioRedhatTierChecks struct { + tierName string +} + +func (a *appstudioRedhatTierChecks) GetNamespaceObjectChecks(_ string) []namespaceObjectsCheck { + checks := []namespaceObjectsCheck{ + resourceQuotaComputeDeploy("20", "32Gi", "1750m", "32Gi"), + resourceQuotaComputeBuild("60", "64Gi", "6", "32Gi"), + resourceQuotaStorage("50Gi", "50Gi", "50Gi", "12"), + limitRange("2", "2Gi", "10m", "256Mi"), + numberOfLimitRanges(1), + toolchainSaReadRole(), + memberOperatorSaReadRoleBinding(), + gitOpsServiceLabel(), + redhatInternalTenantLabel(), + appstudioWorkSpaceNameLabel(), + environment("development"), + resourceQuotaAppstudioCrds("512", "512", "512"), + resourceQuotaAppstudioCrdsBuild("512"), + resourceQuotaAppstudioCrdsGitops("512", "512", "512", "512", "512"), + resourceQuotaAppstudioCrdsIntegration("512", "512", "512"), + resourceQuotaAppstudioCrdsRelease("512", "512", "512", "512", "512"), + resourceQuotaAppstudioCrdsEnterpriseContract("512"), + resourceQuotaAppstudioCrdsSPI("512", "512", "512", "512", "512"), + pipelineServiceAccount(), + pipelineRunnerRoleBinding(), + } + + checks = append(checks, append(append(commonNetworkPolicyChecks(), externalSecretChecks()...), networkPolicyAllowFromCRW(), numberOfNetworkPolicies(6))...) 
+ return checks +} + +func (a *appstudioRedhatTierChecks) GetSpaceRoleChecks(spaceRoles map[string][]string) ([]spaceRoleObjectsCheck, error) { + checks := []spaceRoleObjectsCheck{} + roles := 0 + rolebindings := 0 + for role, usernames := range spaceRoles { + switch role { + case "admin": + checks = append(checks, appstudioRedhatUserActionsRole()) + roles++ + for _, userName := range usernames { + checks = append(checks, + appstudioRedhatUserActionsRoleBinding(userName, "admin"), + appstudioViewRoleBinding(userName), + ) + rolebindings += 2 + } + case "maintainer": + checks = append(checks, appstudioRedhatMaintainerUserActionsRole()) + roles++ + for _, userName := range usernames { + checks = append(checks, + appstudioRedhatUserActionsRoleBinding(userName, "maintainer"), + appstudioViewRoleBinding(userName), + ) + rolebindings += 2 + } + case "contributor": + checks = append(checks, appstudioRedhatContributorUserActionsRole()) + roles++ + for _, userName := range usernames { + checks = append(checks, + appstudioRedhatUserActionsRoleBinding(userName, "contributor"), + appstudioViewRoleBinding(userName), + ) + rolebindings += 2 + } + default: + return nil, fmt.Errorf("unexpected template name: '%s'", role) + } + } + // also count the roles, rolebindings and service accounts + checks = append(checks, + numberOfToolchainRoles(roles+1), // +1 for `toolchain-sa-read` + numberOfToolchainRoleBindings(rolebindings+2), // +2 for `member-operator-sa-read` and `appstudio-pipelines-runner-rolebinding` + ) + return checks, nil +} + +func (a *appstudioRedhatTierChecks) GetExpectedTemplateRefs(t *testing.T, hostAwait *wait.HostAwaitility) TemplateRefs { + templateRefs := GetTemplateRefs(t, hostAwait, a.tierName) + verifyNsTypes(t, a.tierName, templateRefs, "tenant") + return templateRefs +} + +func (a *appstudioRedhatTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { + return clusterObjectsChecks( + clusterResourceQuotaDeployments("150"), + 
clusterResourceQuotaReplicas(), + clusterResourceQuotaRoutes(), + clusterResourceQuotaJobs(), + clusterResourceQuotaServices(), + clusterResourceQuotaBuildConfig(), + clusterResourceQuotaSecrets(), + clusterResourceQuotaConfigMap(), + numberOfClusterResourceQuotas(8), + idlers(0, ""), + pipelineRunnerClusterRole()) +} + type appstudioEnvTierChecks struct { tierName string } @@ -542,6 +654,6 @@ func (a *appstudioEnvTierChecks) GetNamespaceObjectChecks(_ string) []namespaceO resourceQuotaStorage("50Gi", "50Gi", "50Gi", "12"), limitRange("2", "2Gi", "10m", "256Mi"), numberOfLimitRanges(1), namespaceManagerSA(), additionalArgocdReadRole(), namespaceManagerSaAdditionalArgocdReadRoleBinding(), @@ -957,6 +1070,39 @@ func limitRange(cpuLimit, memoryLimit, cpuRequest, memoryRequest string) namespa } } +func externalSecretSnykSharedToken() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, userName string) { + es, err := memberAwait.WaitForExternalSecret(t, ns, "snyk-shared-token") + require.NoError(t, err) + assert.Equal(t, toolchainv1alpha1.ProviderLabelValue, es.ObjectMeta.Labels[toolchainv1alpha1.ProviderLabelKey]) + expected := &esv1beta1.ExternalSecret{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string{ + "argocd.argoproj.io/sync-options": "SkipDryRunOnMissingResource=true", + "argocd.argoproj.io/sync-wave": "-1", + }, + }, + Spec: esv1beta1.ExternalSecretSpec{ + DataFrom: []esv1beta1.ExternalSecretDataFromRemoteRef{ + { + Extract: &esv1beta1.ExternalSecretDataRemoteRef{ + Key: "snyk-shared-secret", + }, + }, + }, + SecretStoreRef: esv1beta1.SecretStoreRef{ + Name: "appsre-redhat-tenant-vault", + }, + Target: esv1beta1.ExternalSecretTarget{ + Name: "snyk-secret", + }, + }, + } + + assert.Equal(t, expected.Spec, es.Spec) + } +} + func networkPolicySameNamespace() namespaceObjectsCheck { return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, userName string) { np, err := 
memberAwait.WaitForNetworkPolicy(t, ns, "allow-same-namespace") @@ -1314,6 +1459,8 @@ func clusterResourceQuotaMatches(userName, tierName string, hard map[corev1.Reso MatchLabels: map[string]string{ toolchainv1alpha1.SpaceLabelKey: userName, }, + AnnotationSelector: map[string]string{ + "openshift.io/requester": userName, }, }, Quota: corev1.ResourceQuotaSpec{ @@ -1456,6 +1603,17 @@ func appstudioWorkSpaceNameLabel() namespaceObjectsCheck { } } +func redhatInternalTenantLabel() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + + labelWaitCriterion := []wait.LabelWaitCriterion{} + labelWaitCriterion = append(labelWaitCriterion, wait.UntilObjectHasLabel("redhat-internal-tenent/label", "true")) + + _, err := memberAwait.WaitForNamespaceWithName(t, ns.Name, labelWaitCriterion...) + require.NoError(t, err) + } +} + func environment(name string) namespaceObjectsCheck { return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { _, err := memberAwait.WaitForEnvironment(t, ns.Name, name, toolchainLabelsWaitCriterion(owner)...) @@ -1548,6 +1706,97 @@ func appstudioUserActionsRole() spaceRoleObjectsCheck { } } +func appstudioRedhatUserActionsRole() spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + role, err := memberAwait.WaitForRole(t, ns, "appstudio-redhat-user-actions", toolchainLabelsWaitCriterion(owner)...) 
+ require.NoError(t, err) + assert.Len(t, role.Rules, 15) + expected := &rbacv1.Role{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"applications", "components", "componentdetectionqueries"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"promotionruns", "snapshotenvironmentbindings", "snapshots", "environments"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"deploymenttargets", "deploymenttargetclaims"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"managed-gitops.redhat.com"}, + Resources: []string{"gitopsdeployments", "gitopsdeploymentmanagedenvironments", "gitopsdeploymentrepositorycredentials", "gitopsdeploymentsyncruns"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"tekton.dev"}, + Resources: []string{"pipelineruns"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"results.tekton.dev"}, + Resources: []string{"results", "records", "logs"}, + Verbs: []string{"get", "list"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"enterprisecontractpolicies", "integrationtestscenarios", "releases", "releasestrategies", "releaseplans", "releaseplanadmissions"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"jvmbuildservice.io"}, + Resources: []string{"jbsconfigs", "artifactbuilds"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"spiaccesstokenbindings", "spiaccesschecks", "spiaccesstokens", "spifilecontentrequests", "spiaccesstokendataupdates"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"remotesecrets"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: 
[]string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"buildpipelineselectors"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"serviceaccounts"}, + ResourceNames: []string{"appstudio-pipeline"}, + Verbs: []string{"get", "list", "watch", "update", "patch"}, + }, + { + APIGroups: []string{"external-secrets.io"}, + Resources: []string{"externalsecrets"}, + ResourceNames: []string{"snyk-shared-token"}, + Verbs: []string{"get", "list"}, + }, + }, + } + + assert.Equal(t, expected.Rules, role.Rules) + } +} + func appstudioMaintainerUserActionsRole() spaceRoleObjectsCheck { return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { role, err := memberAwait.WaitForRole(t, ns, "appstudio-maintainer-user-actions", toolchainLabelsWaitCriterion(owner)...) @@ -1633,6 +1882,100 @@ func appstudioMaintainerUserActionsRole() spaceRoleObjectsCheck { }, } + assert.Equal(t, expected.Rules, role.Rules) + } + +func appstudioRedhatMaintainerUserActionsRole() spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + role, err := memberAwait.WaitForRole(t, ns, "appstudio-redhat-maintainer-user-actions", toolchainLabelsWaitCriterion(owner)...) 
+ require.NoError(t, err) + assert.Len(t, role.Rules, 16) + expected := &rbacv1.Role{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"applications", "components", "componentdetectionqueries"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"promotionruns", "snapshotenvironmentbindings", "snapshots", "environments"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"deploymenttargets", "deploymenttargetclaims"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"managed-gitops.redhat.com"}, + Resources: []string{"gitopsdeployments", "gitopsdeploymentmanagedenvironments", "gitopsdeploymentrepositorycredentials", "gitopsdeploymentsyncruns"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"tekton.dev"}, + Resources: []string{"pipelineruns"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"results.tekton.dev"}, + Resources: []string{"results", "records", "logs"}, + Verbs: []string{"get", "list"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"integrationtestscenarios"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"enterprisecontractpolicies"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"releases", "releasestrategies", "releaseplans"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"releaseplanadmissions"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"jvmbuildservice.io"}, + Resources: []string{"jbsconfigs", "artifactbuilds"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch"}, + }, + { + APIGroups: 
[]string{"appstudio.redhat.com"}, + Resources: []string{"spiaccesstokenbindings", "spiaccesschecks", "spiaccesstokens", "spifilecontentrequests", "spiaccesstokendataupdates"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"remotesecrets"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"buildpipelineselectors"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"external-secrets.io"}, + Resources: []string{"externalsecrets"}, + ResourceNames: []string{"snyk-shared-token"}, + Verbs: []string{"get", "list"}, + }, + }, + } + assert.Equal(t, expected.Rules, role.Rules) } } @@ -1722,6 +2065,100 @@ func appstudioContributorUserActionsRole() spaceRoleObjectsCheck { }, } + assert.Equal(t, expected.Rules, role.Rules) + } + +func appstudioRedhatContributorUserActionsRole() spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + role, err := memberAwait.WaitForRole(t, ns, "appstudio-redhat-contributor-user-actions", toolchainLabelsWaitCriterion(owner)...) 
+ require.NoError(t, err) + assert.Len(t, role.Rules, 15) + expected := &rbacv1.Role{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"applications", "components", "componentdetectionqueries"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"promotionruns", "snapshotenvironmentbindings", "snapshots", "environments"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"deploymenttargets", "deploymenttargetclaims"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"managed-gitops.redhat.com"}, + Resources: []string{"gitopsdeployments", "gitopsdeploymentmanagedenvironments", "gitopsdeploymentrepositorycredentials", "gitopsdeploymentsyncruns"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"tekton.dev"}, + Resources: []string{"pipelineruns"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"results.tekton.dev"}, + Resources: []string{"results", "records", "logs"}, + Verbs: []string{"get", "list"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"integrationtestscenarios"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"enterprisecontractpolicies"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"releases", "releasestrategies", "releaseplans"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"releaseplanadmissions"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"jvmbuildservice.io"}, + Resources: []string{"jbsconfigs", "artifactbuilds"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: 
[]string{"appstudio.redhat.com"}, + Resources: []string{"spiaccesstokenbindings", "spiaccesschecks", "spiaccesstokens", "spifilecontentrequests"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"remotesecrets"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"buildpipelineselectors"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"external-secrets.io"}, + Resources: []string{"externalsecrets"}, + ResourceNames: []string{"snyk-shared-token"}, + Verbs: []string{"get", "list"}, + }, + }, + } + assert.Equal(t, expected.Rules, role.Rules) } } @@ -1746,6 +2183,27 @@ func appstudioUserActionsRoleBinding(userName string, role string) spaceRoleObje assert.Equal(t, "Role", rb.RoleRef.Kind) assert.Equal(t, "rbac.authorization.k8s.io", rb.RoleRef.APIGroup) } + +func appstudioRedhatUserActionsRoleBinding(userName string, role string) spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + rbName := "" + roleName := "" + if role == "admin" { + roleName = "appstudio-redhat-user-actions" + rbName = fmt.Sprintf("appstudio-redhat-%s-actions-user", userName) + } else { + roleName = fmt.Sprintf("appstudio-redhat-%s-user-actions", role) + rbName = fmt.Sprintf("appstudio-redhat-%s-%s-actions-user", role, userName) + } + rb, err := memberAwait.WaitForRoleBinding(t, ns, rbName, toolchainLabelsWaitCriterion(owner)...) 
+ require.NoError(t, err) + assert.Len(t, rb.Subjects, 1) + assert.Equal(t, "User", rb.Subjects[0].Kind) + assert.Equal(t, userName, rb.Subjects[0].Name) + assert.Equal(t, roleName, rb.RoleRef.Name) + assert.Equal(t, "Role", rb.RoleRef.Kind) + assert.Equal(t, "rbac.authorization.k8s.io", rb.RoleRef.APIGroup) + } } func appstudioViewRoleBinding(userName string) spaceRoleObjectsCheck { diff --git a/testsupport/tiers/checks.go.modified b/testsupport/tiers/checks.go.modified new file mode 100644 index 000000000..9e5ea40cd --- /dev/null +++ b/testsupport/tiers/checks.go.modified @@ -0,0 +1,2019 @@ +package tiers + +import ( + "context" + "fmt" + "reflect" + "strings" + "testing" + + toolchainv1alpha1 "github.com/codeready-toolchain/api/api/v1alpha1" + esv1beta1 "github.com/external-secrets/external-secrets/apis/externalsecrets/v1beta1" + "github.com/codeready-toolchain/toolchain-e2e/testsupport/wait" + + "github.com/davecgh/go-spew/spew" + quotav1 "github.com/openshift/api/quota/v1" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + netv1 "k8s.io/api/networking/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +const ( + // tier names + advanced = "advanced" + appstudio = "appstudio" + appstudio = "appstudio-redhat" + appstudioEnv = "appstudio-env" + base = "base" + base1ns = "base1ns" + base1ns6didler = "base1ns6didler" + base1nsnoidling = "base1nsnoidling" + baseextendedidling = "baseextendedidling" + baselarge = "baselarge" + testTier = "test" + + // common CPU limits + defaultCPULimit = "1" + cpuLimit = "20000m" // All tiers +) + +var ( + providerMatchingLabels = client.MatchingLabels(map[string]string{toolchainv1alpha1.ProviderLabelKey: toolchainv1alpha1.ProviderLabelValue}) +) + +type TierChecks interface { + GetClusterObjectChecks() 
[]clusterObjectsCheck + GetExpectedTemplateRefs(t *testing.T, hostAwait *wait.HostAwaitility) TemplateRefs + GetNamespaceObjectChecks(nsType string) []namespaceObjectsCheck + GetSpaceRoleChecks(spaceRoles map[string][]string) ([]spaceRoleObjectsCheck, error) +} + +func NewChecksForTier(tier *toolchainv1alpha1.NSTemplateTier) (TierChecks, error) { + switch tier.Name { + case base: + return &baseTierChecks{tierName: base}, nil + + case base1ns: + return &base1nsTierChecks{tierName: base1ns}, nil + + case base1nsnoidling: + return &base1nsnoidlingTierChecks{base1nsTierChecks{tierName: base1nsnoidling}}, nil + + case base1ns6didler: + return &base1ns6didlerTierChecks{base1nsTierChecks{tierName: base1ns6didler}}, nil + + case baselarge: + return &baselargeTierChecks{baseTierChecks{tierName: baselarge}}, nil + + case baseextendedidling: + return &baseextendedidlingTierChecks{baseTierChecks{tierName: baseextendedidling}}, nil + + case advanced: + return &advancedTierChecks{baseTierChecks{tierName: advanced}}, nil + + case appstudio: + return &appstudioTierChecks{tierName: appstudio}, nil + + case appstudio: + return &appstudioRedhatTierChecks{tierName: appstudio}, nil + + case appstudioEnv: + return &appstudioEnvTierChecks{tierName: appstudioEnv}, nil + + case testTier: + return &testTierChecks{tierName: testTier}, nil + + default: + return nil, fmt.Errorf("no assertion implementation found for %s", tier.Name) + } +} + +// NewChecksForCustomTier returns a `TierChecks` initialized with the tiers used in the CustomNSTemplateTier +func NewChecksForCustomTier(t *testing.T, tier *CustomNSTemplateTier) TierChecks { + c := &customTierChecks{ + t: t, + tier: tier, + } + return c +} + +var _ TierChecks = &customTierChecks{} + +type customTierChecks struct { + t *testing.T + tier *CustomNSTemplateTier +} + +func (c *customTierChecks) GetNamespaceObjectChecks(nsType string) []namespaceObjectsCheck { + checks, err := NewChecksForTier(c.tier.NamespaceResourcesTier) + 
require.NoError(c.t, err) + return checks.GetNamespaceObjectChecks(nsType) +} + +func (c *customTierChecks) GetSpaceRoleChecks(spaceRoles map[string][]string) ([]spaceRoleObjectsCheck, error) { + checks, err := NewChecksForTier(c.tier.SpaceRolesTier) + require.NoError(c.t, err) + return checks.GetSpaceRoleChecks(spaceRoles) +} + +func (c *customTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { + checks, err := NewChecksForTier(c.tier.ClusterResourcesTier) + require.NoError(c.t, err) + return checks.GetClusterObjectChecks() +} + +func (c *customTierChecks) GetExpectedTemplateRefs(_ *testing.T, _ *wait.HostAwaitility) TemplateRefs { + var clusterResourcesTmplRef *string + if c.tier.NSTemplateTier.Spec.ClusterResources != nil { + clusterResourcesTmplRef = &c.tier.NSTemplateTier.Spec.ClusterResources.TemplateRef + } + namespaceTmplRefs := make([]string, len(c.tier.NSTemplateTier.Spec.Namespaces)) + for i, ns := range c.tier.NSTemplateTier.Spec.Namespaces { + namespaceTmplRefs[i] = ns.TemplateRef + } + + return TemplateRefs{ + ClusterResources: clusterResourcesTmplRef, + Namespaces: namespaceTmplRefs, + } +} + +type baseTierChecks struct { + tierName string +} + +func (a *baseTierChecks) GetNamespaceObjectChecks(nsType string) []namespaceObjectsCheck { + checks := []namespaceObjectsCheck{ + numberOfLimitRanges(1), + limitRange(defaultCPULimit, "750Mi", "10m", "64Mi"), + execPodsRole(), + crtadminPodsRoleBinding(), + crtadminViewRoleBinding(), + } + checks = append(checks, commonNetworkPolicyChecks()...) 
+ var otherNamespaceKind string + switch nsType { + case "dev": + otherNamespaceKind = "stage" + case "stage": + otherNamespaceKind = "dev" + } + checks = append(checks, networkPolicyAllowFromCRW(), networkPolicyAllowFromOtherNamespace(otherNamespaceKind), numberOfNetworkPolicies(7)) + + return checks +} + +func (a *baseTierChecks) GetSpaceRoleChecks(spaceRoles map[string][]string) ([]spaceRoleObjectsCheck, error) { + checks := []spaceRoleObjectsCheck{} + roles := 0 + rolebindings := 0 + for role, usernames := range spaceRoles { + switch role { + case "admin": + checks = append(checks, rbacEditRole()) + roles++ + for _, userName := range usernames { + checks = append(checks, + rbacEditRoleBinding(userName), + userEditRoleBinding(userName), + ) + rolebindings += 2 + } + default: + return nil, fmt.Errorf("unexpected template name: '%s'", role) + } + } + // also count the roles, rolebindings + checks = append(checks, + numberOfToolchainRoles(roles+1), // +1 for `exec-pods` + numberOfToolchainRoleBindings(rolebindings+2), // +2 for `crtadmin-pods` and `crtadmin-view` + ) + return checks, nil +} + +func (a *baseTierChecks) GetExpectedTemplateRefs(t *testing.T, hostAwait *wait.HostAwaitility) TemplateRefs { + templateRefs := GetTemplateRefs(t, hostAwait, a.tierName) + verifyNsTypes(t, a.tierName, templateRefs, "dev", "stage") + return templateRefs +} + +func (a *baseTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { + return clusterObjectsChecks( + clusterResourceQuotaCompute(cpuLimit, "1750m", "7Gi", "40Gi"), + clusterResourceQuotaDeployments("50"), + clusterResourceQuotaReplicas(), + clusterResourceQuotaRoutes(), + clusterResourceQuotaJobs(), + clusterResourceQuotaServices(), + clusterResourceQuotaBuildConfig(), + clusterResourceQuotaSecrets(), + clusterResourceQuotaConfigMap(), + numberOfClusterResourceQuotas(9), + idlers(43200, "dev", "stage")) +} + +type base1nsTierChecks struct { + tierName string +} + +func (a *base1nsTierChecks) 
GetNamespaceObjectChecks(_ string) []namespaceObjectsCheck { + checks := []namespaceObjectsCheck{ + resourceQuotaComputeDeploy("20", "14Gi", "3", "14Gi"), + resourceQuotaComputeBuild("20", "14Gi", "3", "14Gi"), + resourceQuotaStorage("15Gi", "40Gi", "15Gi", "5"), + limitRange(defaultCPULimit, "1000Mi", "10m", "64Mi"), + numberOfLimitRanges(1), + execPodsRole(), + crtadminPodsRoleBinding(), + crtadminViewRoleBinding(), + } + checks = append(checks, commonNetworkPolicyChecks()...) + checks = append(checks, networkPolicyAllowFromCRW(), numberOfNetworkPolicies(6)) + return checks +} + +func (a *base1nsTierChecks) GetSpaceRoleChecks(spaceRoles map[string][]string) ([]spaceRoleObjectsCheck, error) { + checks := []spaceRoleObjectsCheck{} + roles := 0 + rolebindings := 0 + for role, usernames := range spaceRoles { + switch role { + case "admin": + checks = append(checks, rbacEditRole()) + roles++ + for _, userName := range usernames { + checks = append(checks, + rbacEditRoleBinding(userName), + userEditRoleBinding(userName), + ) + rolebindings += 2 + } + default: + return nil, fmt.Errorf("unexpected template name: '%s'", role) + } + } + // also count the roles, rolebindings + checks = append(checks, + numberOfToolchainRoles(roles+1), // +1 for `exec-pods` + numberOfToolchainRoleBindings(rolebindings+2), // +2 for `crtadmin-pods` and `crtadmin-view` + ) + return checks, nil +} + +func (a *base1nsTierChecks) GetExpectedTemplateRefs(t *testing.T, hostAwait *wait.HostAwaitility) TemplateRefs { + templateRefs := GetTemplateRefs(t, hostAwait, a.tierName) + verifyNsTypes(t, a.tierName, templateRefs, "dev") + return templateRefs +} + +func (a *base1nsTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { + return clusterObjectsChecks( + clusterResourceQuotaDeployments("50"), + clusterResourceQuotaReplicas(), + clusterResourceQuotaRoutes(), + clusterResourceQuotaJobs(), + clusterResourceQuotaServices(), + clusterResourceQuotaBuildConfig(), + clusterResourceQuotaSecrets(), + 
clusterResourceQuotaConfigMap(), + numberOfClusterResourceQuotas(8), + idlers(43200, "dev")) +} + +type base1nsnoidlingTierChecks struct { + base1nsTierChecks +} + +func (a *base1nsnoidlingTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { + return clusterObjectsChecks( + clusterResourceQuotaDeployments("50"), + clusterResourceQuotaReplicas(), + clusterResourceQuotaRoutes(), + clusterResourceQuotaJobs(), + clusterResourceQuotaServices(), + clusterResourceQuotaBuildConfig(), + clusterResourceQuotaSecrets(), + clusterResourceQuotaConfigMap(), + numberOfClusterResourceQuotas(8), + idlers(0, "dev")) +} + +type base1ns6didlerTierChecks struct { + base1nsTierChecks +} + +func (a *base1ns6didlerTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { + return clusterObjectsChecks( + clusterResourceQuotaDeployments("50"), + clusterResourceQuotaReplicas(), + clusterResourceQuotaRoutes(), + clusterResourceQuotaJobs(), + clusterResourceQuotaServices(), + clusterResourceQuotaBuildConfig(), + clusterResourceQuotaSecrets(), + clusterResourceQuotaConfigMap(), + numberOfClusterResourceQuotas(8), + idlers(518400, "dev")) +} + +type baselargeTierChecks struct { + baseTierChecks +} + +func (a *baselargeTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { + return clusterObjectsChecks( + clusterResourceQuotaCompute(cpuLimit, "1750m", "16Gi", "40Gi"), + clusterResourceQuotaDeployments("50"), + clusterResourceQuotaReplicas(), + clusterResourceQuotaRoutes(), + clusterResourceQuotaJobs(), + clusterResourceQuotaServices(), + clusterResourceQuotaBuildConfig(), + clusterResourceQuotaSecrets(), + clusterResourceQuotaConfigMap(), + numberOfClusterResourceQuotas(9), + idlers(43200, "dev", "stage")) +} + +type baseextendedidlingTierChecks struct { + baseTierChecks +} + +func (a *baseextendedidlingTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { + return clusterObjectsChecks( + clusterResourceQuotaCompute(cpuLimit, "1750m", "7Gi", "40Gi"), + 
clusterResourceQuotaDeployments("50"), + clusterResourceQuotaReplicas(), + clusterResourceQuotaRoutes(), + clusterResourceQuotaJobs(), + clusterResourceQuotaServices(), + clusterResourceQuotaBuildConfig(), + clusterResourceQuotaSecrets(), + clusterResourceQuotaConfigMap(), + numberOfClusterResourceQuotas(9), + idlers(518400, "dev", "stage")) +} + +// toolchainLabelsWaitCriterion create a slice of LabelWaitCriterion containing all the required toolchain labels and values. +func toolchainLabelsWaitCriterion(userName string) []wait.LabelWaitCriterion { + var labelsWaitCriterion []wait.LabelWaitCriterion + for labelKey, labelValue := range toolchainLabels(userName) { + labelsWaitCriterion = append(labelsWaitCriterion, wait.UntilObjectHasLabel(labelKey, labelValue)) + } + return labelsWaitCriterion +} + +// toolchainLabels returns a map containing the expected kubernetes labels that a toolchain resource should have. +func toolchainLabels(userName string) map[string]string { + return map[string]string{ + toolchainv1alpha1.SpaceLabelKey: userName, + toolchainv1alpha1.ProviderLabelKey: toolchainv1alpha1.ProviderLabelValue, + } +} + +func commonNetworkPolicyChecks() []namespaceObjectsCheck { + return []namespaceObjectsCheck{ + networkPolicySameNamespace(), + networkPolicyAllowFromMonitoring(), + networkPolicyAllowFromIngress(), + networkPolicyAllowFromOlmNamespaces(), + networkPolicyAllowFromConsoleNamespaces(), + } +} + +type advancedTierChecks struct { + baseTierChecks +} + +func (a *advancedTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { + return clusterObjectsChecks( + clusterResourceQuotaCompute(cpuLimit, "1750m", "16Gi", "40Gi"), + clusterResourceQuotaDeployments("50"), + clusterResourceQuotaReplicas(), + clusterResourceQuotaRoutes(), + clusterResourceQuotaJobs(), + clusterResourceQuotaServices(), + clusterResourceQuotaBuildConfig(), + clusterResourceQuotaSecrets(), + clusterResourceQuotaConfigMap(), + numberOfClusterResourceQuotas(9), + idlers(0, "dev", 
"stage")) +} + +func (a *advancedTierChecks) GetExpectedTemplateRefs(t *testing.T, hostAwait *wait.HostAwaitility) TemplateRefs { + templateRefs := GetTemplateRefs(t, hostAwait, a.tierName) + verifyNsTypes(t, a.tierName, templateRefs, "dev", "stage") + return templateRefs +} + +// testTierChecks checks only that the "test" tier exists and has correct template references. +// It does not check the test tier resources +type testTierChecks struct { + tierName string +} + +func (a *testTierChecks) GetNamespaceObjectChecks(_ string) []namespaceObjectsCheck { + return []namespaceObjectsCheck{} +} + +func (a *testTierChecks) GetSpaceRoleChecks(_ map[string][]string) ([]spaceRoleObjectsCheck, error) { + return []spaceRoleObjectsCheck{}, nil +} + +func (a *testTierChecks) GetExpectedTemplateRefs(t *testing.T, hostAwait *wait.HostAwaitility) TemplateRefs { + templateRefs := GetTemplateRefs(t, hostAwait, a.tierName) + verifyNsTypes(t, a.tierName, templateRefs, "dev") + return templateRefs +} + +func (a *testTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { + return []clusterObjectsCheck{} +} + +type appstudioTierChecks struct { + tierName string +} + +func (a *appstudioTierChecks) GetNamespaceObjectChecks(_ string) []namespaceObjectsCheck { + checks := []namespaceObjectsCheck{ + resourceQuotaComputeDeploy("20", "32Gi", "1750m", "32Gi"), + resourceQuotaComputeBuild("60", "64Gi", "6", "32Gi"), + resourceQuotaStorage("50Gi", "50Gi", "50Gi", "12"), + limitRange("2", "2Gi", "10m", "256Mi"), + numberOfLimitRanges(1), + toolchainSaReadRole(), + memberOperatorSaReadRoleBinding(), + gitOpsServiceLabel(), + appstudioWorkSpaceNameLabel(), + environment("development"), + resourceQuotaAppstudioCrds("512", "512", "512"), + resourceQuotaAppstudioCrdsBuild("512"), + resourceQuotaAppstudioCrdsGitops("512", "512", "512", "512", "512"), + resourceQuotaAppstudioCrdsIntegration("512", "512", "512"), + resourceQuotaAppstudioCrdsRelease("512", "512", "512", "512", "512"), + 
resourceQuotaAppstudioCrdsEnterpriseContract("512"), + resourceQuotaAppstudioCrdsSPI("512", "512", "512", "512", "512"), + pipelineServiceAccount(), + pipelineRunnerRoleBinding(), + } + + checks = append(checks, append(commonNetworkPolicyChecks(), networkPolicyAllowFromCRW(), numberOfNetworkPolicies(6))...) + return checks +} + +func (a *appstudioTierChecks) GetSpaceRoleChecks(spaceRoles map[string][]string) ([]spaceRoleObjectsCheck, error) { + checks := []spaceRoleObjectsCheck{} + roles := 0 + rolebindings := 0 + for role, usernames := range spaceRoles { + switch role { + case "admin": + checks = append(checks, appstudioUserActionsRole()) + roles++ + for _, userName := range usernames { + checks = append(checks, + appstudioUserActionsRoleBinding(userName, "admin"), + appstudioViewRoleBinding(userName), + ) + rolebindings += 2 + } + case "maintainer": + checks = append(checks, appstudioMaintainerUserActionsRole()) + roles++ + for _, userName := range usernames { + checks = append(checks, + appstudioUserActionsRoleBinding(userName, "maintainer"), + appstudioViewRoleBinding(userName), + ) + rolebindings += 2 + } + case "contributor": + checks = append(checks, appstudioContributorUserActionsRole()) + roles++ + for _, userName := range usernames { + checks = append(checks, + appstudioUserActionsRoleBinding(userName, "contributor"), + appstudioViewRoleBinding(userName), + ) + rolebindings += 2 + } + default: + return nil, fmt.Errorf("unexpected template name: '%s'", role) + } + } + // also count the roles, rolebindings and service accounts + checks = append(checks, + numberOfToolchainRoles(roles+1), // +1 for `toolchain-sa-read` + numberOfToolchainRoleBindings(rolebindings+2), // +2 for `member-operator-sa-read` and `appstudio-pipelines-runner-rolebinding` + ) + return checks, nil +} + +func (a *appstudioTierChecks) GetExpectedTemplateRefs(t *testing.T, hostAwait *wait.HostAwaitility) TemplateRefs { + templateRefs := GetTemplateRefs(t, hostAwait, a.tierName) + 
verifyNsTypes(t, a.tierName, templateRefs, "tenant") + return templateRefs +} + +func (a *appstudioTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { + return clusterObjectsChecks( + clusterResourceQuotaDeployments("150"), + clusterResourceQuotaReplicas(), + clusterResourceQuotaRoutes(), + clusterResourceQuotaJobs(), + clusterResourceQuotaServices(), + clusterResourceQuotaBuildConfig(), + clusterResourceQuotaSecrets(), + clusterResourceQuotaConfigMap(), + numberOfClusterResourceQuotas(8), + idlers(0, ""), + pipelineRunnerClusterRole()) +} + +type appstudioRedhatTierChecks struct { + tierName string +} + +func (a *appstudioRedhatTierChecks) GetNamespaceObjectChecks(_ string) []namespaceObjectsCheck { + checks := []namespaceObjectsCheck{ + resourceQuotaComputeDeploy("20", "32Gi", "1750m", "32Gi"), + resourceQuotaComputeBuild("60", "64Gi", "6", "32Gi"), + resourceQuotaStorage("50Gi", "50Gi", "50Gi", "12"), + limitRange("2", "2Gi", "10m", "256Mi"), + numberOfLimitRanges(1), + toolchainSaReadRole(), + memberOperatorSaReadRoleBinding(), + gitOpsServiceLabel(), + appstudioWorkSpaceNameLabel(), + redhatInternalTenantLabel(), + environment("development"), + resourceQuotaAppstudioCrds("512", "512", "512"), + resourceQuotaAppstudioCrdsBuild("512"), + resourceQuotaAppstudioCrdsGitops("512", "512", "512", "512", "512"), + resourceQuotaAppstudioCrdsIntegration("512", "512", "512"), + resourceQuotaAppstudioCrdsRelease("512", "512", "512", "512", "512"), + resourceQuotaAppstudioCrdsEnterpriseContract("512"), + resourceQuotaAppstudioCrdsSPI("512", "512", "512", "512", "512"), + pipelineServiceAccount(), + pipelineRunnerRoleBinding(), + } + + checks = append(checks, append(commonNetworkPolicyChecks(), networkPolicyAllowFromCRW(), numberOfNetworkPolicies(6))...) 
+ return checks +} + +func (a *appstudioRedhatTierChecks) GetSpaceRoleChecks(spaceRoles map[string][]string) ([]spaceRoleObjectsCheck, error) { + checks := []spaceRoleObjectsCheck{} + roles := 0 + rolebindings := 0 + for role, usernames := range spaceRoles { + switch role { + case "admin": + checks = append(checks, appstudioUserActionsRole()) + roles++ + for _, userName := range usernames { + checks = append(checks, + appstudioUserActionsRoleBinding(userName, "admin"), + appstudioViewRoleBinding(userName), + ) + rolebindings += 2 + } + case "maintainer": + checks = append(checks, appstudioMaintainerUserActionsRole()) + roles++ + for _, userName := range usernames { + checks = append(checks, + appstudioUserActionsRoleBinding(userName, "maintainer"), + appstudioViewRoleBinding(userName), + ) + rolebindings += 2 + } + case "contributor": + checks = append(checks, appstudioContributorUserActionsRole()) + roles++ + for _, userName := range usernames { + checks = append(checks, + appstudioUserActionsRoleBinding(userName, "contributor"), + appstudioViewRoleBinding(userName), + ) + rolebindings += 2 + } + default: + return nil, fmt.Errorf("unexpected template name: '%s'", role) + } + } + // also count the roles, rolebindings and service accounts + checks = append(checks, + numberOfToolchainRoles(roles+1), // +1 for `toolchain-sa-read` + numberOfToolchainRoleBindings(rolebindings+2), // +2 for `member-operator-sa-read` and `appstudio-pipelines-runner-rolebinding` + ) + return checks, nil +} + +func (a *appstudioRedhatTierChecks) GetExpectedTemplateRefs(t *testing.T, hostAwait *wait.HostAwaitility) TemplateRefs { + templateRefs := GetTemplateRefs(t, hostAwait, a.tierName) + verifyNsTypes(t, a.tierName, templateRefs, "tenant") + return templateRefs +} + +func (a *appstudioRedhatTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { + return clusterObjectsChecks( + clusterResourceQuotaDeployments("150"), + clusterResourceQuotaReplicas(), + clusterResourceQuotaRoutes(), + 
clusterResourceQuotaJobs(), + clusterResourceQuotaServices(), + clusterResourceQuotaBuildConfig(), + clusterResourceQuotaSecrets(), + clusterResourceQuotaConfigMap(), + numberOfClusterResourceQuotas(8), + idlers(0, ""), + pipelineRunnerClusterRole()) +} + +type appstudioEnvTierChecks struct { + tierName string +} + +func (a *appstudioEnvTierChecks) GetNamespaceObjectChecks(_ string) []namespaceObjectsCheck { + checks := []namespaceObjectsCheck{ + resourceQuotaComputeDeploy("20", "32Gi", "1750m", "32Gi"), + zeroResourceQuotaComputeBuild(), + resourceQuotaStorage("50Gi", "50Gi", "50Gi", "12"), + limitRange("2", "2Gi", "10m", "256Mi"), + numberOfLimitRanges(1), + namespaceManagerSA(), + namespaceManagerSaEditRoleBinding(), + gitOpsServiceLabel(), + appstudioWorkSpaceNameLabel(), + } + + checks = append(checks, append(commonNetworkPolicyChecks(), networkPolicyAllowFromCRW(), numberOfNetworkPolicies(6))...) + return checks +} + +func (a *appstudioEnvTierChecks) GetSpaceRoleChecks(spaceRoles map[string][]string) ([]spaceRoleObjectsCheck, error) { + for role := range spaceRoles { + switch role { + case "admin": + // no permissions granted + case "maintainer": + // no permissions granted + case "contributor": + //no permissions granted + default: + return nil, fmt.Errorf("unexpected template name: '%s'", role) + } + } + // count the roles, rolebindings + return []spaceRoleObjectsCheck{ + numberOfToolchainRoles(0), + numberOfToolchainRoleBindings(1), // 1 for `namespace-manager` + }, nil +} + +func (a *appstudioEnvTierChecks) GetExpectedTemplateRefs(t *testing.T, hostAwait *wait.HostAwaitility) TemplateRefs { + templateRefs := GetTemplateRefs(t, hostAwait, a.tierName) + verifyNsTypes(t, a.tierName, templateRefs, "env") + return templateRefs +} + +func (a *appstudioEnvTierChecks) GetClusterObjectChecks() []clusterObjectsCheck { + return clusterObjectsChecks( + clusterResourceQuotaDeployments("150"), + clusterResourceQuotaReplicas(), + clusterResourceQuotaRoutes(), + 
clusterResourceQuotaJobs(), + clusterResourceQuotaServices(), + clusterResourceQuotaBuildConfig(), + clusterResourceQuotaSecrets(), + clusterResourceQuotaConfigMap(), + numberOfClusterResourceQuotas(8), + idlers(0, "env")) +} + +// verifyNsTypes checks that there's a namespace.TemplateRef that begins with `-` for each given templateRef (and no more, no less) +func verifyNsTypes(t *testing.T, tier string, templateRefs TemplateRefs, expectedNSTypes ...string) { + require.Len(t, templateRefs.Namespaces, len(expectedNSTypes)) + actualNSTypes := make([]string, len(expectedNSTypes)) + for i, templateRef := range templateRefs.Namespaces { + actualTier, actualType, err := wait.TierAndType(templateRef) + require.NoError(t, err) + require.Equal(t, tier, actualTier) + actualNSTypes[i] = actualType + } + // now compare with the given `nsTypes` + assert.ElementsMatch(t, expectedNSTypes, actualNSTypes) +} + +type namespaceObjectsCheck func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) + +type spaceRoleObjectsCheck func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) + +type clusterObjectsCheck func(t *testing.T, memberAwait *wait.MemberAwaitility, userName, tierLabel string) + +func userEditRoleBinding(userName string) spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + rb, err := memberAwait.WaitForRoleBinding(t, ns, userName+"-edit", toolchainLabelsWaitCriterion(owner)...) 
+ require.NoError(t, err) + assert.Len(t, rb.Subjects, 1) + assert.Equal(t, "User", rb.Subjects[0].Kind) + assert.Equal(t, userName, rb.Subjects[0].Name) + assert.Equal(t, "edit", rb.RoleRef.Name) + assert.Equal(t, "ClusterRole", rb.RoleRef.Kind) + assert.Equal(t, "rbac.authorization.k8s.io", rb.RoleRef.APIGroup) + } +} + +func rbacEditRoleBinding(userName string) spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + rb, err := memberAwait.WaitForRoleBinding(t, ns, userName+"-rbac-edit", toolchainLabelsWaitCriterion(owner)...) + require.NoError(t, err) + assert.Len(t, rb.Subjects, 1) + assert.Equal(t, "User", rb.Subjects[0].Kind) + assert.Equal(t, userName, rb.Subjects[0].Name) + assert.Equal(t, "rbac-edit", rb.RoleRef.Name) + assert.Equal(t, "Role", rb.RoleRef.Kind) + assert.Equal(t, "rbac.authorization.k8s.io", rb.RoleRef.APIGroup) + } +} + +func crtadminViewRoleBinding() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + rb, err := memberAwait.WaitForRoleBinding(t, ns, "crtadmin-view", toolchainLabelsWaitCriterion(owner)...) + require.NoError(t, err) + assert.Len(t, rb.Subjects, 1) + assert.Equal(t, "Group", rb.Subjects[0].Kind) + assert.Equal(t, "crtadmin-users-view", rb.Subjects[0].Name) + assert.Equal(t, "view", rb.RoleRef.Name) + assert.Equal(t, "ClusterRole", rb.RoleRef.Kind) + assert.Equal(t, "rbac.authorization.k8s.io", rb.RoleRef.APIGroup) + } +} + +func crtadminPodsRoleBinding() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, userName string) { + rb, err := memberAwait.WaitForRoleBinding(t, ns, "crtadmin-pods", toolchainLabelsWaitCriterion(userName)...) 
+ require.NoError(t, err) + assert.Len(t, rb.Subjects, 1) + assert.Equal(t, "Group", rb.Subjects[0].Kind) + assert.Equal(t, "crtadmin-users-view", rb.Subjects[0].Name) + assert.Equal(t, "exec-pods", rb.RoleRef.Name) + assert.Equal(t, "Role", rb.RoleRef.Kind) + assert.Equal(t, "rbac.authorization.k8s.io", rb.RoleRef.APIGroup) + } +} + +func execPodsRole() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, userName string) { + role, err := memberAwait.WaitForRole(t, ns, "exec-pods", toolchainLabelsWaitCriterion(userName)...) + require.NoError(t, err) + assert.Len(t, role.Rules, 1) + expected := &rbacv1.Role{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"pods/exec"}, + Verbs: []string{"get", "list", "watch", "create", "delete", "update"}, + }, + }, + } + + assert.Equal(t, expected.Rules, role.Rules) + } +} + +func rbacEditRole() spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + role, err := memberAwait.WaitForRole(t, ns, "rbac-edit", toolchainLabelsWaitCriterion(owner)...) 
+ require.NoError(t, err) + assert.Len(t, role.Rules, 1) + expected := &rbacv1.Role{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"authorization.openshift.io", "rbac.authorization.k8s.io"}, + Resources: []string{"roles", "rolebindings"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + }, + }, + } + + assert.Equal(t, expected.Rules, role.Rules) + } +} + +func resourceQuotaComputeDeploy(cpuLimit, memoryLimit, cpuRequest, memoryRequest string) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + var err error + spec := corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeNotTerminating}, + Hard: make(map[corev1.ResourceName]resource.Quantity), + } + spec.Hard[corev1.ResourceLimitsCPU], err = resource.ParseQuantity(cpuLimit) + require.NoError(t, err) + spec.Hard[corev1.ResourceLimitsMemory], err = resource.ParseQuantity(memoryLimit) + require.NoError(t, err) + spec.Hard[corev1.ResourceRequestsCPU], err = resource.ParseQuantity(cpuRequest) + require.NoError(t, err) + spec.Hard[corev1.ResourceRequestsMemory], err = resource.ParseQuantity(memoryRequest) + require.NoError(t, err) + + criteria := resourceQuotaMatches(ns.Name, "compute-deploy", spec) + _, err = memberAwait.WaitForResourceQuota(t, ns.Name, "compute-deploy", criteria) + require.NoError(t, err) + } +} + +func resourceQuotaComputeBuild(cpuLimit, memoryLimit, cpuRequest, memoryRequest string) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + var err error + spec := corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeTerminating}, + Hard: make(map[corev1.ResourceName]resource.Quantity), + } + spec.Hard[corev1.ResourceLimitsCPU], err = resource.ParseQuantity(cpuLimit) + require.NoError(t, err) + spec.Hard[corev1.ResourceLimitsMemory], err = 
resource.ParseQuantity(memoryLimit) + require.NoError(t, err) + spec.Hard[corev1.ResourceRequestsCPU], err = resource.ParseQuantity(cpuRequest) + require.NoError(t, err) + spec.Hard[corev1.ResourceRequestsMemory], err = resource.ParseQuantity(memoryRequest) + require.NoError(t, err) + + criteria := resourceQuotaMatches(ns.Name, "compute-build", spec) + _, err = memberAwait.WaitForResourceQuota(t, ns.Name, "compute-build", criteria) + require.NoError(t, err) + } +} + +func zeroResourceQuotaComputeBuild() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + var err error + spec := corev1.ResourceQuotaSpec{ + Scopes: []corev1.ResourceQuotaScope{corev1.ResourceQuotaScopeTerminating}, + Hard: make(map[corev1.ResourceName]resource.Quantity), + } + spec.Hard[corev1.ResourceCPU], err = resource.ParseQuantity("0") + require.NoError(t, err) + spec.Hard[corev1.ResourceMemory], err = resource.ParseQuantity("0") + require.NoError(t, err) + spec.Hard[corev1.ResourcePods], err = resource.ParseQuantity("0") + require.NoError(t, err) + + criteria := resourceQuotaMatches(ns.Name, "compute-build", spec) + _, err = memberAwait.WaitForResourceQuota(t, ns.Name, "compute-build", criteria) + require.NoError(t, err) + } +} + +func resourceQuotaStorage(ephemeralLimit, storageRequest, ephemeralRequest, pvcs string) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + var err error + spec := corev1.ResourceQuotaSpec{ + Hard: make(map[corev1.ResourceName]resource.Quantity), + } + spec.Hard[corev1.ResourceLimitsEphemeralStorage], err = resource.ParseQuantity(ephemeralLimit) + require.NoError(t, err) + spec.Hard[corev1.ResourceRequestsStorage], err = resource.ParseQuantity(storageRequest) + require.NoError(t, err) + spec.Hard[corev1.ResourceRequestsEphemeralStorage], err = resource.ParseQuantity(ephemeralRequest) + require.NoError(t, err) + 
spec.Hard[count(corev1.ResourcePersistentVolumeClaims)], err = resource.ParseQuantity(pvcs) + require.NoError(t, err) + + criteria := resourceQuotaMatches(ns.Name, "storage", spec) + _, err = memberAwait.WaitForResourceQuota(t, ns.Name, "storage", criteria) + require.NoError(t, err) + } +} + +func resourceQuotaAppstudioCrds(applicationsLimit, componentsLimit, componentDetectionQueriesLimit string) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + var err error + spec := corev1.ResourceQuotaSpec{ + Hard: make(map[corev1.ResourceName]resource.Quantity), + } + spec.Hard["count/applications.appstudio.redhat.com"], err = resource.ParseQuantity(applicationsLimit) + require.NoError(t, err) + spec.Hard["count/components.appstudio.redhat.com"], err = resource.ParseQuantity(componentsLimit) + require.NoError(t, err) + spec.Hard["count/componentdetectionqueries.appstudio.redhat.com"], err = resource.ParseQuantity(componentDetectionQueriesLimit) + require.NoError(t, err) + + criteria := resourceQuotaMatches(ns.Name, "appstudio-crds", spec) + _, err = memberAwait.WaitForResourceQuota(t, ns.Name, "appstudio-crds", criteria) + require.NoError(t, err) + } +} + +func resourceQuotaAppstudioCrdsBuild(buildpipelineselectorsLimit string) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + var err error + spec := corev1.ResourceQuotaSpec{ + Hard: make(map[corev1.ResourceName]resource.Quantity), + } + spec.Hard["count/buildpipelineselectors.appstudio.redhat.com"], err = resource.ParseQuantity(buildpipelineselectorsLimit) + require.NoError(t, err) + + criteria := resourceQuotaMatches(ns.Name, "appstudio-crds-build", spec) + _, err = memberAwait.WaitForResourceQuota(t, ns.Name, "appstudio-crds-build", criteria) + require.NoError(t, err) + } +} + +func resourceQuotaAppstudioCrdsGitops(environmentsLimit, promotionrunsLimit, 
deploymenttargetclaimsLimit, deploymenttargetclassesLimit, deploymenttargetsLimit string) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + var err error + spec := corev1.ResourceQuotaSpec{ + Hard: make(map[corev1.ResourceName]resource.Quantity), + } + spec.Hard["count/environments.appstudio.redhat.com"], err = resource.ParseQuantity(environmentsLimit) + require.NoError(t, err) + spec.Hard["count/promotionruns.appstudio.redhat.com"], err = resource.ParseQuantity(promotionrunsLimit) + require.NoError(t, err) + spec.Hard["count/deploymenttargetclaims.appstudio.redhat.com"], err = resource.ParseQuantity(deploymenttargetclaimsLimit) + require.NoError(t, err) + spec.Hard["count/deploymenttargetclasses.appstudio.redhat.com"], err = resource.ParseQuantity(deploymenttargetclassesLimit) + require.NoError(t, err) + spec.Hard["count/deploymenttargets.appstudio.redhat.com"], err = resource.ParseQuantity(deploymenttargetsLimit) + require.NoError(t, err) + + criteria := resourceQuotaMatches(ns.Name, "appstudio-crds-gitops", spec) + _, err = memberAwait.WaitForResourceQuota(t, ns.Name, "appstudio-crds-gitops", criteria) + require.NoError(t, err) + } +} + +func resourceQuotaAppstudioCrdsIntegration(integrationtestscenariosLimit, snapshotsLimit, snapshotenvironmentbindingsLimit string) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + var err error + spec := corev1.ResourceQuotaSpec{ + Hard: make(map[corev1.ResourceName]resource.Quantity), + } + spec.Hard["count/integrationtestscenarios.appstudio.redhat.com"], err = resource.ParseQuantity(integrationtestscenariosLimit) + require.NoError(t, err) + spec.Hard["count/snapshots.appstudio.redhat.com"], err = resource.ParseQuantity(snapshotsLimit) + require.NoError(t, err) + spec.Hard["count/snapshotenvironmentbindings.appstudio.redhat.com"], err = 
resource.ParseQuantity(snapshotenvironmentbindingsLimit) + require.NoError(t, err) + + criteria := resourceQuotaMatches(ns.Name, "appstudio-crds-integration", spec) + _, err = memberAwait.WaitForResourceQuota(t, ns.Name, "appstudio-crds-integration", criteria) + require.NoError(t, err) + } +} + +func resourceQuotaAppstudioCrdsRelease(releaseplanadmissionsLimit, releaseplansLimit, releasesLimit, releasestrategiesLimit, internalrequestsLimit string) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + var err error + spec := corev1.ResourceQuotaSpec{ + Hard: make(map[corev1.ResourceName]resource.Quantity), + } + spec.Hard["count/releaseplanadmissions.appstudio.redhat.com"], err = resource.ParseQuantity(releaseplanadmissionsLimit) + require.NoError(t, err) + spec.Hard["count/releaseplans.appstudio.redhat.com"], err = resource.ParseQuantity(releaseplansLimit) + require.NoError(t, err) + spec.Hard["count/releases.appstudio.redhat.com"], err = resource.ParseQuantity(releasesLimit) + require.NoError(t, err) + spec.Hard["count/releasestrategies.appstudio.redhat.com"], err = resource.ParseQuantity(releasestrategiesLimit) + require.NoError(t, err) + spec.Hard["count/internalrequests.appstudio.redhat.com"], err = resource.ParseQuantity(internalrequestsLimit) + require.NoError(t, err) + + criteria := resourceQuotaMatches(ns.Name, "appstudio-crds-release", spec) + _, err = memberAwait.WaitForResourceQuota(t, ns.Name, "appstudio-crds-release", criteria) + require.NoError(t, err) + } +} + +func resourceQuotaAppstudioCrdsEnterpriseContract(enterprisecontractpoliciesLimit string) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + var err error + spec := corev1.ResourceQuotaSpec{ + Hard: make(map[corev1.ResourceName]resource.Quantity), + } + spec.Hard["count/enterprisecontractpolicies.appstudio.redhat.com"], err = 
resource.ParseQuantity(enterprisecontractpoliciesLimit) + require.NoError(t, err) + + criteria := resourceQuotaMatches(ns.Name, "appstudio-crds-enterprisecontract", spec) + _, err = memberAwait.WaitForResourceQuota(t, ns.Name, "appstudio-crds-enterprisecontract", criteria) + require.NoError(t, err) + } +} + +func resourceQuotaAppstudioCrdsSPI(spiaccesschecksLimit, spiaccesstokenbindingsLimit, spiaccesstokendataupdatesLimit, spiaccesstokensLimit, spifilecontentrequestsLimit string) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + var err error + spec := corev1.ResourceQuotaSpec{ + Hard: make(map[corev1.ResourceName]resource.Quantity), + } + spec.Hard["count/spiaccesschecks.appstudio.redhat.com"], err = resource.ParseQuantity(spiaccesschecksLimit) + require.NoError(t, err) + spec.Hard["count/spiaccesstokenbindings.appstudio.redhat.com"], err = resource.ParseQuantity(spiaccesstokenbindingsLimit) + require.NoError(t, err) + spec.Hard["count/spiaccesstokendataupdates.appstudio.redhat.com"], err = resource.ParseQuantity(spiaccesstokendataupdatesLimit) + require.NoError(t, err) + spec.Hard["count/spiaccesstokens.appstudio.redhat.com"], err = resource.ParseQuantity(spiaccesstokensLimit) + require.NoError(t, err) + spec.Hard["count/spifilecontentrequests.appstudio.redhat.com"], err = resource.ParseQuantity(spifilecontentrequestsLimit) + require.NoError(t, err) + + criteria := resourceQuotaMatches(ns.Name, "appstudio-crds-spi", spec) + _, err = memberAwait.WaitForResourceQuota(t, ns.Name, "appstudio-crds-spi", criteria) + require.NoError(t, err) + } +} + +func limitRange(cpuLimit, memoryLimit, cpuRequest, memoryRequest string) namespaceObjectsCheck { // nolint:unparam + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, userName string) { + lr, err := memberAwait.WaitForLimitRange(t, ns, "resource-limits") + require.NoError(t, err) + def := 
make(map[corev1.ResourceName]resource.Quantity) + def[corev1.ResourceCPU], err = resource.ParseQuantity(cpuLimit) + require.NoError(t, err) + def[corev1.ResourceMemory], err = resource.ParseQuantity(memoryLimit) + require.NoError(t, err) + defReq := make(map[corev1.ResourceName]resource.Quantity) + defReq[corev1.ResourceCPU], err = resource.ParseQuantity(cpuRequest) + require.NoError(t, err) + defReq[corev1.ResourceMemory], err = resource.ParseQuantity(memoryRequest) + require.NoError(t, err) + assert.Equal(t, toolchainv1alpha1.ProviderLabelValue, lr.ObjectMeta.Labels[toolchainv1alpha1.ProviderLabelKey]) + expected := &corev1.LimitRange{ + Spec: corev1.LimitRangeSpec{ + Limits: []corev1.LimitRangeItem{ + { + Type: "Container", + Default: def, + DefaultRequest: defReq, + }, + }, + }, + } + + assert.Equal(t, expected.Spec, lr.Spec) + } +} + +func externalSecretSnykSharedToken() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, userName string) { + es, err := memberAwait.WaitForExternalSecret(t, ns, "snyk-shared-token") + require.NoError(t, err) + assert.Equal(t, toolchainv1alpha1.ProviderLabelValue, es.ObjectMeta.Labels[toolchainv1alpha1.ProviderLabelKey]) + expected := &esv1beta1.ExternalSecret{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: ExternalSecretName, + // Namespace: ExternalSecretNamespace, + // Annotations: map[string]string{ + // "argocd.argoproj.io/sync-options": "SkipDryRunOnMissingResource=true", + // "argocd.argoproj.io/sync-wave": "-1", + // }, + // }, + Spec: esv1beta1.ExternalSecretSpec{ + DataFrom: []esv1beta1.ExternalSecretDataFromRemoteRef{ + { + Extract: &esv1beta1.ExternalSecretDataRemoteRef{ + Key: "snyk-shared-secret", + }, + }, + }, + //RefreshInterval: &metav1.Duration{Duration: time.Second} + //RefreshInterval: &metav1.Duration{Duration: 1h} + SecretStoreRef: esv1beta1.SecretStoreRef{ + Name: "appsre-redhat-tenant-vault", + }, + Target: esv1beta1.ExternalSecretTarget{ + Name: 
"snyk-secret", + }, + }, + Spec: esv1beta1.ExternalSecretSpec{ + Ingress: []etv1.NetworkPolicyIngressRule{ + { + From: []netv1.NetworkPolicyPeer{ + { + PodSelector: &metav1.LabelSelector{}, + }, + }, + }, + }, + PolicyTypes: []netv1.PolicyType{netv1.PolicyTypeIngress}, + }, + } + + assert.Equal(t, expected.Spec, es.Spec) + } +} + +func networkPolicySameNamespace() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, userName string) { + np, err := memberAwait.WaitForNetworkPolicy(t, ns, "allow-same-namespace") + require.NoError(t, err) + assert.Equal(t, toolchainv1alpha1.ProviderLabelValue, np.ObjectMeta.Labels[toolchainv1alpha1.ProviderLabelKey]) + expected := &netv1.NetworkPolicy{ + Spec: netv1.NetworkPolicySpec{ + Ingress: []netv1.NetworkPolicyIngressRule{ + { + From: []netv1.NetworkPolicyPeer{ + { + PodSelector: &metav1.LabelSelector{}, + }, + }, + }, + }, + PolicyTypes: []netv1.PolicyType{netv1.PolicyTypeIngress}, + }, + } + + assert.Equal(t, expected.Spec, np.Spec) + } +} + +func networkPolicyAllowFromOtherNamespace(otherNamespaceKinds ...string) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, userName string) { + var networkPolicyPeers []netv1.NetworkPolicyPeer + for _, other := range otherNamespaceKinds { + networkPolicyPeers = append(networkPolicyPeers, netv1.NetworkPolicyPeer{ + NamespaceSelector: &metav1.LabelSelector{ + MatchLabels: map[string]string{ + "name": fmt.Sprintf("%s-%s", userName, other), + }, + }, + }) + } + + np, err := memberAwait.WaitForNetworkPolicy(t, ns, "allow-from-other-user-namespaces") + require.NoError(t, err) + expected := &netv1.NetworkPolicy{ + Spec: netv1.NetworkPolicySpec{ + PodSelector: metav1.LabelSelector{}, + Ingress: []netv1.NetworkPolicyIngressRule{ + { + From: networkPolicyPeers, + }, + }, + PolicyTypes: []netv1.PolicyType{netv1.PolicyTypeIngress}, + }, + } + + assert.Equal(t, expected.Spec, np.Spec) + 
} +} + +func networkPolicyAllowFromIngress() namespaceObjectsCheck { + return networkPolicyIngressFromPolicyGroup("allow-from-openshift-ingress", "ingress") +} + +func networkPolicyAllowFromMonitoring() namespaceObjectsCheck { + return networkPolicyIngressFromPolicyGroup("allow-from-openshift-monitoring", "monitoring") +} + +func networkPolicyAllowFromOlmNamespaces() namespaceObjectsCheck { + return networkPolicyIngress("allow-from-olm-namespaces", "openshift.io/scc", "anyuid") +} + +func networkPolicyAllowFromConsoleNamespaces() namespaceObjectsCheck { + return networkPolicyIngressFromPolicyGroup("allow-from-console-namespaces", "console") +} + +func networkPolicyAllowFromCRW() namespaceObjectsCheck { + return networkPolicyIngressFromPolicyGroup("allow-from-codeready-workspaces-operator", "codeready-workspaces") +} + +func networkPolicyIngressFromPolicyGroup(name, group string) namespaceObjectsCheck { + return networkPolicyIngress(name, "network.openshift.io/policy-group", group) +} + +func networkPolicyIngress(name, labelName, labelValue string) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, userName string) { + np, err := memberAwait.WaitForNetworkPolicy(t, ns, name) + require.NoError(t, err) + assert.Equal(t, toolchainv1alpha1.ProviderLabelValue, np.ObjectMeta.Labels[toolchainv1alpha1.ProviderLabelKey]) + expected := &netv1.NetworkPolicy{ + Spec: netv1.NetworkPolicySpec{ + Ingress: []netv1.NetworkPolicyIngressRule{ + { + From: []netv1.NetworkPolicyPeer{ + { + NamespaceSelector: &metav1.LabelSelector{MatchLabels: map[string]string{labelName: labelValue}}, + }, + }, + }, + }, + PolicyTypes: []netv1.PolicyType{netv1.PolicyTypeIngress}, + }, + } + + assert.Equal(t, expected.Spec, np.Spec) + } +} + +type clusterObjectsCheckCreator func() clusterObjectsCheck + +func clusterObjectsChecks(checkCreator ...clusterObjectsCheckCreator) []clusterObjectsCheck { + var checks []clusterObjectsCheck + for _, 
createCheck := range checkCreator { + checks = append(checks, createCheck()) + } + return checks +} + +func idlers(timeoutSeconds int, namespaceTypes ...string) clusterObjectsCheckCreator { + return func() clusterObjectsCheck { + return func(t *testing.T, memberAwait *wait.MemberAwaitility, userName, tierLabel string) { + idlerWaitCriterion := []wait.IdlerWaitCriterion{ + wait.IdlerHasTier(tierLabel), + wait.IdlerHasTimeoutSeconds(timeoutSeconds), + } + // cast generic labels wait criterion into idler wait criterion + for expectedKey, expectedValue := range toolchainLabels(userName) { + idlerWaitCriterion = append(idlerWaitCriterion, wait.IdlerHasLabel(expectedKey, expectedValue)) + } + for _, nt := range namespaceTypes { + var idlerName string + if nt == "" { + idlerName = userName + } else { + idlerName = fmt.Sprintf("%s-%s", userName, nt) + } + _, err := memberAwait.WaitForIdler(t, idlerName, idlerWaitCriterion...) + require.NoError(t, err) + } + + // Make sure there is no unexpected idlers + idlers := &toolchainv1alpha1.IdlerList{} + err := memberAwait.Client.List(context.TODO(), idlers, + client.MatchingLabels(map[string]string{ + toolchainv1alpha1.ProviderLabelKey: toolchainv1alpha1.ProviderLabelValue, + toolchainv1alpha1.SpaceLabelKey: userName, + })) + require.NoError(t, err) + assert.Len(t, idlers.Items, len(namespaceTypes)) + } + } +} + +func clusterResourceQuotaCompute(cpuLimit, cpuRequest, memoryLimit, storageLimit string) clusterObjectsCheckCreator { // nolint:unparam + return func() clusterObjectsCheck { + return func(t *testing.T, memberAwait *wait.MemberAwaitility, userName, tierLabel string) { + var err error + hard := make(map[corev1.ResourceName]resource.Quantity) + hard[corev1.ResourceLimitsCPU], err = resource.ParseQuantity(cpuLimit) + require.NoError(t, err) + hard[corev1.ResourceLimitsMemory], err = resource.ParseQuantity(memoryLimit) + require.NoError(t, err) + hard[corev1.ResourceLimitsEphemeralStorage], err = resource.ParseQuantity("7Gi") 
+ require.NoError(t, err) + hard[corev1.ResourceRequestsCPU], err = resource.ParseQuantity(cpuRequest) + require.NoError(t, err) + hard[corev1.ResourceRequestsMemory], err = resource.ParseQuantity(memoryLimit) + require.NoError(t, err) + hard[corev1.ResourceRequestsStorage], err = resource.ParseQuantity(storageLimit) + require.NoError(t, err) + hard[corev1.ResourceRequestsEphemeralStorage], err = resource.ParseQuantity("7Gi") + require.NoError(t, err) + hard[count(corev1.ResourcePersistentVolumeClaims)], err = resource.ParseQuantity("5") + require.NoError(t, err) + + _, err = memberAwait.WaitForClusterResourceQuota(t, fmt.Sprintf("for-%s-compute", userName), + crqToolchainLabelsWaitCriterion(userName), + clusterResourceQuotaMatches(userName, tierLabel, hard), + ) + require.NoError(t, err) + } + } +} + +// crqToolchainLabelsWaitCriterion checks that expected labels are set on the ClusterResourceQuota. +func crqToolchainLabelsWaitCriterion(userName string) wait.ClusterResourceQuotaWaitCriterion { + return wait.ClusterResourceQuotaWaitCriterion{ + Match: func(actual *quotav1.ClusterResourceQuota) bool { + for expectedLabelKey, expectedLabelValue := range toolchainLabels(userName) { + actualVal, found := actual.Labels[expectedLabelKey] + if !found || expectedLabelValue != actualVal { + return false + } + } + // all expected labels are matching + return true + }, + Diff: func(actual *quotav1.ClusterResourceQuota) string { + return fmt.Sprintf("unable to match expected labels on ClusterResourceQuota: %s.\n%s", actual.Name, wait.Diff(toolchainLabels(userName), actual.GetLabels())) + }, + } +} + +func clusterResourceQuotaDeployments(pods string) clusterObjectsCheckCreator { + return func() clusterObjectsCheck { + return func(t *testing.T, memberAwait *wait.MemberAwaitility, userName, tierLabel string) { + var err error + hard := make(map[corev1.ResourceName]resource.Quantity) + hard[count("deployments.apps")], err = resource.ParseQuantity("30") + require.NoError(t, err) + 
hard[count("deploymentconfigs.apps")], err = resource.ParseQuantity("30") + require.NoError(t, err) + hard[count(corev1.ResourcePods)], err = resource.ParseQuantity(pods) + require.NoError(t, err) + + _, err = memberAwait.WaitForClusterResourceQuota(t, fmt.Sprintf("for-%s-deployments", userName), + crqToolchainLabelsWaitCriterion(userName), + clusterResourceQuotaMatches(userName, tierLabel, hard), + ) + require.NoError(t, err) + } + } +} + +func clusterResourceQuotaReplicas() clusterObjectsCheckCreator { + return func() clusterObjectsCheck { + return func(t *testing.T, memberAwait *wait.MemberAwaitility, userName, tierLabel string) { + var err error + hard := make(map[corev1.ResourceName]resource.Quantity) + hard[count("replicasets.apps")], err = resource.ParseQuantity("30") + require.NoError(t, err) + hard[count(corev1.ResourceReplicationControllers)], err = resource.ParseQuantity("30") + require.NoError(t, err) + + _, err = memberAwait.WaitForClusterResourceQuota(t, fmt.Sprintf("for-%s-replicas", userName), + crqToolchainLabelsWaitCriterion(userName), + clusterResourceQuotaMatches(userName, tierLabel, hard), + ) + require.NoError(t, err) + } + } +} + +func clusterResourceQuotaRoutes() clusterObjectsCheckCreator { + return func() clusterObjectsCheck { + return func(t *testing.T, memberAwait *wait.MemberAwaitility, userName, tierLabel string) { + var err error + hard := make(map[corev1.ResourceName]resource.Quantity) + hard[count("routes.route.openshift.io")], err = resource.ParseQuantity("30") + require.NoError(t, err) + hard[count("ingresses.extensions")], err = resource.ParseQuantity("30") + require.NoError(t, err) + + _, err = memberAwait.WaitForClusterResourceQuota(t, fmt.Sprintf("for-%s-routes", userName), + crqToolchainLabelsWaitCriterion(userName), + clusterResourceQuotaMatches(userName, tierLabel, hard), + ) + require.NoError(t, err) + } + } +} + +func clusterResourceQuotaJobs() clusterObjectsCheckCreator { + return func() clusterObjectsCheck { + return 
func(t *testing.T, memberAwait *wait.MemberAwaitility, userName, tierLabel string) { + var err error + hard := make(map[corev1.ResourceName]resource.Quantity) + hard[count("daemonsets.apps")], err = resource.ParseQuantity("30") + require.NoError(t, err) + hard[count("statefulsets.apps")], err = resource.ParseQuantity("30") + require.NoError(t, err) + hard[count("jobs.batch")], err = resource.ParseQuantity("30") + require.NoError(t, err) + hard[count("cronjobs.batch")], err = resource.ParseQuantity("30") + require.NoError(t, err) + + _, err = memberAwait.WaitForClusterResourceQuota(t, fmt.Sprintf("for-%s-jobs", userName), + crqToolchainLabelsWaitCriterion(userName), + clusterResourceQuotaMatches(userName, tierLabel, hard), + ) + require.NoError(t, err) + } + } +} + +func clusterResourceQuotaServices() clusterObjectsCheckCreator { + return func() clusterObjectsCheck { + return func(t *testing.T, memberAwait *wait.MemberAwaitility, userName, tierLabel string) { + var err error + hard := make(map[corev1.ResourceName]resource.Quantity) + hard[count(corev1.ResourceServices)], err = resource.ParseQuantity("30") + require.NoError(t, err) + + _, err = memberAwait.WaitForClusterResourceQuota(t, fmt.Sprintf("for-%s-services", userName), + crqToolchainLabelsWaitCriterion(userName), + clusterResourceQuotaMatches(userName, tierLabel, hard), + ) + require.NoError(t, err) + } + } +} + +func clusterResourceQuotaBuildConfig() clusterObjectsCheckCreator { + return func() clusterObjectsCheck { + return func(t *testing.T, memberAwait *wait.MemberAwaitility, userName, tierLabel string) { + var err error + hard := make(map[corev1.ResourceName]resource.Quantity) + hard[count("buildconfigs.build.openshift.io")], err = resource.ParseQuantity("30") + require.NoError(t, err) + + _, err = memberAwait.WaitForClusterResourceQuota(t, fmt.Sprintf("for-%s-bc", userName), + crqToolchainLabelsWaitCriterion(userName), + clusterResourceQuotaMatches(userName, tierLabel, hard), + ) + require.NoError(t, 
err) + } + } +} + +func clusterResourceQuotaSecrets() clusterObjectsCheckCreator { + return func() clusterObjectsCheck { + return func(t *testing.T, memberAwait *wait.MemberAwaitility, userName, tierLabel string) { + var err error + hard := make(map[corev1.ResourceName]resource.Quantity) + hard[count(corev1.ResourceSecrets)], err = resource.ParseQuantity("100") + require.NoError(t, err) + + _, err = memberAwait.WaitForClusterResourceQuota(t, fmt.Sprintf("for-%s-secrets", userName), + crqToolchainLabelsWaitCriterion(userName), + clusterResourceQuotaMatches(userName, tierLabel, hard), + ) + require.NoError(t, err) + } + } +} + +func clusterResourceQuotaConfigMap() clusterObjectsCheckCreator { + return func() clusterObjectsCheck { + return func(t *testing.T, memberAwait *wait.MemberAwaitility, userName, tierLabel string) { + var err error + hard := make(map[corev1.ResourceName]resource.Quantity) + hard[count(corev1.ResourceConfigMaps)], err = resource.ParseQuantity("100") + require.NoError(t, err) + + _, err = memberAwait.WaitForClusterResourceQuota(t, fmt.Sprintf("for-%s-cm", userName), + crqToolchainLabelsWaitCriterion(userName), + clusterResourceQuotaMatches(userName, tierLabel, hard), + ) + require.NoError(t, err) + } + } +} + +func clusterResourceQuotaMatches(userName, tierName string, hard map[corev1.ResourceName]resource.Quantity) wait.ClusterResourceQuotaWaitCriterion { + return wait.ClusterResourceQuotaWaitCriterion{ + Match: func(actual *quotav1.ClusterResourceQuota) bool { + expectedQuotaSpec := quotav1.ClusterResourceQuotaSpec{ + Selector: quotav1.ClusterResourceQuotaSelector{ + AnnotationSelector: map[string]string{ + "openshift.io/requester": userName, + }, + }, + Quota: corev1.ResourceQuotaSpec{ + Hard: hard, + }, + } + return actual.Labels != nil && tierName == actual.Labels["toolchain.dev.openshift.com/tier"] && + reflect.DeepEqual(expectedQuotaSpec, actual.Spec) + }, + Diff: func(actual *quotav1.ClusterResourceQuota) string { + return 
fmt.Sprintf("expected ClusterResourceQuota to match for %s/%s: %s", userName, tierName, wait.Diff(hard, actual.Spec.Quota.Hard)) + }, + } +} + +func resourceQuotaMatches(namespace, name string, spec corev1.ResourceQuotaSpec) wait.ResourceQuotaWaitCriterion { + return wait.ResourceQuotaWaitCriterion{ + Match: func(actual *corev1.ResourceQuota) bool { + expectedQuotaSpec := spec + return reflect.DeepEqual(expectedQuotaSpec, actual.Spec) + }, + Diff: func(actual *corev1.ResourceQuota) string { + return fmt.Sprintf("expected ResourceQuota to match (namespace=%s, name=%s):\n%s", namespace, name, wait.Diff(spec, actual.Spec)) + }, + } +} + +func count(resource corev1.ResourceName) corev1.ResourceName { + return corev1.ResourceName(fmt.Sprintf("count/%s", resource)) +} + +func numberOfToolchainRoles(number int) spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + roles := &rbacv1.RoleList{} + err := memberAwait.WaitForExpectedNumberOfResources(t, ns.Name, "Roles", number, func() (int, error) { + err := memberAwait.Client.List(context.TODO(), roles, providerMatchingLabels, client.InNamespace(ns.Name)) + require.NoError(t, err) + return len(roles.Items), err + }) + if err != nil { + rs := make([]string, len(roles.Items)) + for i, r := range roles.Items { + rs[i] = r.Name + } + t.Logf("found %d roles: %s", len(roles.Items), spew.Sdump(rs)) + if nsTmplSet, err := memberAwait.WaitForNSTmplSet(t, owner); err == nil { + t.Logf("associated NSTemplateSet: %s", spew.Sdump(nsTmplSet)) + } + } + require.NoError(t, err) + } +} + +func numberOfToolchainRoleBindings(number int) spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + roleBindings := &rbacv1.RoleBindingList{} + err := memberAwait.WaitForExpectedNumberOfResources(t, ns.Name, "RoleBindings", number, func() (int, error) { + err := memberAwait.Client.List(context.TODO(), 
roleBindings, providerMatchingLabels, client.InNamespace(ns.Name)) + require.NoError(t, err) + return len(roleBindings.Items), err + }) + if err != nil { + rbs := make([]string, len(roleBindings.Items)) + for i, rb := range roleBindings.Items { + rbs[i] = rb.Name + } + t.Logf("found %d role bindings: %s", len(roleBindings.Items), spew.Sdump(rbs)) + if nsTmplSet, err := memberAwait.WaitForNSTmplSet(t, owner); err == nil { + t.Logf("associated NSTemplateSet: %s", spew.Sdump(nsTmplSet)) + } + } + require.NoError(t, err) + } +} + +func numberOfLimitRanges(number int) namespaceObjectsCheck { // nolint:unparam + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + err := memberAwait.WaitForExpectedNumberOfResources(t, ns.Name, "LimitRanges", number, func() (int, error) { + limitRanges := &corev1.LimitRangeList{} + err := memberAwait.Client.List(context.TODO(), limitRanges, providerMatchingLabels, client.InNamespace(ns.Name)) + require.NoError(t, err) + return len(limitRanges.Items), err + }) + require.NoError(t, err) + } +} + +func numberOfNetworkPolicies(number int) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + err := memberAwait.WaitForExpectedNumberOfResources(t, ns.Name, "NetworkPolicies", number, func() (int, error) { + nps := &netv1.NetworkPolicyList{} + err := memberAwait.Client.List(context.TODO(), nps, providerMatchingLabels, client.InNamespace(ns.Name)) + require.NoError(t, err) + return len(nps.Items), err + }) + require.NoError(t, err) + } +} + +func numberOfClusterResourceQuotas(number int) clusterObjectsCheckCreator { + return func() clusterObjectsCheck { + return func(t *testing.T, memberAwait *wait.MemberAwaitility, userName, tierLabel string) { + err := memberAwait.WaitForExpectedNumberOfClusterResources(t, "ClusterResourceQuotas", number, func() (int, error) { + quotas := &quotav1.ClusterResourceQuotaList{} + matchingLabels := 
client.MatchingLabels(map[string]string{ // make sure we only list the ClusterResourceQuota resources associated with the given "userName" + toolchainv1alpha1.ProviderLabelKey: toolchainv1alpha1.ProviderLabelValue, + toolchainv1alpha1.SpaceLabelKey: userName, + }) + err := memberAwait.Client.List(context.TODO(), quotas, matchingLabels) + require.NoError(t, err) + return len(quotas.Items), err + }) + require.NoError(t, err) + } + } +} + +// Appstudio tier specific objects + +func gitOpsServiceLabel() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, _ string) { + // TODO fix for migration/existing namespaces cases + labelWaitCriterion := []wait.LabelWaitCriterion{} + if !strings.HasPrefix(ns.Name, "migration-") { + labelWaitCriterion = append(labelWaitCriterion, wait.UntilObjectHasLabel("argocd.argoproj.io/managed-by", "gitops-service-argocd")) + } + _, err := memberAwait.WaitForNamespaceWithName(t, ns.Name, labelWaitCriterion...) + require.NoError(t, err) + } +} + +func appstudioWorkSpaceNameLabel() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + + labelWaitCriterion := []wait.LabelWaitCriterion{} + labelWaitCriterion = append(labelWaitCriterion, wait.UntilObjectHasLabel("appstudio.redhat.com/workspace_name", owner)) + + _, err := memberAwait.WaitForNamespaceWithName(t, ns.Name, labelWaitCriterion...) + require.NoError(t, err) + } +} + +func redhatInternalTenantLabel() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + + labelWaitCriterion := []wait.LabelWaitCriterion{} + labelWaitCriterion = append(labelWaitCriterion, wait.UntilObjectHasLabel("redhat-internal-tenent/label", "true")) + + _, err := memberAwait.WaitForNamespaceWithName(t, ns.Name, labelWaitCriterion...) 
+ require.NoError(t, err) + } +} + +func environment(name string) namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + _, err := memberAwait.WaitForEnvironment(t, ns.Name, name, toolchainLabelsWaitCriterion(owner)...) + require.NoError(t, err) + } +} + +func appstudioUserActionsRole() spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + role, err := memberAwait.WaitForRole(t, ns, "appstudio-user-actions", toolchainLabelsWaitCriterion(owner)...) + require.NoError(t, err) + assert.Len(t, role.Rules, 14) + expected := &rbacv1.Role{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"applications", "components", "componentdetectionqueries"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"promotionruns", "snapshotenvironmentbindings", "snapshots", "environments"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"deploymenttargets", "deploymenttargetclaims"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"managed-gitops.redhat.com"}, + Resources: []string{"gitopsdeployments", "gitopsdeploymentmanagedenvironments", "gitopsdeploymentrepositorycredentials", "gitopsdeploymentsyncruns"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"tekton.dev"}, + Resources: []string{"pipelineruns"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"results.tekton.dev"}, + Resources: []string{"results", "records", "logs"}, + Verbs: []string{"get", "list"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"enterprisecontractpolicies", "integrationtestscenarios", "releases", "releasestrategies", "releaseplans", "releaseplanadmissions"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"jvmbuildservice.io"}, + 
Resources: []string{"jbsconfigs", "artifactbuilds"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"spiaccesstokenbindings", "spiaccesschecks", "spiaccesstokens", "spifilecontentrequests", "spiaccesstokendataupdates"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"remotesecrets"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"secrets"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"buildpipelineselectors"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch", "delete"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"serviceaccounts"}, + ResourceNames: []string{"appstudio-pipeline"}, + Verbs: []string{"get", "list", "watch", "update", "patch"}, + }, + }, + } + + assert.Equal(t, expected.Rules, role.Rules) + } +} + +func appstudioMaintainerUserActionsRole() spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + role, err := memberAwait.WaitForRole(t, ns, "appstudio-maintainer-user-actions", toolchainLabelsWaitCriterion(owner)...) 
+ require.NoError(t, err) + assert.Len(t, role.Rules, 15) + expected := &rbacv1.Role{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"applications", "components", "componentdetectionqueries"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"promotionruns", "snapshotenvironmentbindings", "snapshots", "environments"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"deploymenttargets", "deploymenttargetclaims"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"managed-gitops.redhat.com"}, + Resources: []string{"gitopsdeployments", "gitopsdeploymentmanagedenvironments", "gitopsdeploymentrepositorycredentials", "gitopsdeploymentsyncruns"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"tekton.dev"}, + Resources: []string{"pipelineruns"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"results.tekton.dev"}, + Resources: []string{"results", "records", "logs"}, + Verbs: []string{"get", "list"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"integrationtestscenarios"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"enterprisecontractpolicies"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"releases", "releasestrategies", "releaseplans"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"releaseplanadmissions"}, + Verbs: []string{"*"}, + }, + { + APIGroups: []string{"jvmbuildservice.io"}, + Resources: []string{"jbsconfigs", "artifactbuilds"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch"}, + }, + { + APIGroups: 
[]string{"appstudio.redhat.com"}, + Resources: []string{"spiaccesstokenbindings", "spiaccesschecks", "spiaccesstokens", "spifilecontentrequests", "spiaccesstokendataupdates"}, + Verbs: []string{"get", "list", "watch", "create", "update", "patch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"remotesecrets"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"buildpipelineselectors"}, + Verbs: []string{"get", "list", "watch"}, + }, + }, + } + + assert.Equal(t, expected.Rules, role.Rules) + } +} + +func appstudioContributorUserActionsRole() spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + role, err := memberAwait.WaitForRole(t, ns, "appstudio-contributor-user-actions", toolchainLabelsWaitCriterion(owner)...) 
+ require.NoError(t, err) + assert.Len(t, role.Rules, 15) + expected := &rbacv1.Role{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"applications", "components", "componentdetectionqueries"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"promotionruns", "snapshotenvironmentbindings", "snapshots", "environments"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"deploymenttargets", "deploymenttargetclaims"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"managed-gitops.redhat.com"}, + Resources: []string{"gitopsdeployments", "gitopsdeploymentmanagedenvironments", "gitopsdeploymentrepositorycredentials", "gitopsdeploymentsyncruns"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"tekton.dev"}, + Resources: []string{"pipelineruns"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"results.tekton.dev"}, + Resources: []string{"results", "records", "logs"}, + Verbs: []string{"get", "list"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"integrationtestscenarios"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"enterprisecontractpolicies"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"releases", "releasestrategies", "releaseplans"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"releaseplanadmissions"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"jvmbuildservice.io"}, + Resources: []string{"jbsconfigs", "artifactbuilds"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: 
[]string{"appstudio.redhat.com"}, + Resources: []string{"spiaccesstokenbindings", "spiaccesschecks", "spiaccesstokens", "spifilecontentrequests"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"remotesecrets"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"configmaps"}, + Verbs: []string{"get", "list", "watch"}, + }, + { + APIGroups: []string{"appstudio.redhat.com"}, + Resources: []string{"buildpipelineselectors"}, + Verbs: []string{"get", "list", "watch"}, + }, + }, + } + + assert.Equal(t, expected.Rules, role.Rules) + } +} + +func appstudioUserActionsRoleBinding(userName string, role string) spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + rbName := "" + roleName := "" + if role == "admin" { + roleName = "appstudio-user-actions" + rbName = fmt.Sprintf("appstudio-%s-actions-user", userName) + } else { + roleName = fmt.Sprintf("appstudio-%s-user-actions", role) + rbName = fmt.Sprintf("appstudio-%s-%s-actions-user", role, userName) + } + rb, err := memberAwait.WaitForRoleBinding(t, ns, rbName, toolchainLabelsWaitCriterion(owner)...) + require.NoError(t, err) + assert.Len(t, rb.Subjects, 1) + assert.Equal(t, "User", rb.Subjects[0].Kind) + assert.Equal(t, userName, rb.Subjects[0].Name) + assert.Equal(t, roleName, rb.RoleRef.Name) + assert.Equal(t, "Role", rb.RoleRef.Kind) + assert.Equal(t, "rbac.authorization.k8s.io", rb.RoleRef.APIGroup) + } +} + +func appstudioViewRoleBinding(userName string) spaceRoleObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + rb, err := memberAwait.WaitForRoleBinding(t, ns, fmt.Sprintf("appstudio-%s-view-user", userName), toolchainLabelsWaitCriterion(owner)...) 
+ require.NoError(t, err) + assert.Len(t, rb.Subjects, 1) + assert.Equal(t, "User", rb.Subjects[0].Kind) + assert.Equal(t, userName, rb.Subjects[0].Name) + assert.Equal(t, "view", rb.RoleRef.Name) + assert.Equal(t, "ClusterRole", rb.RoleRef.Kind) + assert.Equal(t, "rbac.authorization.k8s.io", rb.RoleRef.APIGroup) + } +} + +func memberOperatorSaReadRoleBinding() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + rb, err := memberAwait.WaitForRoleBinding(t, ns, "member-operator-sa-read", toolchainLabelsWaitCriterion(owner)...) + require.NoError(t, err) + assert.Len(t, rb.Subjects, 1) + assert.Equal(t, "Group", rb.Subjects[0].Kind) + assert.Equal(t, "system:serviceaccounts:"+memberAwait.Namespace, rb.Subjects[0].Name) + assert.Equal(t, "rbac.authorization.k8s.io", rb.Subjects[0].APIGroup) + assert.Equal(t, "toolchain-sa-read", rb.RoleRef.Name) + assert.Equal(t, "Role", rb.RoleRef.Kind) + assert.Equal(t, "rbac.authorization.k8s.io", rb.RoleRef.APIGroup) + } +} + +func namespaceManagerSaEditRoleBinding() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + rb, err := memberAwait.WaitForRoleBinding(t, ns, toolchainv1alpha1.AdminServiceAccountName, toolchainLabelsWaitCriterion(owner)...) + require.NoError(t, err) + assert.Len(t, rb.Subjects, 1) + assert.Equal(t, "ServiceAccount", rb.Subjects[0].Kind) + assert.Equal(t, toolchainv1alpha1.AdminServiceAccountName, rb.Subjects[0].Name) + assert.Equal(t, "edit", rb.RoleRef.Name) + assert.Equal(t, "ClusterRole", rb.RoleRef.Kind) + assert.Equal(t, "rbac.authorization.k8s.io", rb.RoleRef.APIGroup) + } +} + +func toolchainSaReadRole() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + role, err := memberAwait.WaitForRole(t, ns, "toolchain-sa-read", toolchainLabelsWaitCriterion(owner)...) 
+ require.NoError(t, err) + expected := &rbacv1.Role{ + Rules: []rbacv1.PolicyRule{ + { + APIGroups: []string{""}, + Resources: []string{"secrets", "serviceaccounts"}, + Verbs: []string{"get", "list"}, + }, + { + APIGroups: []string{""}, + Resources: []string{"serviceaccounts/token"}, + Verbs: []string{"create"}, + }, + }, + } + + assert.Equal(t, expected.Rules, role.Rules) + } +} + +func namespaceManagerSA() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + _, err := memberAwait.WaitForServiceAccount(t, ns.Name, toolchainv1alpha1.AdminServiceAccountName) + require.NoError(t, err) + // fixme: decide if we want to check labels also on serviceaccounts + //assertExpectedToolchainLabels(t, sa, owner) + } +} + +func pipelineServiceAccount() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + _, err := memberAwait.WaitForServiceAccount(t, ns.Name, "appstudio-pipeline") + require.NoError(t, err) + // fixme: decide if we want to check labels also on serviceaccounts + //assertExpectedToolchainLabels(t, sa, owner) + } +} + +func pipelineRunnerRoleBinding() namespaceObjectsCheck { + return func(t *testing.T, ns *corev1.Namespace, memberAwait *wait.MemberAwaitility, owner string) { + rb, err := memberAwait.WaitForRoleBinding(t, ns, "appstudio-pipelines-runner-rolebinding", toolchainLabelsWaitCriterion(owner)...) 
+ require.NoError(t, err) + assert.Len(t, rb.Subjects, 1) + assert.Equal(t, "ServiceAccount", rb.Subjects[0].Kind) + assert.Equal(t, "appstudio-pipeline", rb.Subjects[0].Name) + assert.Equal(t, ns.Name, rb.Subjects[0].Namespace) + assert.Equal(t, "appstudio-pipelines-runner", rb.RoleRef.Name) + assert.Equal(t, "ClusterRole", rb.RoleRef.Kind) + assert.Equal(t, "rbac.authorization.k8s.io", rb.RoleRef.APIGroup) + } +} + +func pipelineRunnerClusterRole() clusterObjectsCheckCreator { + return func() clusterObjectsCheck { + return func(t *testing.T, memberAwait *wait.MemberAwaitility, userName, tierLabel string) { + clusterRole := &rbacv1.ClusterRole{} + // we don't wait because this should have been already created by OLM as part of the operator deployment + err := memberAwait.Client.Get(context.TODO(), types.NamespacedName{Name: "appstudio-pipelines-runner"}, clusterRole) + require.NoError(t, err) + assert.NotEmpty(t, clusterRole.Rules) + // we don't care much about the content, it should be applied and maintained by the OLM + } + } +} diff --git a/testsupport/wait/member.go b/testsupport/wait/member.go index 5999f46e2..3aed03779 100644 --- a/testsupport/wait/member.go +++ b/testsupport/wait/member.go @@ -12,6 +12,7 @@ import ( "github.com/codeready-toolchain/toolchain-common/pkg/cluster" "github.com/codeready-toolchain/toolchain-common/pkg/test" appstudiov1 "github.com/codeready-toolchain/toolchain-e2e/testsupport/appstudio/api/v1alpha1" + esv1beta1 "github.com/external-secrets/external-secrets/apis/externalsecrets/v1beta1" "github.com/davecgh/go-spew/spew" "github.com/ghodss/yaml" quotav1 "github.com/openshift/api/quota/v1" @@ -936,6 +937,31 @@ func (a *MemberAwaitility) WaitForNetworkPolicy(t *testing.T, namespace *corev1. 
return np, err } +// WaitForExternalSecret waits until an ExternalSecret with the given name exists in the given namespace +func (a *MemberAwaitility) WaitForExternalSecret(t *testing.T, namespace *corev1.Namespace, name string) (*esv1beta1.ExternalSecret, error) { + t.Logf("waiting for ExternalSecret '%s' in namespace '%s'", name, namespace.Name) + es := &esv1beta1.ExternalSecret{} + err := wait.Poll(a.RetryInterval, a.Timeout, func() (done bool, err error) { + obj := &esv1beta1.ExternalSecret{} + if err := a.Client.Get(context.TODO(), types.NamespacedName{Namespace: namespace.Name, Name: name}, obj); err != nil { + if errors.IsNotFound(err) { + allESs := &esv1beta1.ExternalSecretList{} + if err := a.Client.List(context.TODO(), allESs, client.InNamespace(namespace.Name)); err != nil { + return false, err + } + return false, nil + } + return false, err + } + es = obj + return true, nil + }) + if err != nil { + t.Logf("failed to wait for ExternalSecret '%s' in namespace '%s'", name, namespace.Name) + } + return es, err +} + // WaitForRole waits until a Role with the given name exists in the given namespace func (a *MemberAwaitility) WaitForRole(t *testing.T, namespace *corev1.Namespace, name string, criteria ...LabelWaitCriterion) (*rbacv1.Role, error) { t.Logf("waiting for Role '%s' in namespace '%s'", name, namespace.Name)