Merge branch 'master' into vapopov/add-autoupdate-resources-rm-tctl
vapopov authored Nov 28, 2024
2 parents ca5047d + 98c53d5 commit b8fdf2a
Showing 10 changed files with 256 additions and 41 deletions.
@@ -53,7 +53,7 @@ below:

```yaml
kind: role
version: v6
version: v7
metadata:
name: employee
spec:
@@ -120,7 +120,7 @@ For example, the following role enables a user to search for resources that the
```yaml
# requester.yaml
kind: role
version: v6
version: v7
metadata:
name: k8s-requester
spec:
@@ -215,7 +215,7 @@ You can specify the `max_duration` option with a role like the following:

```yaml
kind: role
version: v6
version: v7
metadata:
name: temp-dba
spec:
@@ -271,7 +271,7 @@ your ticket ID":

```yaml
kind: role
version: v6
version: v7
metadata:
name: employee
spec:
@@ -294,6 +294,41 @@ user:
1. If one of the user's roles includes the `reason` strategy, the user must
provide a reason when authenticating.

## Requiring request reasons

The `allow.request.reason.mode` field controls whether a reason is required when users submit
Access Requests.

Allowed values are:

|Value|Meaning|
|---|---|
| `optional` | The default. The user does not need to provide a reason when making a request. |
| `required` | The user must provide a non-empty reason when making a request. |

Example:

```yaml
kind: role
version: v7
metadata:
name: node-requester
spec:
allow:
request:
roles:
- 'node-access'
search_as_roles:
- 'root-node-access'
reason:
mode: 'required'
```

If a user with the `node-requester` role makes an Access Request for the `node-access` role, or
for any resource allowed by `root-node-access`, they must provide a reason. If a user's role set
includes multiple roles governing Access Requests to the same roles and resources, the `required`
mode takes precedence.

## Review thresholds

You can configure a user's roles to specify the criteria that an Access Request
@@ -307,7 +342,7 @@ roles:

```yaml
kind: role
version: v6
version: v7
metadata:
name: devops
spec:
@@ -430,7 +465,7 @@ The following role adds the suggested reviewers `user1` and `user2`:

```yaml
kind: role
version: v6
version: v7
metadata:
name: employee
spec:
@@ -559,7 +594,7 @@ them, use the `allow.review_requests.preview_as_roles` and

```yaml
kind: role
version: v6
version: v7
metadata:
name: reviewer
spec:
@@ -3,7 +3,7 @@ title: teleport-plugin-email Chart Reference
description: Values that can be set using the teleport-plugin-email Helm chart
---

The `teleport-plugin-slack` Helm chart is used to configure the email Teleport plugin, which allows users to receive Access Requests via email.
The `teleport-plugin-email` Helm chart is used to configure the email Teleport plugin, which allows users to receive Access Requests via email.

You can [browse the source on GitHub](https://github.com/gravitational/teleport/tree/v(=teleport.version=)/examples/chart/access/email).

12 changes: 9 additions & 3 deletions lib/srv/discovery/common/watcher.go
@@ -63,6 +63,8 @@ type WatcherConfig struct {
DiscoveryGroup string
// Origin is used to specify what type of origin watcher's resources are
Origin string
// PreFetchHookFn is called before starting a new fetch cycle.
PreFetchHookFn func()
}

// CheckAndSetDefaults validates the config.
@@ -103,12 +105,12 @@ func NewWatcher(ctx context.Context, config WatcherConfig) (*Watcher, error) {
if err := config.CheckAndSetDefaults(); err != nil {
return nil, trace.Wrap(err)
}
watcher := &Watcher{

return &Watcher{
cfg: config,
ctx: ctx,
resourcesC: make(chan types.ResourcesWithLabels),
}
return watcher, nil
}, nil
}

// Start starts fetching cloud resources and sending them to the channel.
@@ -141,6 +143,10 @@ func (w *Watcher) Start() {

// fetchAndSend fetches resources from all fetchers and sends them to the channel.
func (w *Watcher) fetchAndSend() {
if w.cfg.PreFetchHookFn != nil {
w.cfg.PreFetchHookFn()
}

var (
newFetcherResources = make(types.ResourcesWithLabels, 0, 50)
fetchersLock sync.Mutex
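
The new `PreFetchHookFn` gives callers a per-cycle setup point that runs before any fetcher
executes. Below is a minimal, self-contained sketch of the pattern using simplified hypothetical
types (the real watcher lives in `lib/srv/discovery/common` and is configured through
`WatcherConfig`):

```go
package main

import "fmt"

// watcher is a simplified, hypothetical stand-in for common.Watcher.
type watcher struct {
	preFetchHook func()          // runs once per fetch cycle, before fetchers
	fetch        func() []string // stands in for the configured fetcher set
}

// fetchAndSend mirrors the real control flow: hook first, then fetch.
func (w *watcher) fetchAndSend() {
	if w.preFetchHook != nil {
		w.preFetchHook()
	}
	for _, r := range w.fetch() {
		fmt.Println("fetched:", r)
	}
}

func main() {
	cycles := 0
	w := &watcher{
		// A typical hook resets per-cycle state, as database_watcher.go
		// does with awsRDSResourcesStatus.reset() further down.
		preFetchHook: func() { cycles++ },
		fetch:        func() []string { return []string{"db-1", "db-2"} },
	}
	w.fetchAndSend()
	w.fetchAndSend()
	fmt.Println("cycles:", cycles) // 2; the test below asserts the same way
}
```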
7 changes: 7 additions & 0 deletions lib/srv/discovery/common/watcher_test.go
@@ -20,6 +20,7 @@ package common

import (
"context"
"sync/atomic"
"testing"
"time"

@@ -61,11 +62,15 @@ func TestWatcher(t *testing.T) {
}

clock := clockwork.NewFakeClock()
fetchIterations := atomic.Uint32{}
watcher, err := NewWatcher(ctx, WatcherConfig{
FetchersFn: StaticFetchers([]Fetcher{appFetcher, noAuthFetcher, dbFetcher}),
Interval: time.Hour,
Clock: clock,
Origin: types.OriginCloud,
PreFetchHookFn: func() {
fetchIterations.Add(1)
},
})
require.NoError(t, err)
go watcher.Start()
@@ -77,6 +82,8 @@
// Watcher should fetch again after interval.
clock.Advance(time.Hour + time.Minute)
assertFetchResources(t, watcher, wantResources)

require.Equal(t, uint32(2), fetchIterations.Load())
}

func TestWatcherWithDynamicFetchers(t *testing.T) {
53 changes: 43 additions & 10 deletions lib/srv/discovery/database_watcher.go
@@ -64,22 +64,30 @@ func (s *Server) startDatabaseWatchers() error {
return trace.Wrap(err)
}

watcher, err := common.NewWatcher(s.ctx, common.WatcherConfig{
FetchersFn: s.getAllDatabaseFetchers,
Log: s.LegacyLogger.WithField("kind", types.KindDatabase),
DiscoveryGroup: s.DiscoveryGroup,
Interval: s.PollInterval,
TriggerFetchC: s.newDiscoveryConfigChangedSub(),
Origin: types.OriginCloud,
Clock: s.clock,
})
watcher, err := common.NewWatcher(s.ctx,
common.WatcherConfig{
FetchersFn: s.getAllDatabaseFetchers,
Log: s.LegacyLogger.WithField("kind", types.KindDatabase),
DiscoveryGroup: s.DiscoveryGroup,
Interval: s.PollInterval,
TriggerFetchC: s.newDiscoveryConfigChangedSub(),
Origin: types.OriginCloud,
Clock: s.clock,
PreFetchHookFn: func() {
s.awsRDSResourcesStatus.reset()
},
},
)
if err != nil {
return trace.Wrap(err)
}
go watcher.Start()

go func() {
for {
discoveryConfigsChanged := map[string]struct{}{}
resourcesFoundByGroup := make(map[awsResourceGroup]int)

select {
case newResources := <-watcher.ResourcesC():
dbs := make([]types.Database, 0, len(newResources))
@@ -89,21 +97,46 @@
continue
}

resourceGroup := awsResourceGroupFromLabels(db.GetStaticLabels())
resourcesFoundByGroup[resourceGroup] += 1
discoveryConfigsChanged[resourceGroup.discoveryConfig] = struct{}{}

dbs = append(dbs, db)
}
mu.Lock()
newDatabases = dbs
mu.Unlock()

for group, count := range resourcesFoundByGroup {
s.awsRDSResourcesStatus.incrementFound(group, count)
}

if err := reconciler.Reconcile(s.ctx); err != nil {
s.Log.WarnContext(s.ctx, "Unable to reconcile database resources", "error", err)
} else if s.onDatabaseReconcile != nil {

// When reconcile fails, it is assumed that everything failed.
for group, count := range resourcesFoundByGroup {
s.awsRDSResourcesStatus.incrementFailed(group, count)
}

break
}

for group, count := range resourcesFoundByGroup {
s.awsRDSResourcesStatus.incrementEnrolled(group, count)
}

if s.onDatabaseReconcile != nil {
s.onDatabaseReconcile()
}

case <-s.ctx.Done():
return
}

for dc := range discoveryConfigsChanged {
s.updateDiscoveryConfigStatus(dc)
}
}
}()
return nil
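
The status accounting follows a fixed per-cycle lifecycle: counters are reset in the pre-fetch
hook, every discovered resource increments a found count for its discovery group, and after
reconciliation the same counts are recorded as enrolled on success or failed on error. A minimal
sketch of that lifecycle, using a hypothetical `statusCollector` in place of the unexported
`awsResourcesStatus`:

```go
package main

import "fmt"

// statusCollector is a hypothetical stand-in for awsResourcesStatus,
// counting resources per discovery-config group.
type statusCollector struct {
	found, enrolled, failed map[string]int
}

func newStatusCollector() *statusCollector {
	return &statusCollector{
		found:    map[string]int{},
		enrolled: map[string]int{},
		failed:   map[string]int{},
	}
}

// reset runs from the pre-fetch hook so every cycle starts at zero.
func (c *statusCollector) reset() {
	c.found = map[string]int{}
	c.enrolled = map[string]int{}
	c.failed = map[string]int{}
}

func main() {
	c := newStatusCollector()
	c.reset() // PreFetchHookFn

	// Fetch phase: each discovered resource counts as found for its group.
	perGroup := map[string]int{"discovery-config-1": 2}
	for g, n := range perGroup {
		c.found[g] += n
	}

	// Reconcile phase: on error everything found this cycle is failed;
	// otherwise it is enrolled.
	var reconcileErr error // nil here; set non-nil to simulate failure
	for g, n := range perGroup {
		if reconcileErr != nil {
			c.failed[g] += n
		} else {
			c.enrolled[g] += n
		}
	}
	fmt.Println(c.found, c.enrolled, c.failed)
}
```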
5 changes: 5 additions & 0 deletions lib/srv/discovery/discovery.go
@@ -377,6 +377,8 @@ type Server struct {

awsSyncStatus awsSyncStatus
awsEC2ResourcesStatus awsResourcesStatus
awsRDSResourcesStatus awsResourcesStatus
awsEKSResourcesStatus awsResourcesStatus
awsEC2Tasks awsEC2Tasks

// caRotationCh receives nodes that need to have their CAs rotated.
@@ -411,6 +413,9 @@ func New(ctx context.Context, cfg *Config) (*Server, error) {
dynamicTAGSyncFetchers: make(map[string][]aws_sync.AWSSync),
dynamicDiscoveryConfig: make(map[string]*discoveryconfig.DiscoveryConfig),
awsSyncStatus: awsSyncStatus{},
awsEC2ResourcesStatus: newAWSResourceStatusCollector(types.AWSMatcherEC2),
awsRDSResourcesStatus: newAWSResourceStatusCollector(types.AWSMatcherRDS),
awsEKSResourcesStatus: newAWSResourceStatusCollector(types.AWSMatcherEKS),
}
s.discardUnsupportedMatchers(&s.Matchers)

57 changes: 57 additions & 0 deletions lib/srv/discovery/discovery_test.go
@@ -1872,6 +1872,8 @@ func TestDiscoveryDatabase(t *testing.T) {
awsRDSDBWithRole.SetAWSAssumeRole("arn:aws:iam::123456789012:role/test-role")
awsRDSDBWithRole.SetAWSExternalID("test123")

eksAWSResource, _ := makeEKSCluster(t, "aws-eks", "us-east-1", rewriteDiscoveryLabelsParams{discoveryGroup: mainDiscoveryGroup, integration: integrationName, discoveryConfig: discoveryConfigName})

matcherForDiscoveryConfigFn := func(t *testing.T, discoveryGroup string, m Matchers) *discoveryconfig.DiscoveryConfig {
dc, err := discoveryconfig.NewDiscoveryConfig(
header.Metadata{Name: discoveryConfigName},
@@ -1906,6 +1908,9 @@
&azure.ARMRedisEnterpriseClusterMock{},
&azure.ARMRedisEnterpriseDatabaseMock{},
),
EKS: &mocks.EKSMock{
Clusters: []*eks.Cluster{eksAWSResource},
},
}

tcs := []struct {
@@ -1916,6 +1921,7 @@
azureMatchers []types.AzureMatcher
expectDatabases []types.Database
discoveryConfigs func(*testing.T) []*discoveryconfig.DiscoveryConfig
discoveryConfigStatusCheck func(*testing.T, discoveryconfig.Status)
wantEvents int
}{
{
@@ -2114,6 +2120,12 @@
return []*discoveryconfig.DiscoveryConfig{dc1}
},
wantEvents: 1,
discoveryConfigStatusCheck: func(t *testing.T, s discoveryconfig.Status) {
require.Equal(t, uint64(1), s.DiscoveredResources)
require.Equal(t, uint64(1), s.IntegrationDiscoveredResources[integrationName].AwsRds.Enrolled)
require.Equal(t, uint64(1), s.IntegrationDiscoveredResources[integrationName].AwsRds.Found)
require.Zero(t, s.IntegrationDiscoveredResources[integrationName].AwsRds.Failed)
},
},
{
name: "running in integrations-only-mode with a matcher without an integration, must find 1 database",
@@ -2127,6 +2139,27 @@
expectDatabases: []types.Database{awsRedshiftDBWithIntegration},
wantEvents: 1,
},
{
name: "running in integrations-only-mode with a dynamic matcher with an integration, must find 1 eks cluster",
discoveryConfigs: func(t *testing.T) []*discoveryconfig.DiscoveryConfig {
dc1 := matcherForDiscoveryConfigFn(t, mainDiscoveryGroup, Matchers{
AWS: []types.AWSMatcher{{
Types: []string{types.AWSMatcherEKS},
Tags: map[string]utils.Strings{types.Wildcard: {types.Wildcard}},
Regions: []string{"us-east-1"},
Integration: integrationName,
}},
})
return []*discoveryconfig.DiscoveryConfig{dc1}
},
expectDatabases: []types.Database{},
wantEvents: 0,
discoveryConfigStatusCheck: func(t *testing.T, s discoveryconfig.Status) {
require.Equal(t, uint64(1), s.DiscoveredResources)
require.Equal(t, uint64(1), s.IntegrationDiscoveredResources[integrationName].AwsEks.Found)
require.Zero(t, s.IntegrationDiscoveredResources[integrationName].AwsEks.Enrolled)
},
},
}

for _, tc := range tcs {
@@ -2227,6 +2260,13 @@
return reporter.ResourceCreateEventCount() != 0
}, time.Second, 100*time.Millisecond)
}

if tc.discoveryConfigStatusCheck != nil {
dc, err := tlsServer.Auth().GetDiscoveryConfig(ctx, discoveryConfigName)
require.NoError(t, err)

tc.discoveryConfigStatusCheck(t, dc.Status)
}
})
}
}
@@ -2389,6 +2429,23 @@ func TestDiscoveryDatabaseRemovingDiscoveryConfigs(t *testing.T) {
})
}

func makeEKSCluster(t *testing.T, name, region string, discoveryParams rewriteDiscoveryLabelsParams) (*eks.Cluster, types.KubeCluster) {
t.Helper()
eksAWSCluster := &eks.Cluster{
Name: aws.String(name),
Arn: aws.String(fmt.Sprintf("arn:aws:eks:%s:123456789012:cluster/%s", region, name)),
Status: aws.String(eks.ClusterStatusActive),
Tags: map[string]*string{
"env": aws.String("prod"),
},
}
actual, err := common.NewKubeClusterFromAWSEKS(aws.StringValue(eksAWSCluster.Name), aws.StringValue(eksAWSCluster.Arn), eksAWSCluster.Tags)
require.NoError(t, err)
discoveryParams.matcherType = types.AWSMatcherEKS
rewriteCloudResource(t, actual, discoveryParams)
return eksAWSCluster, actual
}

func makeRDSInstance(t *testing.T, name, region string, discoveryParams rewriteDiscoveryLabelsParams) (*rds.DBInstance, types.Database) {
instance := &rds.DBInstance{
DBInstanceArn: aws.String(fmt.Sprintf("arn:aws:rds:%v:123456789012:db:%v", region, name)),