Merge pull request #72 from G-Lenz/main
Release v2.7.1
abewub authored Aug 27, 2024
2 parents 2ab62c9 + 2ad4fd9 commit 46dd9e9
Showing 16 changed files with 185 additions and 83 deletions.
12 changes: 12 additions & 0 deletions CHANGELOG.md
@@ -4,6 +4,17 @@
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## [2.7.1] - 2024-08
### Added
- Security.md file

### Fixed
- Workspace analysis failing when previous data is recorded in the database but empty.
- Workspace analysis failing when timestamps from user-connected data don't exist in other metric data.
- Workspace only reporting a 24-hour period
- Workspace not reporting tags
- Updated micromatch to mitigate [CVE-2024-4067](https://avd.aquasec.com/nvd/2024/cve-2024-4067).

## [2.7.0] - 2024-07
### Added
- Workspace performance metrics
@@ -13,6 +24,7 @@
- Powertools logging
- Operational insights CloudWatch dashboard
- Support for G4DN workspaces

### Fixed
- sts token expired after one hour

1 change: 1 addition & 0 deletions README.md
@@ -148,6 +148,7 @@ npm run synth
├── LICENSE.txt
├── NOTICE.txt
├── README.md
├── SECURITY.md
├── buildspec.yml
├── deployment
│   ├── build-open-source-dist.sh
6 changes: 6 additions & 0 deletions SECURITY.md
@@ -0,0 +1,6 @@
Reporting Security Issues
----------------------------------------------------------------------------------------------------------
We take all security reports seriously. When we receive such reports, we will investigate and
subsequently address any potential vulnerabilities as quickly as possible. If you discover a potential
security issue in this project, please notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/) or
directly via email to [AWS Security](mailto:[email protected]). Please do not create a public GitHub issue in this project.
4 changes: 4 additions & 0 deletions source/lib/components/ecs-cluster-resources.ts
@@ -403,6 +403,10 @@ export class EcsClusterResources extends Construct {
name: "NumberOfMonthsForTerminationCheck",
value: props.numberOfmonthsForTerminationCheck,
},
{
name: "ImageVersion",
value: image,
},
],
},
],
8 changes: 4 additions & 4 deletions source/package-lock.json

Some generated files are not rendered by default.

8 changes: 4 additions & 4 deletions source/package.json
@@ -18,10 +18,10 @@
"test": "jest --coverage",
"license-report": "license-report --output=csv --delimiter=' under ' --fields=name --fields=licenseType",
"cdk": "cdk",
"bootstrap": "SOLUTION_VERSION=v2.7.0 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces cdk bootstrap",
"deploy": "SOLUTION_VERSION=v2.7.0 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces cdk deploy cost-optimizer-for-amazon-workspaces",
"deploySpoke": "SOLUTION_VERSION=v2.7.0 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces cdk deploy cost-optimizer-for-amazon-workspaces-spoke",
"synth": "SOLUTION_VERSION=v2.7.0 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces DIST_OUTPUT_BUCKET=solutions-reference cdk synth"
"bootstrap": "SOLUTION_VERSION=v2.7.1 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces cdk bootstrap",
"deploy": "SOLUTION_VERSION=v2.7.1 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces cdk deploy cost-optimizer-for-amazon-workspaces",
"deploySpoke": "SOLUTION_VERSION=v2.7.1 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces cdk deploy cost-optimizer-for-amazon-workspaces-spoke",
"synth": "SOLUTION_VERSION=v2.7.1 SOLUTION_NAME=cost-optimizer-for-amazon-workspaces SOLUTION_TRADEMARKEDNAME=cost-optimizer-for-amazon-workspaces DIST_OUTPUT_BUCKET=solutions-reference cdk synth"
},
"devDependencies": {
"@aws-cdk/assert": "2.68.0",
22 changes: 22 additions & 0 deletions source/test/__snapshots__/hub-snapshot.test.ts.snap
@@ -1388,6 +1388,28 @@ exports[`hub stack synth matches the existing snapshot 1`] = `
"Ref": "NumberOfMonthsForTerminationCheck",
},
},
{
"Name": "ImageVersion",
"Value": {
"Fn::If": [
"UseStableTagCondition",
{
"Fn::FindInMap": [
"Solution",
"Data",
"StableImage",
],
},
{
"Fn::FindInMap": [
"Solution",
"Data",
"Image",
],
},
],
},
},
],
"Essential": true,
"Image": {
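The snapshot hunk above resolves `ImageVersion` with an `Fn::If` over `UseStableTagCondition`, picking either the `StableImage` or the `Image` entry of the `Solution`/`Data` mapping. A minimal sketch of the equivalent selection logic, with placeholder mapping values (the real tags live in the solution's CloudFormation mapping, not here):

```python
# Sketch of how the rendered Fn::If / Fn::FindInMap above resolves at deploy time.
# Mapping values are placeholders, not the solution's actual image tags.
solution_mapping = {"Data": {"StableImage": "stable-tag", "Image": "release-tag"}}


def resolve_image_version(use_stable_tag_condition: bool) -> str:
    branch = "StableImage" if use_stable_tag_condition else "Image"
    # Fn::FindInMap["Solution", "Data", branch]
    return solution_mapping["Data"][branch]
```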
@@ -73,7 +73,6 @@ def ws_description(**kwargs):
"username": "test-user",
"computer_name": "test-computer",
"initial_mode": "test-mode",
"tags": ["tag1", "tag2"],
}
filtered_args = {
key: value for key, value in kwargs.items() if key in default_args.keys()
@@ -118,6 +117,7 @@ def ws_record(ws_billing_data, ws_metrics):
report_date="test-report-date",
last_reported_metric_period="test-last-period",
last_known_user_connection="test-last-connection",
tags="[{'key1': 'tag1'}, {'key2': 'tag2'}]",
)


@@ -203,7 +203,7 @@ def test_process_directory_without_ddb_item(
unittest.mock.ANY, unittest.mock.ANY, dashboard_metrics
)
report_header = "WorkspaceID,Billable Hours,Usage Threshold,Change Reported,Bundle Type,Initial Mode,New Mode,Username,Computer Name,DirectoryId,WorkspaceTerminated,insessionlatency,cpuusage,memoryusage,rootvolumediskusage,uservolumediskusage,udppacketlossrate,Tags,ReportDate,\n"
list_processed_workspaces = "test-ws-id,20,100,No change,test-bundle,test-mode,test-mode,test-user,test-computer,test-dir-id,,93.42,94.42,95.42,96.42,97.42,98.42,\"['tag1', 'tag2']\",test-report-date\n"
list_processed_workspaces = "test-ws-id,20,100,No change,test-bundle,test-mode,test-mode,test-user,test-computer,test-dir-id,,93.42,94.42,95.42,96.42,97.42,98.42,[{'key1': 'tag1'}, {'key2': 'tag2'}],test-report-date\n"
header_field_count = len(str.split(report_header, ","))
data_field_count = len(str.split(list_processed_workspaces, ","))
assert header_field_count == data_field_count
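The fixture and CSV expectation above carry tags as a single pre-serialized string on the record rather than as a list on the description. A minimal sketch of that serialization, using values from the test fixtures; the helper name is hypothetical, not the solution's actual API:

```python
# Sketch only: tags rendered as one string so the record carries a single "Tags" value.
def serialize_tags(tags: list[dict]) -> str:
    # str() of a list of dicts matches the fixture string used in the tests above.
    return str(tags)


tags_field = serialize_tags([{"key1": "tag1"}, {"key2": "tag2"}])
# -> "[{'key1': 'tag1'}, {'key2': 'tag2'}]"
```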
@@ -46,7 +46,6 @@ def ws_description(**kwargs):
"username": "test-user",
"computer_name": "test-computer",
"initial_mode": "test-mode",
"tags": ["tag1", "tag2"],
}
filtered_args = {
key: value for key, value in kwargs.items() if key in default_args.keys()
@@ -91,6 +90,7 @@ def ws_record(ws_billing_data, ws_metrics):
report_date="test-report-date",
last_reported_metric_period="test-last-period",
last_known_user_connection="test-last-connection",
tags="[{'key1': 'tag1'}, {'key2': 'tag2'}]",
)


@@ -180,13 +180,18 @@ def performance_metric_factory(length, start):

def metric_data_factory(indices, length, start):
metrics = {}
user_connected_timestamps = user_session_timestamps_factory(length)
timestamps = user_session_timestamps_factory(length)
for metric in METRIC_LIST:
if metric == "UserConnected":
data = user_connected_data_factory(indices, length)
metrics[metric.lower()] = {
"timestamps": user_connected_timestamps,
"values": data,
}
else:
data = performance_metric_factory(length, start)
metrics[metric.lower()] = {"timestamps": timestamps, "values": data}
metrics[metric.lower()] = {"timestamps": timestamps, "values": data}
return metrics


@@ -241,9 +246,14 @@ def expected_sessions_factory(user_session_data, active_indices, zero_limit):
session.setdefault("active_sessions", []).append(
user_session_data["cpuusage"]["timestamps"][active_index]
)
expected_avg = WeightedAverage(
current_avg = WeightedAverage(
user_session_data["cpuusage"]["values"][active_index], 1
).merge(expected_avg)
)
expected_avg = (
current_avg.merge(expected_avg)
if expected_avg is not None
else current_avg
)
if session:
duration_hours = math.ceil(
(session["active_sessions"][-1] - session["active_sessions"][0]).seconds
@@ -691,7 +701,7 @@ def test_get_billable_hours_and_performance(mocker, session, ws_record, metric_d
metrics_helper, "get_list_data_points", return_value=metric_data
)
mocker.patch.object(metrics_helper, "get_user_connected_hours")
mocker.patch.object(metrics_helper, "get_user_sessions")
mock_user_session = mocker.patch.object(metrics_helper, "get_user_sessions")
mocker.patch.object(metrics_helper.session_table, "update_ddb_items"),
spy_get_time_range = mocker.spy(metrics_helper, "get_time_range")
spy_get_cloudwatch_metric_data_points = mocker.spy(
@@ -712,7 +722,13 @@
)
spy_get_cloudwatch_metric_data_points.assert_called_once()
spy_get_list_data_points.assert_called_once()
spy_get_user_connected_hours.assert_called_once()
spy_get_user_connected_hours.assert_called_once_with(
mock_user_session(),
ws_record.description.workspace_id,
ws_record.description.initial_mode,
60,
ws_record.billing_data.billable_hours,
)
spy_get_user_sessions.assert_called_once()


@@ -807,6 +823,9 @@ def test_get_user_sessions(session, ws_record):
total_values = 26
start_value = 1
user_session_data = metric_data_factory(active_indices, total_values, start_value)
user_session_data["userconnected"]["timestamps"][-1] += datetime.timedelta(
minutes=5
)
result = metrics_helper.get_user_sessions(
user_session_data,
ws_description(),
@@ -1436,7 +1455,7 @@ def test_get_user_sessions_32(session, ws_record):
def test_process_performance_metrics(session, ws_record, metric_data):
metrics_helper = MetricsHelper(session, "us-east-1", "test-table")
current_weighted_avg = mean(metric_data["cpuusage"]["values"]) * 3
previous_weighted_avg = ws_record.performance_metrics.cpu_usage.weighted_avg
previous_weighted_avg = ws_record.performance_metrics.cpu_usage.weighted_avg()
expected_avg = Decimal(
(current_weighted_avg + previous_weighted_avg)
/ (ws_record.performance_metrics.cpu_usage.count + 3),
@@ -1462,9 +1481,8 @@ def test_process_performance_metrics_with_no_available_data_in_last_report(
assert result.memory_usage.avg == Decimal("5")
assert result.memory_usage.count == 3

# test when current dat doesn't exist
assert result.udp_packet_loss_rate.avg == None
assert result.udp_packet_loss_rate.count == 0
# test when current data doesn't exist
assert result.udp_packet_loss_rate == None


def test_process_performance_metrics_with_zero_avg(session, ws_record, metric_data):
@@ -4,7 +4,6 @@
# SPDX-License-Identifier: Apache-2.0

# Standard Library
from dataclasses import fields
from decimal import Decimal

# Third Party Libraries
@@ -35,7 +34,6 @@ def ws_description():
username="test-user",
computer_name="test-computer",
initial_mode="test-mode",
tags=["tag1", "tag2"],
)


@@ -75,6 +73,7 @@ def ws_record(ws_description, ws_billing_data, ws_metrics):
report_date="test-report-date",
last_reported_metric_period="test-last-period",
last_known_user_connection="test-last-connection",
tags="[{'key1': 'tag1'}, {'key2': 'tag2'}]",
)


@@ -155,7 +154,7 @@ def ddb_item(ws_record):
"N": str(ws_record.performance_metrics.udp_packet_loss_rate.count)
},
"Tags": {
"L": list(map(lambda x: {"S": x}, ws_record.description.tags)),
"S": ws_record.tags,
},
"ReportDate": {"S": ws_record.report_date},
"LastReportedMetricPeriod": {"S": ws_record.last_reported_metric_period},
@@ -245,11 +244,10 @@ def test_ddb_attr_to_class_field_with_caps():
assert result == "test_string"


def test_weighted_avg_post_init_sets_weighted_avg_field(ws_metrics):
fields = vars(ws_metrics)
for field in fields:
value = getattr(ws_metrics, field)
assert value.weighted_avg == value.avg * value.count
def test_weighted_avg(ws_metrics):
weighted_avg = ws_metrics.cpu_usage.weighted_avg()

assert weighted_avg == ws_metrics.cpu_usage.avg * ws_metrics.cpu_usage.count


def test_weighted_average_merge(ws_metrics):
Expand All @@ -258,12 +256,11 @@ def test_weighted_average_merge(ws_metrics):

merged_wa = wa_1.merge(wa_2)
expected_count = wa_1.count + wa_2.count
expected_avg = Decimal((wa_1.weighted_avg + wa_2.weighted_avg) / expected_count)
expected_avg = Decimal((wa_1.weighted_avg() + wa_2.weighted_avg()) / expected_count)
assert merged_wa.avg == expected_avg
assert merged_wa.count == expected_count
assert merged_wa.weighted_avg == expected_count * expected_avg


def test_to_csv(ws_record):
expected = "test-ws-id,20,100,ToHourly,test-bundle,test-mode,test-mode,test-user,test-computer,test-dir-id,,93.42,94.42,95.42,96.42,97.42,98.42,\"['tag1', 'tag2']\",test-report-date\n"
expected = "test-ws-id,20,100,ToHourly,test-bundle,test-mode,test-mode,test-user,test-computer,test-dir-id,,93.42,94.42,95.42,96.42,97.42,98.42,[{'key1': 'tag1'}, {'key2': 'tag2'}],test-report-date\n"
assert ws_record.to_csv() == expected
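Several hunks above point at the same refactor: `weighted_avg` is now a method derived from `avg * count` rather than a stored field, and callers such as `expected_sessions_factory` guard the first merge because the accumulator starts out as `None`. A minimal sketch of that pattern, assuming only the class shape implied by the tests (the real `WeightedAverage` lives in the solution's source and may differ):

```python
# Sketch of the weighted-average pattern implied by the test changes above.
# Field names and the Decimal types are inferred from the tests, not copied
# from the solution's module.
from dataclasses import dataclass
from decimal import Decimal


@dataclass
class WeightedAverage:
    avg: Decimal
    count: int

    def weighted_avg(self) -> Decimal:
        # Derived on demand instead of stored as a field.
        return self.avg * self.count

    def merge(self, other: "WeightedAverage") -> "WeightedAverage":
        total = self.count + other.count
        combined = (self.weighted_avg() + other.weighted_avg()) / total
        return WeightedAverage(avg=Decimal(combined), count=total)


# The None guard from expected_sessions_factory: the first data point becomes
# the accumulator; later points merge into it.
expected_avg = None
for value in (Decimal("93.42"), Decimal("94.42"), Decimal("95.42")):
    current_avg = WeightedAverage(value, 1)
    expected_avg = (
        current_avg.merge(expected_avg) if expected_avg is not None else current_avg
    )
```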