Skip to content

Commit

Permalink
fix: dfx deploy --by-proposal no longer sends chunk data in ProposeCommitBatch
Browse files Browse the repository at this point in the history
  • Loading branch information
ericswanson-dfinity authored Dec 17, 2024
1 parent 26a976d commit 0ca8ca0
Show file tree
Hide file tree
Showing 6 changed files with 82 additions and 9 deletions.
13 changes: 13 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,6 +2,19 @@

# UNRELEASED

### fix: `dfx deploy --by-proposal` no longer sends chunk data in ProposeCommitBatch

Recently we made `dfx deploy` include some chunk data in CommitBatch, in order to streamline
deploys for smaller projects. `dfx deploy` splits up larger change lists and submits them in
smaller batches, in order to remain within message and compute limits.

This change also applied to `dfx deploy --by-proposal`, which submits all changes in a single
message. This made it more likely that `dfx deploy --by-proposal` would fail due to exceeding
message limits.

This fix makes it so `dfx deploy --by-proposal` never includes this chunk data in
ProposeCommitBatch, which will allow for more changes before hitting message limits.

### feat: `dfx start --pocketic` supports `--force` and shared networks.

`dfx start --pocketic` is now compatible with `--force` and shared networks.
Expand Down
29 changes: 29 additions & 0 deletions e2e/tests-dfx/assetscanister.bash
Original file line number Diff line number Diff line change
Expand Up @@ -187,6 +187,35 @@ check_permission_failure() {
assert_contains "cache-control: max-age=888"
}

# Regression test: `dfx deploy --by-proposal` submits every change in a single
# ProposeCommitBatch message, so it must not inline chunk data there. With many
# small assets, inlining content pushed the proposal past the message size limit
# (see the byte estimates in the loop below).
@test "deploy --by-proposal lots of small assets should not overflow message limits" {
# Three identities model the by-proposal workflow: the controller deploys and
# grants permissions, "prepare" proposes the batch, "commit" would commit it.
assert_command dfx identity new controller --storage-mode plaintext
assert_command dfx identity new prepare --storage-mode plaintext
assert_command dfx identity new commit --storage-mode plaintext
# Default to anonymous so each call below must name its identity explicitly.
assert_command dfx identity use anonymous

CONTROLLER_PRINCIPAL=$(dfx identity get-principal --identity controller)
PREPARE_PRINCIPAL=$(dfx identity get-principal --identity prepare)
COMMIT_PRINCIPAL=$(dfx identity get-principal --identity commit)

dfx_start
# Initial deploy creates the asset canister under the controller identity.
assert_command dfx deploy --identity controller

# Grant the asset-canister permissions each workflow identity needs.
assert_command dfx canister call e2e_project_frontend grant_permission "(record { to_principal=principal \"$PREPARE_PRINCIPAL\"; permission = variant { Prepare }; })" --identity controller
assert_command dfx canister call e2e_project_frontend grant_permission "(record { to_principal=principal \"$COMMIT_PRINCIPAL\"; permission = variant { Commit }; })" --identity controller

# Generate many small random assets. Sizes are tuned so that, if chunk data
# were still inlined into the proposal, the message would exceed the limit,
# while without inlined content it fits (estimates below).
for a in $(seq 1 1400); do
# 1400 files * ~1200 header bytes: 1,680,000 bytes
# 1400 files * 650 content bytes = 910,000 bytes: small enough that chunk uploader won't upload before finalize
# commit batch without content: 1,978,870 bytes
# commit batch with content: 2,889,392 bytes
# change finalize_upload to always pass MAX_CHUNK_SIZE/2 to see this fail
dd if=/dev/random of=src/e2e_project_frontend/assets/"$a" bs=650 count=1
done

# The proposal itself must succeed; success is signaled by the "Proposed
# commit of batch ..." message with the computed evidence hash.
assert_command dfx deploy e2e_project_frontend --by-proposal --identity prepare
assert_match "Proposed commit of batch 2 with evidence [0-9a-z]*. Either commit it by proposal, or delete it."
}

@test "deploy --by-proposal all assets" {
assert_command dfx identity new controller --storage-mode plaintext
assert_command dfx identity new prepare --storage-mode plaintext
Expand Down
32 changes: 25 additions & 7 deletions src/canisters/frontend/ic-asset/src/batch_upload/plumbing.rs
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,12 @@ pub(crate) struct ProjectAsset {
pub(crate) encodings: HashMap<String, ProjectAssetEncoding>,
}

/// Selects how uploaded chunk data is finalized for a deploy.
///
/// In [`Mode::ByProposal`] all changes must fit in a single
/// `ProposeCommitBatch` message, so no chunk data may be retained in the
/// commit arguments; in [`Mode::NormalDeploy`] small amounts of data may be
/// inlined to streamline deploys of smaller projects.
// `Copy`/`Clone` let callers pass the mode by value without moving it;
// `Eq` accompanies `PartialEq` since equality on a fieldless enum is total.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum Mode {
    /// Submit all changes in one proposal message; never inline chunk data.
    ByProposal,
    /// Regular `dfx deploy`; may inline small chunk data into `commit_batch`.
    NormalDeploy,
}

type IdMapping = BTreeMap<usize, Nat>;
type UploadQueue = Vec<(usize, Vec<u8>)>;
pub(crate) struct ChunkUploader<'agent> {
Expand Down Expand Up @@ -108,9 +114,17 @@ impl<'agent> ChunkUploader<'agent> {
pub(crate) async fn finalize_upload(
&self,
semaphores: &Semaphores,
mode: Mode,
) -> Result<(), CreateChunkError> {
// Crude estimate: If `MAX_CHUNK_SIZE / 2` bytes are added as data to the `commit_batch` args the message won't be above the message size limit.
self.upload_chunks(MAX_CHUNK_SIZE / 2, semaphores).await
let max_retained_bytes = if mode == Mode::ByProposal {
// Never add data to the commit_batch args, because they have to fit in a single call.
0
} else {
// Crude estimate: If `MAX_CHUNK_SIZE / 2` bytes are added as data to the `commit_batch` args the message won't be above the message size limit.
MAX_CHUNK_SIZE / 2
};

self.upload_chunks(max_retained_bytes, semaphores).await
}

pub(crate) fn bytes(&self) -> usize {
Expand Down Expand Up @@ -412,6 +426,7 @@ pub(crate) async fn make_project_assets(
chunk_upload_target: Option<&ChunkUploader<'_>>,
asset_descriptors: Vec<AssetDescriptor>,
canister_assets: &HashMap<String, AssetDetails>,
mode: Mode,
logger: &Logger,
) -> Result<HashMap<String, ProjectAsset>, CreateProjectAssetError> {
let semaphores = Semaphores::new();
Expand All @@ -430,11 +445,14 @@ pub(crate) async fn make_project_assets(
.collect();
let project_assets = try_join_all(project_asset_futures).await?;
if let Some(uploader) = chunk_upload_target {
uploader.finalize_upload(&semaphores).await.map_err(|err| {
CreateProjectAssetError::CreateEncodingError(CreateEncodingError::CreateChunkFailed(
err,
))
})?;
uploader
.finalize_upload(&semaphores, mode)
.await
.map_err(|err| {
CreateProjectAssetError::CreateEncodingError(
CreateEncodingError::CreateChunkFailed(err),
)
})?;
}

let mut hm = HashMap::new();
Expand Down
10 changes: 8 additions & 2 deletions src/canisters/frontend/ic-asset/src/evidence/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -55,8 +55,14 @@ pub async fn compute_evidence(
logger,
"Computing evidence for batch operations for assets in the project.",
);
let project_assets =
make_project_assets(None, asset_descriptors, &canister_assets, logger).await?;
let project_assets = make_project_assets(
None,
asset_descriptors,
&canister_assets,
crate::batch_upload::plumbing::Mode::ByProposal,
logger,
)
.await?;

let mut operations = assemble_batch_operations(
None,
Expand Down
5 changes: 5 additions & 0 deletions src/canisters/frontend/ic-asset/src/sync.rs
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@ use crate::asset::config::{
};
use crate::batch_upload::operations::BATCH_UPLOAD_API_VERSION;
use crate::batch_upload::plumbing::ChunkUploader;
use crate::batch_upload::plumbing::Mode::{ByProposal, NormalDeploy};
use crate::batch_upload::{
self,
operations::AssetDeletionReason,
Expand Down Expand Up @@ -48,6 +49,7 @@ pub async fn upload_content_and_assemble_sync_operations(
canister_api_version: u16,
dirs: &[&Path],
no_delete: bool,
mode: batch_upload::plumbing::Mode,
logger: &Logger,
) -> Result<CommitBatchArguments, UploadContentError> {
let asset_descriptors = gather_asset_descriptors(dirs, logger)?;
Expand Down Expand Up @@ -82,6 +84,7 @@ pub async fn upload_content_and_assemble_sync_operations(
Some(&chunk_uploader),
asset_descriptors,
&canister_assets,
mode,
logger,
)
.await
Expand Down Expand Up @@ -133,6 +136,7 @@ pub async fn sync(
canister_api_version,
dirs,
no_delete,
NormalDeploy,
logger,
)
.await?;
Expand Down Expand Up @@ -214,6 +218,7 @@ pub async fn prepare_sync_for_proposal(
canister_api_version,
dirs,
false,
ByProposal,
logger,
)
.await?;
Expand Down
2 changes: 2 additions & 0 deletions src/canisters/frontend/ic-asset/src/upload.rs
Original file line number Diff line number Diff line change
@@ -1,5 +1,6 @@
use crate::asset::config::AssetConfig;
use crate::batch_upload::operations::BATCH_UPLOAD_API_VERSION;
use crate::batch_upload::plumbing::Mode::NormalDeploy;
use crate::batch_upload::{
self,
operations::AssetDeletionReason,
Expand Down Expand Up @@ -49,6 +50,7 @@ pub async fn upload(
Some(&chunk_upload_target),
asset_descriptors,
&canister_assets,
NormalDeploy,
logger,
)
.await?;
Expand Down

0 comments on commit 0ca8ca0

Please sign in to comment.