diff --git a/config/inference/default_binary.yaml b/config/inference/default_binary.yaml
index 2039cce1..718cf6fa 100644
--- a/config/inference/default_binary.yaml
+++ b/config/inference/default_binary.yaml
@@ -6,6 +6,7 @@ inference:
   model_path: ${general.save_weights_dir}/
   output_path:
   checkpoint_dir: # (string, optional): directory in which to save the object if url
+  batch_size: 8
   chunk_size: # if empty, will be calculated automatically from max_pix_per_mb_gpu
   # Maximum number of pixels each Mb of GPU Ram to allow. E.g. if GPU has 1000 Mb of Ram and this parameter is set to
   # 10, chunk_size will be set to sqrt(1000 * 10) = 100.
diff --git a/config/inference/default_multiclass.yaml b/config/inference/default_multiclass.yaml
index ac8b0954..76bfd63a 100644
--- a/config/inference/default_multiclass.yaml
+++ b/config/inference/default_multiclass.yaml
@@ -6,6 +6,7 @@ inference:
   model_path: ${general.save_weights_dir}/
   output_path:
   checkpoint_dir: # (string, optional): directory in which to save the object if url
+  batch_size: 8
   chunk_size: # if empty, will be calculated automatically from max_pix_per_mb_gpu
   # Maximum number of pixels each Mb of GPU Ram to allow. E.g. if GPU has 1000 Mb of Ram and this parameter is set to
   # 10, chunk_size will be set to sqrt(1000 * 10) = 100.
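
For context, the `chunk_size` fallback described in the config comments amounts to taking the square root of (available GPU RAM in Mb) × `max_pix_per_mb_gpu`. Below is a minimal Python sketch of that calculation; the function name `auto_chunk_size` and the `gpu_ram_mb` argument are illustrative assumptions, not the project's actual API.

```python
import math

def auto_chunk_size(gpu_ram_mb: float, max_pix_per_mb_gpu: float) -> int:
    # Illustrative sketch (not the project's implementation): when
    # chunk_size is left empty, the config comment says it is derived as
    # sqrt(gpu_ram_mb * max_pix_per_mb_gpu).
    return int(math.sqrt(gpu_ram_mb * max_pix_per_mb_gpu))

# Worked example from the config comment: 1000 Mb of GPU RAM with
# max_pix_per_mb_gpu = 10 yields sqrt(1000 * 10) = 100.
assert auto_chunk_size(1000, 10) == 100
```

Note that the new `batch_size: 8` key is independent of this fallback: `chunk_size` bounds the spatial extent of each tile read from the raster, while `batch_size` controls how many such tiles are pushed through the model at once.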