diff --git a/networks/ai85-autoencoder.yaml b/networks/ai85-autoencoder.yaml
index 5a8c7dad..e8223c02 100644
--- a/networks/ai85-autoencoder.yaml
+++ b/networks/ai85-autoencoder.yaml
@@ -1,47 +1,47 @@
----
-arch: ai85autoencoder
-dataset: SampleMotorDataLimerick
-# Define layer parameters in order of the layer sequence
-layers:
-  # Layer 0: 256 channels in --> 64 processors, 4 passes. Conv1D
-  - pad: 0
-    activate: ReLU
-    out_offset: 0x2000
-    processors: 0xffffffffffffffff
-    operation: Conv1d
-    kernel_size: 1
-    data_format: HWC
-  # Layer 1: 128 channels in --> 64 processors, 2 passes. Conv1D
-  - pad: 0
-    activate: ReLU
-    out_offset: 0x0000
-    processors: 0xffffffffffffffff
-    kernel_size: 3
-    operation: Conv1d
-  # Layer 2: 64 inputs --> 64 processors, 1 passes. MLP
-  - pad: 0
-    activate: ReLU
-    out_offset: 0x2000
-    processors: 0xffffffffffffffff
-    operation: MLP
-  # Layer 3: 32 inputs --> 32 processors, 1 pass. MLP
-  - activate: ReLU
-    out_offset: 0x0000
-    processors: 0x00000000ffffffff
-    operation: MLP
-  # Layer 4: 16 inputs --> 16 processors, 1 pass. MLP
-  - activate: ReLU
-    out_offset: 0x2000
-    processors: 0x000000000000000f
-    operation: MLP
-  # Layer 5: 32 inputs --> 32 processors, 1 pass. MLP
-  - activate: ReLU
-    out_offset: 0x4000
-    processors: 0x00000000ffffffff
-    operation: MLP
-  # Layer 6: 96 inputs --> 48 processors, 2 passes. MLP
-  - pad: 0
-    activate: None
-    out_offset: 0x0000
-    processors: 0x0000ffffffffffff
-    operation: MLP
+---
+arch: ai85autoencoder
+dataset: SampleMotorDataLimerick
+# Define layer parameters in order of the layer sequence
+layers:
+  # Layer 0: 256 channels in --> 64 processors, 4 passes. Conv1D
+  - pad: 0
+    activate: ReLU
+    out_offset: 0x2000
+    processors: 0xffffffffffffffff
+    operation: Conv1d
+    kernel_size: 1
+    data_format: HWC
+  # Layer 1: 128 channels in --> 64 processors, 2 passes. Conv1D
+  - pad: 0
+    activate: ReLU
+    out_offset: 0x0000
+    processors: 0xffffffffffffffff
+    kernel_size: 3
+    operation: Conv1d
+  # Layer 2: 64 inputs --> 64 processors, 1 passes. MLP
+  - pad: 0
+    activate: ReLU
+    out_offset: 0x2000
+    processors: 0xffffffffffffffff
+    operation: MLP
+  # Layer 3: 32 inputs --> 32 processors, 1 pass. MLP
+  - activate: ReLU
+    out_offset: 0x0000
+    processors: 0x00000000ffffffff
+    operation: MLP
+  # Layer 4: 16 inputs --> 16 processors, 1 pass. MLP
+  - activate: ReLU
+    out_offset: 0x2000
+    processors: 0x000000000000000f
+    operation: MLP
+  # Layer 5: 32 inputs --> 32 processors, 1 pass. MLP
+  - activate: ReLU
+    out_offset: 0x4000
+    processors: 0x00000000ffffffff
+    operation: MLP
+  # Layer 6: 96 inputs --> 48 processors, 2 passes. MLP
+  - pad: 0
+    activate: None
+    out_offset: 0x0000
+    processors: 0x0000ffffffffffff
+    operation: MLP