
Commit

[Linting] Run pre-commit on files
auphelia committed Aug 2, 2023
1 parent 5942839 commit a4c15df
Showing 4 changed files with 13 additions and 44 deletions.
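The diffs below all collapse statements that were previously wrapped over several lines. A minimal sketch of the effect, assuming the pre-commit hook runs black and that the configured line length is 100; neither the hook configuration nor the exact limit is shown in this commit:

import black

# one of the statements touched below, in its pre-change wrapped form
src = (
    'self.code_gen_dict["$PRAGMAS$"].append(\n'
    '    "#pragma HLS INTERFACE ap_ctrl_none port=return"\n'
    ")\n"
)
# with an assumed line length of 100 the call fits on one line, so black joins it
print(black.format_str(src, mode=black.Mode(line_length=100)))
# self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return")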
src/finn/custom_op/fpgadataflow/fmpadding_pixel.py (4 changes: 1 addition & 3 deletions)
@@ -288,9 +288,7 @@ def pragmas(self):
         self.code_gen_dict["$PRAGMAS$"].append(
             "#pragma HLS INTERFACE axis port=out name=out_" + self.hls_sname()
         )
-        self.code_gen_dict["$PRAGMAS$"].append(
-            "#pragma HLS INTERFACE ap_ctrl_none port=return"
-        )
+        self.code_gen_dict["$PRAGMAS$"].append("#pragma HLS INTERFACE ap_ctrl_none port=return")
 
     def execute_node(self, context, graph):
         mode = self.get_nodeattr("exec_mode")
src/finn/transformation/fpgadataflow/infer_pixel_padding_deconv.py (33 changes: 7 additions & 26 deletions)
@@ -28,10 +28,7 @@ def apply(self, model):
                 idt = model.get_tensor_datatype(deconv_input)
                 odt = model.get_tensor_datatype(deconv_output)
                 if not idt.is_integer():
-                    warnings.warn(
-                        "%s : Input is not int. Can't infer PixelPaddingDeconv."
-                        % n.name
-                    )
+                    warnings.warn("%s : Input is not int. Can't infer PixelPaddingDeconv." % n.name)
                     continue
                 # extract conv transpose parameters
                 k_h = get_by_name(n.attribute, "kernel_shape").ints[0]
@@ -86,13 +83,9 @@ def apply(self, model):
                 # Im2Col node belongs to a depthwise convolution
                 dw = False
                 if group == ifm_ch and ofm_ch == ifm_ch:
-                    W_sparse = np.zeros(
-                        (ifm_ch, ofm_ch, k_h, k_w)
-                    )  # (IFM, OFM, k_H, k_W)
+                    W_sparse = np.zeros((ifm_ch, ofm_ch, k_h, k_w))  # (IFM, OFM, k_H, k_W)
                     for ch in range(ofm_ch):
-                        W_sparse[ch][ch] = W_conv[ch][
-                            0
-                        ]  # W_conv = [IFM, OFM, k_H, k_W]
+                        W_sparse[ch][ch] = W_conv[ch][0]  # W_conv = [IFM, OFM, k_H, k_W]
                     W_conv = W_sparse.astype(np.float32)
                     # we need to store information of the
                     # sparsity of the weight matrix. For this
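The hunk above scatters each depthwise kernel onto the channel diagonal of a dense (IFM, OFM, k_H, k_W) tensor. A small standalone numpy sketch of that expansion, with illustrative shapes that are not taken from this commit:

import numpy as np

ifm_ch = ofm_ch = 3
k_h = k_w = 2
# depthwise ConvTranspose weights: one k_h x k_w kernel per channel
W_conv = np.arange(1.0, ifm_ch * k_h * k_w + 1).reshape(ifm_ch, 1, k_h, k_w)

W_sparse = np.zeros((ifm_ch, ofm_ch, k_h, k_w))  # (IFM, OFM, k_H, k_W)
for ch in range(ofm_ch):
    W_sparse[ch][ch] = W_conv[ch][0]  # copy each kernel onto the channel diagonal

# every cross-channel block stays zero, which encodes the depthwise sparsity
assert np.count_nonzero(W_sparse) == ifm_ch * k_h * k_w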
@@ -148,13 +141,7 @@ def apply(self, model):
                     padding = 0
 
                 # k_h=k_w==1: pointwise convolution, thus no im2col needed
-                if (
-                    k_h == 1
-                    and k_w == 1
-                    and padding == 0
-                    and stride_h == 1
-                    and stride_w == 1
-                ):
+                if k_h == 1 and k_w == 1 and padding == 0 and stride_h == 1 and stride_w == 1:
                     need_im2col = False
 
                 if need_im2col:
@@ -208,17 +195,13 @@ def apply(self, model):
                         stride=[1, 1],
                         kernel_size=[k_h, k_w],
                         pad_amount=conv_padding,
-                        input_shape="(1,{},{},{})".format(
-                            padded_odim_h, padded_odim_w, ifm_ch
-                        ),
+                        input_shape="(1,{},{},{})".format(padded_odim_h, padded_odim_w, ifm_ch),
                         depthwise=dw,
                         dilations=dilation,
                     )
 
                 # do matmul
-                matmul_node = helper.make_node(
-                    "MatMul", [matmul_input, weight_name], [matmul_out]
-                )
+                matmul_node = helper.make_node("MatMul", [matmul_input, weight_name], [matmul_out])
                 # NHWC -> NCHW
                 out_trans_node = helper.make_node(
                     "Transpose", [matmul_out], [deconv_output], perm=[0, 3, 1, 2]
@@ -237,8 +220,6 @@ def apply(self, model):
                 # remove old nodes
                 graph.node.remove(n)
 
-        model = model.transform(
-            InferConvInpGen(use_rtl_variant=self.use_convinpgen_rtl_variant)
-        )
+        model = model.transform(InferConvInpGen(use_rtl_variant=self.use_convinpgen_rtl_variant))
         model = model.transform(InferQuantizedMatrixVectorActivation())
         return (model, graph_modified)
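The transformation above replaces each ConvTranspose node with pixel padding, an Im2Col node and a MatMul. A self-contained 1-D numpy sketch, illustrative only and not code from this repository, of why inserting zeros between input pixels and then running a stride-1 convolution reproduces a transposed convolution:

import numpy as np

def conv_transpose_1d(x, w, stride, padding):
    # direct definition: out[i * stride + t - padding] += x[i] * w[t]
    out_len = (len(x) - 1) * stride - 2 * padding + len(w)
    out = np.zeros(out_len)
    for i, xi in enumerate(x):
        for t, wt in enumerate(w):
            j = i * stride + t - padding
            if 0 <= j < out_len:
                out[j] += xi * wt
    return out

def deconv_via_pixel_padding(x, w, stride, padding):
    # "pixel padding": insert stride-1 zeros between input pixels
    x_up = np.zeros((len(x) - 1) * stride + 1)
    x_up[::stride] = x
    full = np.convolve(x_up, w)  # full stride-1 convolution
    return full[padding : len(full) - padding]

x = np.array([1.0, 2.0, 3.0, 4.0])
w = np.array([1.0, 2.0, 3.0, 4.0])
a = conv_transpose_1d(x, w, stride=2, padding=1)
b = deconv_via_pixel_padding(x, w, stride=2, padding=1)
assert np.allclose(a, b)  # both yield the same 8-element output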
tests/brevitas/test_brevitas_deconv.py (4 changes: 1 addition & 3 deletions)
@@ -66,9 +66,7 @@ def test_brevitas_QTransposeConv(ifm_ch, ofm_ch, mh, mw, padding, stride, kw, bi
         bias=bias,
     )
     # outp = el(inp) # expects NCHW data format
-    export_qonnx(
-        b_deconv.cpu(), input_t=inp.cpu(), export_path=export_path, opset_version=11
-    )
+    export_qonnx(b_deconv.cpu(), input_t=inp.cpu(), export_path=export_path, opset_version=11)
     model = ModelWrapper(export_path)
     qonnx_cleanup(model)
     model = model.transform(ConvertQONNXtoFINN())
tests/fpgadataflow/test_fpgadataflow_deconv.py (16 changes: 4 additions & 12 deletions)
@@ -75,9 +75,7 @@ def set_up_reference_model(idt, wdt, k, idim, ifm_ch, ofm_ch, stride, padding):
             idim_w,
         ],
     )
-    outp = helper.make_tensor_value_info(
-        "outp", TensorProto.FLOAT, [1, ofm_ch, odim_h, odim_w]
-    )
+    outp = helper.make_tensor_value_info("outp", TensorProto.FLOAT, [1, ofm_ch, odim_h, odim_w])
 
     W = helper.make_tensor_value_info("W", TensorProto.FLOAT, [ifm_ch, ofm_ch, k, k])
 
@@ -148,9 +146,7 @@ def test_fpgadataflow_deconv(idim, stride, ifm_ch, ofm_ch, simd, pe, k, padding)
     else:
         convinpgen_rtl = True
 
-    ref_model = set_up_reference_model(
-        idt, wdt, k, idim, ifm_ch, ofm_ch, stride, padding
-    )
+    ref_model = set_up_reference_model(idt, wdt, k, idim, ifm_ch, ofm_ch, stride, padding)
 
     odim_h = (idim_h - 1) * stride_h - 2 * padding + (k - 1) + 1
     odim_w = (idim_w - 1) * stride_w - 2 * padding + (k - 1) + 1
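For reference, a worked instance of the transposed-convolution output-size formula used above, with illustrative values that are not taken from the test parametrization:

idim, stride, padding, k = 4, 2, 1, 4
odim = (idim - 1) * stride - 2 * padding + (k - 1) + 1
print(odim)  # (4 - 1) * 2 - 2 + 3 + 1 = 8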
@@ -198,15 +194,11 @@ def test_fpgadataflow_deconv(idim, stride, ifm_ch, ofm_ch, simd, pe, k, padding)
         dataflow_model_filename = sdp_node.get_nodeattr("model")
         model = ModelWrapper(dataflow_model_filename)
         model.save("after_partition.onnx")
-        model = model.transform(
-            CreateStitchedIP(test_fpga_part, target_clk_ns, vitis=False)
-        )
+        model = model.transform(CreateStitchedIP(test_fpga_part, target_clk_ns, vitis=False))
         model = model.transform(PrepareRTLSim())
         model = model.transform(GiveReadableTensorNames())
         model = model.transform(SetExecMode("rtlsim"))
         model.save("stitched_ip.onnx")
-        y_produced = oxe.execute_onnx(model, input_dict_tr)["global_out"].transpose(
-            0, 3, 1, 2
-        )
+        y_produced = oxe.execute_onnx(model, input_dict_tr)["global_out"].transpose(0, 3, 1, 2)
     assert y_produced.shape == expected_oshape
     assert (y_produced == y_expected).all()
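The rtlsim output above comes back in NHWC layout and is transposed to NCHW before the comparison. A tiny illustration with made-up shapes, not the test's actual dimensions:

import numpy as np
y_nhwc = np.zeros((1, 8, 8, 4))        # (N, H, W, C) as produced by the stitched-IP execution
y_nchw = y_nhwc.transpose(0, 3, 1, 2)  # reorder axes to (N, C, H, W)
print(y_nchw.shape)                    # (1, 4, 8, 8)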
