From 18aa16baf9498bf6abbc42afdf03a7113f059946 Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Fri, 8 Nov 2024 08:56:57 -0500
Subject: [PATCH 01/11] adapt constant layers in pytorch to work with Constant class introduced with QONNX parser

---
 hls4ml/converters/pytorch/core.py   | 16 +++++++++
 hls4ml/converters/pytorch_to_hls.py | 51 +++++++++++++++++++++++++----
 2 files changed, 61 insertions(+), 6 deletions(-)

diff --git a/hls4ml/converters/pytorch/core.py b/hls4ml/converters/pytorch/core.py
index 2c05b7501f..994446d56c 100644
--- a/hls4ml/converters/pytorch/core.py
+++ b/hls4ml/converters/pytorch/core.py
@@ -1,5 +1,21 @@
 from hls4ml.converters.pytorch_to_hls import pytorch_handler
+import numpy as np
 
+@pytorch_handler('Constant')
+#def parse_constant_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config):
+def parse_constant_layer(operation, layer_name, node):
+    assert 'Constant' in operation
+
+    layer = {}
+
+    layer['class_name'] = 'Constant'
+    layer['name'] = layer_name
+
+    constant = np.array(node._args)
+    layer['value'] = constant
+    output_shape = constant.shape
+
+    return layer, output_shape
 
 @pytorch_handler('Linear')
 def parse_linear_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config):
diff --git a/hls4ml/converters/pytorch_to_hls.py b/hls4ml/converters/pytorch_to_hls.py
index 79ca1fa5c6..c9508e2d23 100644
--- a/hls4ml/converters/pytorch_to_hls.py
+++ b/hls4ml/converters/pytorch_to_hls.py
@@ -1,4 +1,5 @@
 import torch
+import numpy as np
 
 from hls4ml.model import ModelGraph
 
@@ -159,6 +160,31 @@ def parse_pytorch_model(config, verbose=True):
 
     n_inputs = 0
 
+    print(traced_model.graph)
+
+    # check for constant nodes
+    merge_layers = ['add','mul','sub','fmin','fmax']
+    i=0 # count number of consts and use it in the name
+    for node in traced_model.graph.nodes:
+        if node.name in merge_layers:
+            for arg in node.args:
+                if np.isscalar(arg):
+                    # add an input node with the constant value
+                    new_node = traced_model.graph.placeholder(
+                        name='const_'+str(i),
+                        type_expr=torch.Tensor,
+                        default_value=arg
+                    )
+                    node.prepend(new_node)
+                    node.update_arg(1,new_node)
+                    i += 1
+
+    print(traced_model.graph)
+    #import pdb; breakpoint()
+    traced_model.graph.lint()
+
+
     for node in traced_model.graph.nodes:
         if node.op == 'call_module':
             # modules that are part of a torch.nn.Sequential with name 'name' have target names 'name.x',
@@ -249,13 +275,26 @@ def parse_pytorch_model(config, verbose=True):
 
             input_layer = {}
             input_layer['name'] = node.name
-            input_layer['class_name'] = 'InputLayer'
-            input_layer['input_shape'] = list(input_shapes[n_inputs][1:])
-            layer_list.insert(n_inputs, input_layer)
 
-            output_shapes[input_layer['name']] = list(input_shapes[n_inputs])
-            input_layers.append(input_layer['name'])
-            n_inputs += 1
+            if 'const' in node.name:
+                pytorch_class = "Constant"
+                layer, output_shape = layer_handlers[pytorch_class](pytorch_class, node.name, node)
+
+                layer_list.append(layer)
+
+                assert output_shape is not None
+                output_shapes[layer['name']] = output_shape
+
+            else:
+
+                input_layer['class_name'] = 'InputLayer'
+                input_layer['input_shape'] = list(input_shapes[n_inputs][1:])
+                layer_list.insert(n_inputs, input_layer)
+
+                output_shapes[input_layer['name']] = list(input_shapes[n_inputs])
+
+                input_layers.append(input_layer['name'])
+                n_inputs += 1
 
         layer_counter += 1

From c50c09e1e13e42dec39350130a0c80a9427d2c93 Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Tue, 12 Nov 2024 10:45:13 -0500
Subject: [PATCH 02/11] apply pre-commit

---
 hls4ml/converters/pytorch/core.py | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/hls4ml/converters/pytorch/core.py b/hls4ml/converters/pytorch/core.py
index 994446d56c..29fbf69394 100644
--- a/hls4ml/converters/pytorch/core.py
+++ b/hls4ml/converters/pytorch/core.py
@@ -1,8 +1,10 @@
-from hls4ml.converters.pytorch_to_hls import pytorch_handler
 import numpy as np
 
+from hls4ml.converters.pytorch_to_hls import pytorch_handler
+
+
 @pytorch_handler('Constant')
-#def parse_constant_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config):
+# def parse_constant_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config):
 def parse_constant_layer(operation, layer_name, node):
     assert 'Constant' in operation
 
@@ -17,6 +19,7 @@ def parse_constant_layer(operation, layer_name, node):
 
     return layer, output_shape
 
+
 @pytorch_handler('Linear')
 def parse_linear_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config):
     assert 'Linear' in operation

From 3413cb5f143d5b1efb068f56e874328049b8cd15 Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Tue, 12 Nov 2024 10:49:07 -0500
Subject: [PATCH 03/11] apply pre-commit

---
 hls4ml/converters/pytorch_to_hls.py | 28 ++++++++++++----------------
 1 file changed, 12 insertions(+), 16 deletions(-)

diff --git a/hls4ml/converters/pytorch_to_hls.py b/hls4ml/converters/pytorch_to_hls.py
index c9508e2d23..a8b8c2f4a2 100644
--- a/hls4ml/converters/pytorch_to_hls.py
+++ b/hls4ml/converters/pytorch_to_hls.py
@@ -1,5 +1,5 @@
-import torch
 import numpy as np
+import torch
 
 from hls4ml.model import ModelGraph
 
@@ -162,29 +162,25 @@ def parse_pytorch_model(config, verbose=True):
 
     print(traced_model.graph)
 
-    # check for constant nodes
-    merge_layers = ['add','mul','sub','fmin','fmax']
-    i=0 # count number of consts and use it in the name
+    # check for constant nodes
+    merge_layers = ['add', 'mul', 'sub', 'fmin', 'fmax']
+    i = 0  # count number of consts and use it in the name
     for node in traced_model.graph.nodes:
         if node.name in merge_layers:
             for arg in node.args:
                 if np.isscalar(arg):
                     # add an input node with the constant value
                     new_node = traced_model.graph.placeholder(
-                        name='const_'+str(i),
-                        type_expr=torch.Tensor,
-                        default_value=arg
+                        name='const_' + str(i), type_expr=torch.Tensor, default_value=arg
                     )
                     node.prepend(new_node)
-                    node.update_arg(1,new_node)
+                    node.update_arg(1, new_node)
                     i += 1
 
     print(traced_model.graph)
-    #import pdb; breakpoint()
+    # import pdb; breakpoint()
     traced_model.graph.lint()
-
-
     for node in traced_model.graph.nodes:
         if node.op == 'call_module':
             # modules that are part of a torch.nn.Sequential with name 'name' have target names 'name.x',

From b39fedaa298b133861ed0c9ea6d0e2ff63063237 Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Tue, 12 Nov 2024 10:51:53 -0500
Subject: [PATCH 04/11] clean print statements and commented code

---
 hls4ml/converters/pytorch/core.py   | 1 -
 hls4ml/converters/pytorch_to_hls.py | 4 ----
 2 files changed, 5 deletions(-)

diff --git a/hls4ml/converters/pytorch/core.py b/hls4ml/converters/pytorch/core.py
index 29fbf69394..7cc20fc282 100644
--- a/hls4ml/converters/pytorch/core.py
+++ b/hls4ml/converters/pytorch/core.py
@@ -4,7 +4,6 @@
 
 
 @pytorch_handler('Constant')
-# def parse_constant_layer(operation, layer_name, input_names, input_shapes, node, class_object, data_reader, config):
 def parse_constant_layer(operation, layer_name, node):
     assert 'Constant' in operation
 
diff --git a/hls4ml/converters/pytorch_to_hls.py b/hls4ml/converters/pytorch_to_hls.py
index a8b8c2f4a2..06f7b95364 100644
--- a/hls4ml/converters/pytorch_to_hls.py
+++ b/hls4ml/converters/pytorch_to_hls.py
@@ -160,8 +160,6 @@ def parse_pytorch_model(config, verbose=True):
 
     n_inputs = 0
 
-    print(traced_model.graph)
-
     # check for constant nodes
     merge_layers = ['add', 'mul', 'sub', 'fmin', 'fmax']
     i = 0  # count number of consts and use it in the name
@@ -177,8 +175,6 @@ def parse_pytorch_model(config, verbose=True):
                     node.update_arg(1, new_node)
                     i += 1
 
-    print(traced_model.graph)
-    # import pdb; breakpoint()
     traced_model.graph.lint()
 
     for node in traced_model.graph.nodes:

From 92d65d9c0cf18b0eacd4942612267d5f692a32fc Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Tue, 12 Nov 2024 11:09:21 -0500
Subject: [PATCH 05/11] adhere to quote style

---
 hls4ml/converters/pytorch_to_hls.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hls4ml/converters/pytorch_to_hls.py b/hls4ml/converters/pytorch_to_hls.py
index 06f7b95364..8af2830365 100644
--- a/hls4ml/converters/pytorch_to_hls.py
+++ b/hls4ml/converters/pytorch_to_hls.py
@@ -269,7 +269,7 @@ def parse_pytorch_model(config, verbose=True):
             input_layer['name'] = node.name
 
             if 'const' in node.name:
-                pytorch_class = "Constant"
+                pytorch_class = 'Constant'
                 layer, output_shape = layer_handlers[pytorch_class](pytorch_class, node.name, node)
 
                 layer_list.append(layer)

From 68b4c4fec42a7a115573ffa2a1a5cc439e052c0a Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Tue, 12 Nov 2024 11:46:59 -0500
Subject: [PATCH 06/11] fix name matching for multiple instances of mul etc

---
 hls4ml/converters/pytorch/core.py   | 1 +
 hls4ml/converters/pytorch_to_hls.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/hls4ml/converters/pytorch/core.py b/hls4ml/converters/pytorch/core.py
index 7cc20fc282..57c42f401f 100644
--- a/hls4ml/converters/pytorch/core.py
+++ b/hls4ml/converters/pytorch/core.py
@@ -8,6 +8,7 @@ def parse_constant_layer(operation, layer_name, node):
     assert 'Constant' in operation
 
     layer = {}
+    layer['inputs'] = []
 
     layer['class_name'] = 'Constant'
     layer['name'] = layer_name
diff --git a/hls4ml/converters/pytorch_to_hls.py b/hls4ml/converters/pytorch_to_hls.py
index 8af2830365..f3615200d0 100644
--- a/hls4ml/converters/pytorch_to_hls.py
+++ b/hls4ml/converters/pytorch_to_hls.py
@@ -164,7 +164,7 @@ def parse_pytorch_model(config, verbose=True):
     merge_layers = ['add', 'mul', 'sub', 'fmin', 'fmax']
     i = 0  # count number of consts and use it in the name
     for node in traced_model.graph.nodes:
-        if node.name in merge_layers:
+        if node.name.split("_")[0] in merge_layers:
             for arg in node.args:
                 if np.isscalar(arg):
                     # add an input node with the constant value

From e1aeaa55cdf070b6f980659924c71a96ad3ba763 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 7 Jan 2025 09:49:40 -0500
Subject: [PATCH 07/11] [pre-commit.ci] pre-commit autoupdate (#1159)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/asottile/pyupgrade: v3.19.0 → v3.19.1](https://github.com/asottile/pyupgrade/compare/v3.19.0...v3.19.1)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 0601a84b2d..d45ffbdd27 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -30,7 +30,7 @@ repos:
         args: ["--profile", "black", --line-length=125]
 
   - repo: https://github.com/asottile/pyupgrade
-    rev: v3.19.0
+    rev: v3.19.1
     hooks:
       - id: pyupgrade
        args: ["--py36-plus"]

From f7038f7594040b24f42610f94a67d27c6523fad0 Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Tue, 7 Jan 2025 13:03:28 -0500
Subject: [PATCH 08/11] fix transposes before reshape layers and catch 3D transposes in io_stream

---
 hls4ml/model/optimizer/passes/convert_to_channels_last.py | 7 ++++++-
 hls4ml/utils/config.py                                     | 2 +-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/hls4ml/model/optimizer/passes/convert_to_channels_last.py b/hls4ml/model/optimizer/passes/convert_to_channels_last.py
index 0b5f12c008..606f42e54b 100644
--- a/hls4ml/model/optimizer/passes/convert_to_channels_last.py
+++ b/hls4ml/model/optimizer/passes/convert_to_channels_last.py
@@ -97,12 +97,17 @@ def transform(self, model, node):
         if (
             isinstance(node, Reshape)
             and len(node.attributes['target_shape']) == 1
-            and not model.config.config['HLSConfig']['Model']['ChannelsLastConversion'] == "internal"
+            and not model.config.config['HLSConfig']['Model']['ChannelsLastConversion'] == "off"
         ):
             previous_node = node.get_input_node(node.inputs[0])
             input = previous_node.name
             outshape = previous_node.get_output_variable().shape
 
+            if (model.config.config['IOType'] == 'io_stream') and len(outshape) == 3:
+                raise Exception(
+                    'No 3D transpose available in io_stream, this model cannot be converted to channels-last'
+                )
+
             if len(outshape) == 2:
                 attributes = {'perm': [1, 0]}
             else:
diff --git a/hls4ml/utils/config.py b/hls4ml/utils/config.py
index e450084095..1db8e3c731 100644
--- a/hls4ml/utils/config.py
+++ b/hls4ml/utils/config.py
@@ -283,7 +283,7 @@ def config_from_pytorch_model(
     default_precision='ap_fixed<16,6>',
     default_reuse_factor=1,
     channels_last_conversion='full',
-    transpose_outputs=True,
+    transpose_outputs=False,
     max_precision=None,
 ):
     """Create an HLS conversion config given the PyTorch model.

From 31219e35912364d5ad8e69552c38ee2305b5fc0d Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Tue, 7 Jan 2025 14:26:07 -0500
Subject: [PATCH 09/11] update pytest to new default setting

---
 test/pytest/test_pytorch_api.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/test/pytest/test_pytorch_api.py b/test/pytest/test_pytorch_api.py
index 3056bd13f8..3de0b3f193 100644
--- a/test/pytest/test_pytorch_api.py
+++ b/test/pytest/test_pytorch_api.py
@@ -498,7 +498,7 @@ def test_pooling(pooling, padds, backend):
     model.eval()
     pytorch_prediction = model(torch.Tensor(X_input)).detach().numpy()
 
-    config = config_from_pytorch_model(model, input_shape_forHLS)
+    config = config_from_pytorch_model(model, input_shape_forHLS, transpose_outputs=True)
     output_dir = str(test_root_path / f'hls4mlprj_pytorch_api_pooling_{pooling.__name__}_padds_{padds}_backend_{backend}')
     hls_model = convert_from_pytorch_model(model, hls_config=config, output_dir=output_dir, backend=backend)
     hls_model.compile()

From 4b7e12de87f37f490b93efa436f0333416231684 Mon Sep 17 00:00:00 2001
From: Benjamin Ramhorst <59868635+bo3z@users.noreply.github.com>
Date: Tue, 7 Jan 2025 20:31:42 +0100
Subject: [PATCH 10/11] Fix Vivado Accelerator missing partition factor variable (#1160)

Co-authored-by: Jan-Frederik Schulte
---
 hls4ml/writer/vivado_accelerator_writer.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/hls4ml/writer/vivado_accelerator_writer.py b/hls4ml/writer/vivado_accelerator_writer.py
index cefa158e11..817847887d 100644
--- a/hls4ml/writer/vivado_accelerator_writer.py
+++ b/hls4ml/writer/vivado_accelerator_writer.py
@@ -394,6 +394,8 @@ def write_board_script(self, model):
             f.write('set clock_uncertainty {}\n'.format(model.config.get_config_value('ClockUncertainty', '12.5%')))
             f.write('variable version\n')
             f.write('set version "{}"\n'.format(model.config.get_config_value('Version', '1.0.0')))
+            f.write('variable maximum_size\n')
+            f.write('set maximum_size {}\n'.format(model.config.get_config_value('MaximumSize', '4096')))
             if self.vivado_accelerator_config.get_interface() == 'axi_stream':
                 in_bit, out_bit = self.vivado_accelerator_config.get_io_bitwidth()
                 f.write(f'set bit_width_hls_output {in_bit}\n')

From 21f517934bbf1f26b6409fc4a29dbd9bc57cc150 Mon Sep 17 00:00:00 2001
From: Jan-Frederik Schulte
Date: Fri, 10 Jan 2025 08:35:51 -0500
Subject: [PATCH 11/11] style fix

---
 hls4ml/converters/pytorch_to_hls.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/hls4ml/converters/pytorch_to_hls.py b/hls4ml/converters/pytorch_to_hls.py
index f3615200d0..871026bc49 100644
--- a/hls4ml/converters/pytorch_to_hls.py
+++ b/hls4ml/converters/pytorch_to_hls.py
@@ -164,7 +164,7 @@ def parse_pytorch_model(config, verbose=True):
     merge_layers = ['add', 'mul', 'sub', 'fmin', 'fmax']
     i = 0  # count number of consts and use it in the name
     for node in traced_model.graph.nodes:
-        if node.name.split("_")[0] in merge_layers:
+        if node.name.split('_')[0] in merge_layers:
             for arg in node.args:
                 if np.isscalar(arg):
                     # add an input node with the constant value
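
Editor's note - illustrative usage sketch, not part of the patch series above. Patches 01-06 and 11 teach the PyTorch FX parser to absorb scalar constants appearing as arguments of add/mul/sub/fmin/fmax nodes, and patches 08-09 flip the config_from_pytorch_model default to transpose_outputs=False. The snippet below sketches the kind of model and conversion call this enables; the module, input shape, output directory, and backend are assumptions, not taken from the patches.

import torch.nn as nn

from hls4ml.converters import convert_from_pytorch_model
from hls4ml.utils.config import config_from_pytorch_model


class ScaleShift(nn.Module):
    def forward(self, x):
        # Traces to 'mul' and 'add' FX nodes with scalar arguments; the patched parser
        # inserts 'const_0', 'const_1', ... placeholder nodes that become Constant layers.
        return x * 0.5 + 3.0


model = ScaleShift().eval()

# Hypothetical input shape; transpose_outputs=True restores the pre-patch-08 default,
# as the updated pooling pytest in patch 09 does.
config = config_from_pytorch_model(model, (16,), transpose_outputs=True)
hls_model = convert_from_pytorch_model(
    model, hls_config=config, output_dir='hls4mlprj_constant_sketch', backend='Vivado'
)
hls_model.compile()

With the patched parser, the scalars 0.5 and 3.0 surface as 'const_*' placeholders in the traced graph and are handed to the new parse_constant_layer handler instead of failing during conversion.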