ahqzy/onnx_convert · float16.py (29.32 KB)
ahqzy committed on 2022-12-26 14:57 · support preproc fp32->fp16
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###########################################################################
import itertools
import numpy as np
import onnx
import packaging.version as pv
from onnx import helper, numpy_helper
from onnx import onnx_pb as onnx_proto
def _npfloat16_to_int(np_list):
'''
Convert numpy float16 to python int.
:param np_list: numpy float16 list
:return int_list: python int list
'''
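    # view('H') reinterprets each float16's bits as an unsigned 16-bit integer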
return [int(bin(_.view('H'))[2:].zfill(16), 2) for _ in np_list]
def convert_np_to_float16(np_array, min_positive_val=1e-7, max_finite_val=1e4):
'''
Convert float32 numpy array to float16 without changing sign or finiteness.
Positive values less than min_positive_val are mapped to min_positive_val.
Positive finite values greater than max_finite_val are mapped to max_finite_val.
Similar for negative values. NaN, 0, inf, and -inf are unchanged.
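    Example (illustrative values):
    ::
        convert_np_to_float16(np.array([1e-9, 2e5, -3.0], dtype=np.float32))
        # 1e-9 clamps up to min_positive_val; 2e5 clamps down to max_finite_val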
'''
def between(a, b, c):
return np.logical_and(a < b, b < c)
np_array = np.where(between(0, np_array, min_positive_val), min_positive_val, np_array)
np_array = np.where(between(-min_positive_val, np_array, 0), -min_positive_val, np_array)
np_array = np.where(between(max_finite_val, np_array, float('inf')), max_finite_val, np_array)
np_array = np.where(between(float('-inf'), np_array, -max_finite_val), -max_finite_val, np_array)
return np.float16(np_array)
def convert_tensor_float_to_float16(tensor, min_positive_val=1e-7, max_finite_val=1e4):
'''
Convert tensor float to float16.
:param tensor: TensorProto object
:return tensor_float16: converted TensorProto object
Example:
::
from onnxmltools.utils.float16_converter import convert_tensor_float_to_float16
new_tensor = convert_tensor_float_to_float16(tensor)
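    Example 2 (illustrative, converts in place):
    ::
        t = helper.make_tensor('w', onnx_proto.TensorProto.FLOAT, [2], [1e-9, 2e5])
        t16 = convert_tensor_float_to_float16(t)  # t.data_type is now FLOAT16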
'''
if not isinstance(tensor, onnx_proto.TensorProto):
raise ValueError('Expected input type is an ONNX TensorProto but got %s' % type(tensor))
if tensor.data_type == onnx_proto.TensorProto.FLOAT:
tensor.data_type = onnx_proto.TensorProto.FLOAT16
# convert float_data (float type) to float16 and write to int32_data
if tensor.float_data:
float16_data = convert_np_to_float16(np.array(tensor.float_data), min_positive_val, max_finite_val)
int_list = _npfloat16_to_int(float16_data)
tensor.int32_data[:] = int_list
tensor.float_data[:] = []
# convert raw_data (bytes type)
if tensor.raw_data:
            # reinterpret tensor.raw_data bytes as float32
            float32_list = np.frombuffer(tensor.raw_data, dtype='float32')
# convert float to float16
float16_list = convert_np_to_float16(float32_list, min_positive_val, max_finite_val)
# convert float16 to bytes and write back to raw_data
            tensor.raw_data = float16_list.tobytes()
return tensor
def make_value_info_from_tensor(tensor):
shape = numpy_helper.to_array(tensor).shape
return helper.make_tensor_value_info(tensor.name, tensor.data_type, shape)
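# Ops kept in float32 by default: classical-ML (ai.onnx.ml) operators plus ops whose
# spec or common runtime support requires float32 inputs.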
DEFAULT_OP_BLOCK_LIST = ['ArrayFeatureExtractor', 'Binarizer', 'CastMap', 'CategoryMapper', 'DictVectorizer',
'FeatureVectorizer', 'Imputer', 'LabelEncoder', 'LinearClassifier', 'LinearRegressor',
'Normalizer', 'OneHotEncoder', 'SVMClassifier', 'SVMRegressor', 'Scaler',
'TreeEnsembleClassifier', 'TreeEnsembleRegressor', 'ZipMap', 'NonMaxSuppression', 'TopK',
'RoiAlign', 'ResizeTest', 'Range', 'CumSum', 'Upsample', 'DequantizeLinear', 'QuantizeLinear', 'PreProc']
def convert_float_to_float16_old(model, min_positive_val=1e-7, max_finite_val=1e4,
keep_io_types=False, disable_shape_infer=False,
op_block_list=None, node_block_list=None):
'''
Convert tensor float type in the ONNX ModelProto input to tensor float16.
:param model: ONNX ModelProto object
:param disable_shape_infer: Type/shape information is needed for conversion to work.
Set to True only if the model already has type/shape information for all tensors.
:return: converted ONNX ModelProto object
Examples:
::
Example 1: Convert ONNX ModelProto object:
from onnxmltools.utils.float16_converter import convert_float_to_float16
new_onnx_model = convert_float_to_float16(onnx_model)
Example 2: Convert ONNX model binary file:
from onnxmltools.utils.float16_converter import convert_float_to_float16
from onnxmltools.utils import load_model, save_model
onnx_model = load_model('model.onnx')
new_onnx_model = convert_float_to_float16(onnx_model)
save_model(new_onnx_model, 'new_model.onnx')
'''
func_infer_shape = None
if not disable_shape_infer and pv.Version(onnx.__version__) >= pv.Version('1.2'):
try:
from onnx.shape_inference import infer_shapes
func_infer_shape = infer_shapes
        except ImportError:
            pass
if not isinstance(model, onnx_proto.ModelProto):
raise ValueError('Expected model type is an ONNX ModelProto but got %s' % type(model))
# create blocklists
if op_block_list is None:
op_block_list = DEFAULT_OP_BLOCK_LIST
if node_block_list is None:
node_block_list = []
op_block_list = set(op_block_list)
node_block_list = set(node_block_list)
# create a queue for BFS
queue = []
value_info_list = []
node_list = []
# type inference on input model
if func_infer_shape is not None:
model = func_infer_shape(model)
queue.append(model)
name_mapping = {}
graph_io_to_skip = set()
io_casts = set()
#qiuzy add
initializer = []
for init in model.graph.initializer:
initializer.append(init.name)
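    # with keep_io_types, graph inputs/outputs stay float32 and Cast nodes
    # bridge them to the float16 interior of the graph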
if keep_io_types:
for i, n in enumerate(model.graph.input):
if n.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT:
if n.name in initializer:
continue
output_name = 'graph_input_cast_' + str(i)
name_mapping[n.name] = output_name
graph_io_to_skip.add(n.name)
node_name = 'graph_input_cast' + str(i)
new_value_info = model.graph.value_info.add()
new_value_info.CopyFrom(n)
new_value_info.name = output_name
new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT16
                # add Cast node (from tensor(float) to tensor(float16)) after graph input
new_node = [helper.make_node('Cast', [n.name], [output_name], to=10, name=node_name)]
model.graph.node.extend(new_node)
value_info_list.append(new_value_info)
io_casts.add(node_name)
for i, n in enumerate(model.graph.output):
if n.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT:
input_name = 'graph_output_cast_' + str(i)
name_mapping[n.name] = input_name
graph_io_to_skip.add(n.name)
node_name = 'graph_output_cast' + str(i)
                # add Cast node (from tensor(float16) to tensor(float)) before graph output
new_value_info = model.graph.value_info.add()
new_value_info.CopyFrom(n)
new_value_info.name = input_name
new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT16
new_node = [helper.make_node('Cast', [input_name], [n.name], to=1, name=node_name)]
model.graph.node.extend(new_node)
value_info_list.append(new_value_info)
io_casts.add(node_name)
while queue:
next_level = []
for q in queue:
# if q is model, push q.graph (GraphProto)
if isinstance(q, onnx_proto.ModelProto):
next_level.append(q.graph)
# if q is model.graph, push q.node.attribute (AttributeProto)
if isinstance(q, onnx_proto.GraphProto):
for n in q.node:
                    # if n is in the block list (it doesn't support float16), skip conversion
                    # and save the node for further processing
if n.name in io_casts:
continue
for i in range(len(n.input)):
if n.input[i] in name_mapping:
n.input[i] = name_mapping[n.input[i]]
for i in range(len(n.output)):
if n.output[i] in name_mapping:
n.output[i] = name_mapping[n.output[i]]
                    # don't push attributes of blocked nodes into next_level,
                    # so their tensors are not converted to float16
if n.op_type in op_block_list or n.name in node_block_list:
node_list.append(n)
else:
if n.op_type == 'Cast':
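                            # retarget float32 Casts to float16 (TensorProto enum: 1 = FLOAT, 10 = FLOAT16)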
for attr in n.attribute:
if attr.name == 'to' and attr.i == 1:
attr.i = 10
break
for attr in n.attribute:
next_level.append(attr)
# if q is model.graph.node.attribute, push q.g and q.graphs (GraphProto)
# and process node.attribute.t and node.attribute.tensors (TensorProto)
if isinstance(q, onnx_proto.AttributeProto):
next_level.append(q.g)
for n in q.graphs:
next_level.append(n)
q.t.CopyFrom(convert_tensor_float_to_float16(q.t, min_positive_val, max_finite_val))
for n in q.tensors:
n = convert_tensor_float_to_float16(n, min_positive_val, max_finite_val)
# if q is graph, process graph.initializer(TensorProto), input, output and value_info (ValueInfoProto)
if isinstance(q, onnx_proto.GraphProto):
for n in q.initializer: # TensorProto type
if n.data_type == onnx_proto.TensorProto.FLOAT:
n = convert_tensor_float_to_float16(n, min_positive_val, max_finite_val)
value_info_list.append(make_value_info_from_tensor(n))
# for all ValueInfoProto with tensor(float) type in input, output and value_info, convert them to
# tensor(float16) except map and seq(map). And save them in value_info_list for further processing
for n in itertools.chain(q.input, q.output, q.value_info):
if n.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT:
if n.name not in graph_io_to_skip:
n.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT16
value_info_list.append(n)
queue = next_level
    # process the nodes in the block list that don't support tensor(float16)
for node in node_list:
        # if an input name appears in value_info_list, the input is tensor(float16):
        # insert a float16-to-float Cast before the node, rename the node's input,
        # and create a new value_info for the new name
for i in range(len(node.input)):
input = node.input[i]
for value_info in value_info_list:
if input == value_info.name:
# create new value_info for current node's new input name
new_value_info = model.graph.value_info.add()
new_value_info.CopyFrom(value_info)
output_name = node.name + '_input_cast_' + str(i)
new_value_info.name = output_name
new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT
                    # add Cast node (from tensor(float16) to tensor(float)) before current node
node_name = node.name + '_input_cast' + str(i)
new_node = [helper.make_node('Cast', [input], [output_name], to=1, name=node_name)]
model.graph.node.extend(new_node)
# change current node's input name
node.input[i] = output_name
break
        # if an output name appears in value_info_list, the output is tensor(float16):
        # insert a float-to-float16 Cast after the node, rename the node's output,
        # and create a new value_info for the new name
for i in range(len(node.output)):
output = node.output[i]
for value_info in value_info_list:
if output == value_info.name:
# create new value_info for current node's new output
new_value_info = model.graph.value_info.add()
new_value_info.CopyFrom(value_info)
input_name = node.name + '_output_cast_' + str(i)
new_value_info.name = input_name
new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT
                    # add Cast node (from tensor(float) to tensor(float16)) after current node
node_name = node.name + '_output_cast' + str(i)
new_node = [helper.make_node('Cast', [input_name], [output], to=10, name=node_name)]
model.graph.node.extend(new_node)
                    # change current node's output name
                    node.output[i] = input_name
break
return model
#qiuzy
def get_resize_param(model):
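    '''Collect inputs[1:] (roi/scales/sizes) of every Resize node so they can stay float32.'''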
resize_param_list = []
for n in model.graph.node:
if n.op_type == 'Resize':
for in_ in n.input[1:]:
if in_ not in resize_param_list:
resize_param_list.append(in_)
return resize_param_list
def get_layernorm_param(model):
layernorm_input1_list = []
    # (disabled) collect LayerNorm scale/bias inputs so they could stay float32:
    # for n in model.graph.node:
    #     if n.op_type == 'LayerNorm':
    #         input_1 = n.input[1]
    #         input_2 = n.input[2]
    #         if input_1 not in layernorm_input1_list:
    #             layernorm_input1_list.append(input_1)
    #         if input_2 not in layernorm_input1_list:
    #             layernorm_input1_list.append(input_2)
return layernorm_input1_list
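# presumably the per-channel std constants of the custom PreProc op; kept in float32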
preproc_filter_list = ['const_std_r', 'const_std_g', 'const_std_b']
def convert_float_to_float16(model, min_positive_val=1e-7, max_finite_val=1e4,
keep_io_types=False, disable_shape_infer=False,
op_block_list=None, node_block_list=None):
'''
Convert tensor float type in the ONNX ModelProto input to tensor float16.
:param model: ONNX ModelProto object
:param disable_shape_infer: Type/shape information is needed for conversion to work.
Set to True only if the model already has type/shape information for all tensors.
:return: converted ONNX ModelProto object
Examples:
::
Example 1: Convert ONNX ModelProto object:
from onnxmltools.utils.float16_converter import convert_float_to_float16
new_onnx_model = convert_float_to_float16(onnx_model)
Example 2: Convert ONNX model binary file:
from onnxmltools.utils.float16_converter import convert_float_to_float16
from onnxmltools.utils import load_model, save_model
onnx_model = load_model('model.onnx')
new_onnx_model = convert_float_to_float16(onnx_model)
save_model(new_onnx_model, 'new_model.onnx')
'''
resize_param_list = get_resize_param(model)
ln_input1_list = get_layernorm_param(model)
func_infer_shape = None
    if not disable_shape_infer and pv.Version(onnx.__version__) >= pv.Version('1.2'):
try:
from onnx.shape_inference import infer_shapes
func_infer_shape = infer_shapes
        except ImportError:
            pass
if not isinstance(model, onnx_proto.ModelProto):
raise ValueError('Expected model type is an ONNX ModelProto but got %s' % type(model))
# create blocklists
if op_block_list is None:
op_block_list = DEFAULT_OP_BLOCK_LIST
if node_block_list is None:
node_block_list = []
op_block_list = set(op_block_list)
node_block_list = set(node_block_list)
# create a queue for BFS
queue = []
value_info_list = []
node_list = []
# type inference on input model
if func_infer_shape is not None:
model = func_infer_shape(model)
queue.append(model)
name_mapping = {}
graph_io_to_skip = set()
io_casts = set()
#qiuzy add
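    # attribute names (mainly from Resize) skipped by the BFS so their contents stay untouched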
attributes_black_list = ['coordinate_transformation_mode', 'cubic_coeff_a', 'mode', 'nearest_mode']
initializer = []
for init in model.graph.initializer:
initializer.append(init.name)
if keep_io_types:
for i, n in enumerate(model.graph.input):
if n.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT:
if n.name in initializer:
continue
output_name = 'graph_input_cast_' + str(i)
name_mapping[n.name] = output_name
graph_io_to_skip.add(n.name)
node_name = 'graph_input_cast' + str(i)
new_value_info = model.graph.value_info.add()
new_value_info.CopyFrom(n)
new_value_info.name = output_name
new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT16
                # add Cast node (from tensor(float) to tensor(float16)) after graph input
new_node = [helper.make_node('Cast', [n.name], [output_name], to=10, name=node_name)]
model.graph.node.extend(new_node)
value_info_list.append(new_value_info)
io_casts.add(node_name)
for i, n in enumerate(model.graph.output):
if n.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT:
input_name = 'graph_output_cast_' + str(i)
name_mapping[n.name] = input_name
graph_io_to_skip.add(n.name)
node_name = 'graph_output_cast' + str(i)
                # add Cast node (from tensor(float16) to tensor(float)) before graph output
new_value_info = model.graph.value_info.add()
new_value_info.CopyFrom(n)
new_value_info.name = input_name
new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT16
new_node = [helper.make_node('Cast', [input_name], [n.name], to=1, name=node_name)]
model.graph.node.extend(new_node)
value_info_list.append(new_value_info)
io_casts.add(node_name)
while queue:
next_level = []
for q in queue:
# if q is model, push q.graph (GraphProto)
if isinstance(q, onnx_proto.ModelProto):
next_level.append(q.graph)
# if q is model.graph, push q.node.attribute (AttributeProto)
if isinstance(q, onnx_proto.GraphProto):
for n in q.node:
                    # if n is in the block list (it doesn't support float16), skip conversion
                    # and save the node for further processing
if n.name in io_casts:
continue
for i in range(len(n.input)):
if n.input[i] in name_mapping:
n.input[i] = name_mapping[n.input[i]]
for i in range(len(n.output)):
if n.output[i] in name_mapping:
n.output[i] = name_mapping[n.output[i]]
if n.op_type in op_block_list or n.name in node_block_list:
node_list.append(n)
else:
if n.op_type == 'Cast':
for attr in n.attribute:
if attr.name == 'to' and attr.i == 1:
attr.i = 10
break
for attr in n.attribute:
if attr.name not in attributes_black_list:
next_level.append(attr)
# if q is model.graph.node.attribute, push q.g and q.graphs (GraphProto)
# and process node.attribute.t and node.attribute.tensors (TensorProto)
if isinstance(q, onnx_proto.AttributeProto):
next_level.append(q.g)
for n in q.graphs:
next_level.append(n)
q.t.CopyFrom(convert_tensor_float_to_float16(q.t, min_positive_val, max_finite_val))
for n in q.tensors:
n = convert_tensor_float_to_float16(n, min_positive_val, max_finite_val)
# if q is graph, process graph.initializer(TensorProto), input, output and value_info (ValueInfoProto)
if isinstance(q, onnx_proto.GraphProto):
for n in q.initializer: # TensorProto type
if n.name not in resize_param_list:
if n.data_type == onnx_proto.TensorProto.FLOAT and n.name not in preproc_filter_list and n.name not in ln_input1_list:
n = convert_tensor_float_to_float16(n, min_positive_val, max_finite_val)
value_info_list.append(make_value_info_from_tensor(n))
# for all ValueInfoProto with tensor(float) type in input, output and value_info, convert them to
# tensor(float16) except map and seq(map). And save them in value_info_list for further processing
for n in itertools.chain(q.input, q.output, q.value_info):
if n.type.tensor_type.elem_type == onnx_proto.TensorProto.FLOAT and n.name not in preproc_filter_list:
if n.name not in graph_io_to_skip and n.name not in resize_param_list and n.name not in ln_input1_list:
n.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT16
value_info_list.append(n)
queue = next_level
    # process the nodes in the block list that don't support tensor(float16)
for node in node_list:
        # if an input name appears in value_info_list, the input is tensor(float16):
        # insert a float16-to-float Cast before the node, rename the node's input,
        # and create a new value_info for the new name
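        # 'flag' marks that a Cast was spliced via the value_info path; when set,
        # the ResizeTest special-casing below is skipped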
flag = False
for i in range(len(node.input)):
input = node.input[i]
for value_info in value_info_list:
if input == value_info.name:
# create new value_info for current node's new input name
new_value_info = model.graph.value_info.add()
new_value_info.CopyFrom(value_info)
output_name = node.name + '_input_cast_' + str(i)
new_value_info.name = output_name
new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT
                    # add Cast node (from tensor(float16) to tensor(float)) before current node
node_name = node.name + '_input_cast' + str(i)
new_node = [helper.make_node('Cast', [input], [output_name], to=1, name=node_name)]
model.graph.node.extend(new_node)
# change current node's input name
node.input[i] = output_name
flag = True
break
if flag:
flag = False
continue
record_list = []
        # newly added section: splice a float16-to-float32 Cast in front of the ResizeTest input
if node.op_type == "ResizeTest":
node_name = node.name + '_input_cast_k' + str(i)
for n in model.graph.node:
if n in record_list:
continue
# if n.op_type == "Cast":
# continue
for j, output_name in enumerate(n.output):
if output_name == node.input[i]:# and "Cast" not in node.input[i] :
if node.op_type == "ResizeTest" and len(node.input) > 3 and i == 3:
continue
new_node = [helper.make_node('Cast', ['Cast_in_32_' + input], ['Cast_out_32_' + input], to=1, name=node_name)]
model.graph.node.extend(new_node)
record_list.append(new_node[0])
for n2 in model.graph.node:
for k in range(len(n2.input)):
if n2.input[k] == input:
n2.input[k] = 'Cast_out_32_' + input
for k in range(len(n2.output)):
if n2.output[k] == input:
n2.output[k] = 'Cast_in_32_' + input
        # if an output name appears in value_info_list, the output is tensor(float16):
        # insert a float-to-float16 Cast after the node, rename the node's output,
        # and create a new value_info for the new name
for i in range(len(node.output)):
output = node.output[i]
for value_info in value_info_list:
if output == value_info.name:
# create new value_info for current node's new output
new_value_info = model.graph.value_info.add()
new_value_info.CopyFrom(value_info)
input_name = node.name + '_output_cast_' + str(i)
new_value_info.name = input_name
new_value_info.type.tensor_type.elem_type = onnx_proto.TensorProto.FLOAT
                    # add Cast node (from tensor(float) to tensor(float16)) after current node
node_name = node.name + '_output_cast' + str(i)
new_node = [helper.make_node('Cast', [input_name], [output], to=10, name=node_name)]
model.graph.node.extend(new_node)
                    # change current node's output name
                    node.output[i] = input_name
flag = True
break
if flag:
            flag = False
continue
record_list = []
        # newly added section: splice a float32-to-float16 Cast after the ResizeTest output
if node.op_type == "ResizeTest":
node_name = node.name + '_output_cast_k' + str(i)
for n in model.graph.node:
# if n.op_type == "Cast":
# continue
if n in record_list:
continue
for j, input_name in enumerate(n.input):
if input_name == output:# and "Cast" not in output:
new_node = [helper.make_node('Cast', ['Cast_in_16_' + output], ['Cast_out_16_'+ output], to=10, name=node_name)]
model.graph.node.extend(new_node)
record_list.append(new_node[0])
for n2 in model.graph.node:
for k in range(len(n2.output)):
if n2.output[k] == output:
n2.output[k] = 'Cast_in_16_' + output
for k in range(len(n2.input)):
if n2.input[k] == output:
n2.input[k] = 'Cast_out_16_' + output
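    # final fixup: nodes that stayed float16 are rewired from 'Cast_out_32_*' (the
    # float32 side) back to 'Cast_in_32_*' (the float16 side) of the spliced Casts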
for n in model.graph.node:
if n.op_type not in op_block_list and n.op_type != "Cast":
for id, name in enumerate(n.input):
if "Cast_out_32_" in name:
                    n.input[id] = name.replace('out', 'in')
return model
def convert_float_to_float16_model_path(model_path, min_positive_val=1e-7, max_finite_val=1e4, keep_io_types=False):
'''
Convert tensor float type in the ONNX Model to tensor float16.
    *This fixes an issue where the infer_shapes function cannot infer models larger than 2GB.
    *This function, however, can be applied to models of any size.
:param model_path: ONNX Model path
:return: converted ONNX ModelProto object
Examples
::
#Convert to ONNX ModelProto object and save model binary file:
from onnxmltools.utils.float16_converter import convert_float_to_float16_model_path
new_onnx_model = convert_float_to_float16_model_path('model.onnx')
onnx.save(new_onnx_model, 'new_model.onnx')
'''
disable_shape_infer = False
if pv.Version(onnx.__version__) >= pv.Version('1.8'):
try:
# infer_shapes_path can be applied to all model sizes
from onnx.shape_inference import infer_shapes_path
import tempfile
import os
# shape_infer_model_path should be in the same folder of model_path
with tempfile.NamedTemporaryFile(dir=os.path.dirname(model_path)) as tmpfile:
shape_infer_model_path = tmpfile.name
infer_shapes_path(model_path, shape_infer_model_path)
model = onnx.load(shape_infer_model_path)
disable_shape_infer = True
        except Exception:
            pass
if not disable_shape_infer:
model = onnx.load(model_path)
return convert_float_to_float16(model, min_positive_val, max_finite_val, keep_io_types, disable_shape_infer)
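

# A minimal end-to-end sketch (hypothetical file names, not part of the original module):
if __name__ == '__main__':
    # convert a float32 ONNX model on disk to float16, keeping float32 graph I/O
    fp16_model = convert_float_to_float16_model_path('model.onnx', keep_io_types=True)
    onnx.save(fp16_model, 'model_fp16.onnx')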