# YOLOv5 🚀 by Ultralytics, GPL-3.0 license
"""
Export a YOLOv5 PyTorch model to other formats. TensorFlow exports authored by https://github.com/zldrobit
Format | `export.py --include` | Model
--- | --- | ---
PyTorch | - | yolov5s.pt
TorchScript | `torchscript` | yolov5s.torchscript
ONNX | `onnx` | yolov5s.onnx
OpenVINO | `openvino` | yolov5s_openvino_model/
TensorRT | `engine` | yolov5s.engine
CoreML | `coreml` | yolov5s.mlmodel
TensorFlow SavedModel | `saved_model` | yolov5s_saved_model/
TensorFlow GraphDef | `pb` | yolov5s.pb
TensorFlow Lite | `tflite` | yolov5s.tflite
TensorFlow Edge TPU | `edgetpu` | yolov5s_edgetpu.tflite
TensorFlow.js | `tfjs` | yolov5s_web_model/
Usage:
$ python path/to/export.py --weights yolov5s.pt --include torchscript onnx openvino engine coreml tflite ...
Inference:
$ python path/to/detect.py --weights yolov5s.pt # PyTorch
yolov5s.torchscript # TorchScript
yolov5s.onnx # ONNX Runtime or OpenCV DNN with --dnn
yolov5s.xml # OpenVINO
yolov5s.engine # TensorRT
yolov5s.mlmodel # CoreML (macOS-only)
yolov5s_saved_model # TensorFlow SavedModel
yolov5s.pb # TensorFlow GraphDef
yolov5s.tflite # TensorFlow Lite
yolov5s_edgetpu.tflite # TensorFlow Edge TPU
TensorFlow.js:
$ cd .. && git clone https://github.com/zldrobit/tfjs-yolov5-example.git && cd tfjs-yolov5-example
$ npm install
$ ln -s ../../yolov5/yolov5s_web_model public/yolov5s_web_model
$ npm start
"""
import argparse
import json
import os
import platform
import subprocess
import sys
import time
from pathlib import Path
import torch
import torch.nn as nn
from torch.utils.mobile_optimizer import optimize_for_mobile
FILE = Path(__file__).resolve()
ROOT = FILE.parents[0] # YOLOv5 root directory
if str(ROOT) not in sys.path:
sys.path.append(str(ROOT)) # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative
from models.common import Conv
from models.experimental import attempt_load
from models.yolo import Detect
from utils.activations import SiLU
from utils.datasets import LoadImages
from utils.general import (
LOGGER,
check_dataset,
check_img_size,
check_requirements,
check_version,
colorstr,
file_size,
print_args,
url2file,
)
from utils.torch_utils import select_device
def export_torchscript(model, im, file, optimize, prefix=colorstr("TorchScript:")):
# YOLOv5 TorchScript model export
try:
LOGGER.info(f"\n{prefix} starting export with torch {torch.__version__}...")
f = file.with_suffix(".torchscript")
ts = torch.jit.trace(model, im, strict=False)
d = {"shape": im.shape, "stride": int(max(model.stride)), "names": model.names}
extra_files = {"config.txt": json.dumps(d)} # torch._C.ExtraFilesMap()
if optimize: # https://pytorch.org/tutorials/recipes/mobile_interpreter.html
optimize_for_mobile(ts)._save_for_lite_interpreter(str(f), _extra_files=extra_files)
else:
ts.save(str(f), _extra_files=extra_files)
LOGGER.info(f"{prefix} export success, saved as {f} ({file_size(f):.1f} MB)")
return f
except Exception as e:
LOGGER.info(f"{prefix} export failure: {e}")
def export_onnx(model, im, file, opset, train, dynamic, simplify, prefix=colorstr("ONNX:")):
# YOLOv5 ONNX export
try:
check_requirements(("onnx",))
import onnx
LOGGER.info(f"\n{prefix} starting export with onnx {onnx.__version__}...")
f = file.with_suffix(".onnx")
torch.onnx.export(
model,
im,
f,
verbose=False,
opset_version=opset,
training=torch.onnx.TrainingMode.TRAINING if train else torch.onnx.TrainingMode.EVAL,
do_constant_folding=not train,
input_names=["images"],
output_names=["output"],
dynamic_axes={
"images": {0: "batch", 2: "height", 3: "width"}, # shape(1,3,640,640)
"output": {0: "batch", 1: "anchors"}, # shape(1,25200,85)
}
if dynamic
else None,
)
# Checks
model_onnx = onnx.load(f) # load onnx model
onnx.checker.check_model(model_onnx) # check onnx model
# LOGGER.info(onnx.helper.printable_graph(model_onnx.graph)) # print
# Simplify
if simplify:
try:
check_requirements(("onnx-simplifier",))
import onnxsim
LOGGER.info(f"{prefix} simplifying with onnx-simplifier {onnxsim.__version__}...")
model_onnx, check = onnxsim.simplify(
model_onnx,
dynamic_input_shape=dynamic,
input_shapes={"images": list(im.shape)} if dynamic else None,
)
                assert check, "simplified ONNX model could not be validated"
onnx.save(model_onnx, f)
except Exception as e:
LOGGER.info(f"{prefix} simplifier failure: {e}")
LOGGER.info(f"{prefix} export success, saved as {f} ({file_size(f):.1f} MB)")
return f
except Exception as e:
LOGGER.info(f"{prefix} export failure: {e}")
def export_openvino(model, im, file, prefix=colorstr("OpenVINO:")):
# YOLOv5 OpenVINO export
try:
check_requirements(
("openvino-dev",)
) # requires openvino-dev: https://pypi.org/project/openvino-dev/
import openvino.inference_engine as ie
LOGGER.info(f"\n{prefix} starting export with openvino {ie.__version__}...")
f = str(file).replace(".pt", "_openvino_model" + os.sep)
cmd = f"mo --input_model {file.with_suffix('.onnx')} --output_dir {f}"
subprocess.check_output(cmd, shell=True)
LOGGER.info(f"{prefix} export success, saved as {f} ({file_size(f):.1f} MB)")
return f
except Exception as e:
LOGGER.info(f"\n{prefix} export failure: {e}")
def export_coreml(model, im, file, prefix=colorstr("CoreML:")):
# YOLOv5 CoreML export
try:
check_requirements(("coremltools",))
import coremltools as ct
LOGGER.info(f"\n{prefix} starting export with coremltools {ct.__version__}...")
f = file.with_suffix(".mlmodel")
ts = torch.jit.trace(model, im, strict=False) # TorchScript model
ct_model = ct.convert(
ts, inputs=[ct.ImageType("image", shape=im.shape, scale=1 / 255, bias=[0, 0, 0])]
)
ct_model.save(f)
LOGGER.info(f"{prefix} export success, saved as {f} ({file_size(f):.1f} MB)")
return ct_model, f
except Exception as e:
LOGGER.info(f"\n{prefix} export failure: {e}")
return None, None
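
# CoreML prediction runs on macOS only. A hypothetical sanity check against the
# "image" input declared above:
#
#   from PIL import Image
#   pred = ct_model.predict({"image": Image.new("RGB", (640, 640))})
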
def export_engine(
model,
im,
file,
train,
half,
simplify,
workspace=4,
verbose=False,
prefix=colorstr("TensorRT:"),
):
# YOLOv5 TensorRT export https://developer.nvidia.com/tensorrt
try:
check_requirements(("tensorrt",))
import tensorrt as trt
if (
trt.__version__[0] == "7"
): # TensorRT 7 handling https://github.com/ultralytics/yolov5/issues/6012
grid = model.model[-1].anchor_grid
model.model[-1].anchor_grid = [a[..., :1, :1, :] for a in grid]
export_onnx(model, im, file, 12, train, False, simplify) # opset 12
model.model[-1].anchor_grid = grid
else: # TensorRT >= 8
check_version(trt.__version__, "8.0.0", hard=True) # require tensorrt>=8.0.0
export_onnx(model, im, file, 13, train, False, simplify) # opset 13
onnx = file.with_suffix(".onnx")
assert onnx.exists(), f"failed to export ONNX file: {onnx}"
LOGGER.info(f"\n{prefix} starting export with TensorRT {trt.__version__}...")
f = file.with_suffix(".engine") # TensorRT engine file
logger = trt.Logger(trt.Logger.INFO)
if verbose:
logger.min_severity = trt.Logger.Severity.VERBOSE
builder = trt.Builder(logger)
config = builder.create_builder_config()
config.max_workspace_size = workspace * 1 << 30
flag = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
network = builder.create_network(flag)
parser = trt.OnnxParser(network, logger)
if not parser.parse_from_file(str(onnx)):
raise RuntimeError(f"failed to load ONNX file: {onnx}")
inputs = [network.get_input(i) for i in range(network.num_inputs)]
outputs = [network.get_output(i) for i in range(network.num_outputs)]
LOGGER.info(f"{prefix} Network Description:")
for inp in inputs:
LOGGER.info(
f'{prefix}\tinput "{inp.name}" with shape {inp.shape} and dtype {inp.dtype}'
)
for out in outputs:
LOGGER.info(
f'{prefix}\toutput "{out.name}" with shape {out.shape} and dtype {out.dtype}'
)
half &= builder.platform_has_fast_fp16
LOGGER.info(f"{prefix} building FP{16 if half else 32} engine in {f}")
if half:
config.set_flag(trt.BuilderFlag.FP16)
with builder.build_engine(network, config) as engine, open(f, "wb") as t:
t.write(engine.serialize())
LOGGER.info(f"{prefix} export success, saved as {f} ({file_size(f):.1f} MB)")
return f
except Exception as e:
LOGGER.info(f"\n{prefix} export failure: {e}")
def export_saved_model(
model,
im,
file,
dynamic,
tf_nms=False,
agnostic_nms=False,
topk_per_class=100,
topk_all=100,
iou_thres=0.45,
conf_thres=0.25,
prefix=colorstr("TensorFlow SavedModel:"),
):
# YOLOv5 TensorFlow SavedModel export
try:
import tensorflow as tf
from tensorflow import keras
from models.tf import TFDetect, TFModel
LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
f = str(file).replace(".pt", "_saved_model")
batch_size, ch, *imgsz = list(im.shape) # BCHW
tf_model = TFModel(cfg=model.yaml, model=model, nc=model.nc, imgsz=imgsz)
im = tf.zeros((batch_size, *imgsz, 3)) # BHWC order for TensorFlow
y = tf_model.predict(
im, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres
)
inputs = keras.Input(shape=(*imgsz, 3), batch_size=None if dynamic else batch_size)
outputs = tf_model.predict(
inputs, tf_nms, agnostic_nms, topk_per_class, topk_all, iou_thres, conf_thres
)
keras_model = keras.Model(inputs=inputs, outputs=outputs)
keras_model.trainable = False
keras_model.summary()
keras_model.save(f, save_format="tf")
LOGGER.info(f"{prefix} export success, saved as {f} ({file_size(f):.1f} MB)")
return keras_model, f
except Exception as e:
LOGGER.info(f"\n{prefix} export failure: {e}")
return None, None
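
# The SavedModel directory reloads directly with Keras or tf.saved_model
# (illustrative; input is BHWC float32, matching the keras.Input above):
#
#   import numpy as np
#   import tensorflow as tf
#   model = tf.keras.models.load_model("yolov5s_saved_model")
#   y = model(np.zeros((1, 640, 640, 3), dtype=np.float32))
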
def export_pb(keras_model, im, file, prefix=colorstr("TensorFlow GraphDef:")):
# YOLOv5 TensorFlow GraphDef *.pb export https://github.com/leimao/Frozen_Graph_TensorFlow
try:
import tensorflow as tf
from tensorflow.python.framework.convert_to_constants import (
convert_variables_to_constants_v2,
)
LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
f = file.with_suffix(".pb")
m = tf.function(lambda x: keras_model(x)) # full model
m = m.get_concrete_function(
tf.TensorSpec(keras_model.inputs[0].shape, keras_model.inputs[0].dtype)
)
frozen_func = convert_variables_to_constants_v2(m)
frozen_func.graph.as_graph_def()
tf.io.write_graph(
graph_or_graph_def=frozen_func.graph, logdir=str(f.parent), name=f.name, as_text=False
)
LOGGER.info(f"{prefix} export success, saved as {f} ({file_size(f):.1f} MB)")
return f
except Exception as e:
LOGGER.info(f"\n{prefix} export failure: {e}")
def export_tflite(keras_model, im, file, int8, data, ncalib, prefix=colorstr("TensorFlow Lite:")):
# YOLOv5 TensorFlow Lite export
try:
import tensorflow as tf
LOGGER.info(f"\n{prefix} starting export with tensorflow {tf.__version__}...")
batch_size, ch, *imgsz = list(im.shape) # BCHW
f = str(file).replace(".pt", "-fp16.tflite")
converter = tf.lite.TFLiteConverter.from_keras_model(keras_model)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS]
converter.target_spec.supported_types = [tf.float16]
converter.optimizations = [tf.lite.Optimize.DEFAULT]
if int8:
from models.tf import representative_dataset_gen
dataset = LoadImages(
check_dataset(data)["train"], img_size=imgsz, auto=False
) # representative data
converter.representative_dataset = lambda: representative_dataset_gen(dataset, ncalib)
converter.target_spec.supported_ops = [tf.lite.OpsSet.TFLITE_BUILTINS_INT8]
converter.target_spec.supported_types = []
converter.inference_input_type = tf.uint8 # or tf.int8
converter.inference_output_type = tf.uint8 # or tf.int8
converter.experimental_new_quantizer = False
f = str(file).replace(".pt", "-int8.tflite")
tflite_model = converter.convert()
open(f, "wb").write(tflite_model)
LOGGER.info(f"{prefix} export success, saved as {f} ({file_size(f):.1f} MB)")
return f
except Exception as e:
LOGGER.info(f"\n{prefix} export failure: {e}")
def export_edgetpu(keras_model, im, file, prefix=colorstr("Edge TPU:")):
# YOLOv5 Edge TPU export https://coral.ai/docs/edgetpu/models-intro/
try:
cmd = "edgetpu_compiler --version"
help_url = "https://coral.ai/docs/edgetpu/compiler/"
assert platform.system() == "Linux", f"export only supported on Linux. See {help_url}"
if subprocess.run(cmd, shell=True).returncode != 0:
LOGGER.info(
f"\n{prefix} export requires Edge TPU compiler. Attempting install from {help_url}"
)
for c in [
"curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -",
'echo "deb https://packages.cloud.google.com/apt coral-edgetpu-stable main" | sudo tee /etc/apt/sources.list.d/coral-edgetpu.list',
"sudo apt-get update",
"sudo apt-get install edgetpu-compiler",
]:
subprocess.run(c, shell=True, check=True)
ver = (
subprocess.run(cmd, shell=True, capture_output=True, check=True)
.stdout.decode()
.split()[-1]
)
LOGGER.info(f"\n{prefix} starting export with Edge TPU compiler {ver}...")
f = str(file).replace(".pt", "-int8_edgetpu.tflite") # Edge TPU model
f_tfl = str(file).replace(".pt", "-int8.tflite") # TFLite model
cmd = f"edgetpu_compiler -s {f_tfl}"
subprocess.run(cmd, shell=True, check=True)
LOGGER.info(f"{prefix} export success, saved as {f} ({file_size(f):.1f} MB)")
return f
except Exception as e:
LOGGER.info(f"\n{prefix} export failure: {e}")
def export_tfjs(keras_model, im, file, prefix=colorstr("TensorFlow.js:")):
# YOLOv5 TensorFlow.js export
try:
check_requirements(("tensorflowjs",))
import re
import tensorflowjs as tfjs
LOGGER.info(f"\n{prefix} starting export with tensorflowjs {tfjs.__version__}...")
f = str(file).replace(".pt", "_web_model") # js dir
f_pb = file.with_suffix(".pb") # *.pb path
f_json = f + "/model.json" # *.json path
cmd = (
f"tensorflowjs_converter --input_format=tf_frozen_model "
f'--output_node_names="Identity,Identity_1,Identity_2,Identity_3" {f_pb} {f}'
)
subprocess.run(cmd, shell=True)
        json_txt = open(f_json).read()  # renamed to avoid shadowing the json module
with open(f_json, "w") as j: # sort JSON Identity_* in ascending order
subst = re.sub(
r'{"outputs": {"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}, '
r'"Identity.?.?": {"name": "Identity.?.?"}}}',
r'{"outputs": {"Identity": {"name": "Identity"}, '
r'"Identity_1": {"name": "Identity_1"}, '
r'"Identity_2": {"name": "Identity_2"}, '
r'"Identity_3": {"name": "Identity_3"}}}',
                json_txt,
)
j.write(subst)
LOGGER.info(f"{prefix} export success, saved as {f} ({file_size(f):.1f} MB)")
return f
except Exception as e:
LOGGER.info(f"\n{prefix} export failure: {e}")
@torch.no_grad()
def run(
data=ROOT / "data/coco128.yaml", # 'dataset.yaml path'
weights=ROOT / "yolov5s.pt", # weights path
imgsz=(640, 640), # image (height, width)
batch_size=1, # batch size
device="cpu", # cuda device, i.e. 0 or 0,1,2,3 or cpu
include=("torchscript", "onnx"), # include formats
half=False, # FP16 half-precision export
inplace=False, # set YOLOv5 Detect() inplace=True
train=False, # model.train() mode
optimize=False, # TorchScript: optimize for mobile
int8=False, # CoreML/TF INT8 quantization
dynamic=False, # ONNX/TF: dynamic axes
simplify=False, # ONNX: simplify model
opset=12, # ONNX: opset version
verbose=False, # TensorRT: verbose log
workspace=4, # TensorRT: workspace size (GB)
nms=False, # TF: add NMS to model
agnostic_nms=False, # TF: add agnostic NMS to model
topk_per_class=100, # TF.js NMS: topk per class to keep
topk_all=100, # TF.js NMS: topk for all classes to keep
iou_thres=0.45, # TF.js NMS: IoU threshold
conf_thres=0.25, # TF.js NMS: confidence threshold
):
t = time.time()
include = [x.lower() for x in include]
tf_exports = list(
x in include for x in ("saved_model", "pb", "tflite", "edgetpu", "tfjs")
) # TensorFlow exports
file = Path(url2file(weights) if str(weights).startswith(("http:/", "https:/")) else weights)
# Checks
imgsz *= 2 if len(imgsz) == 1 else 1 # expand
opset = 12 if ("openvino" in include) else opset # OpenVINO requires opset <= 12
# Load PyTorch model
device = select_device(device)
assert not (
device.type == "cpu" and half
), "--half only compatible with GPU export, i.e. use --device 0"
model = attempt_load(weights, map_location=device, inplace=True, fuse=True) # load FP32 model
nc, names = model.nc, model.names # number of classes, class names
# Input
gs = int(max(model.stride)) # grid size (max stride)
imgsz = [check_img_size(x, gs) for x in imgsz] # verify img_size are gs-multiples
im = torch.zeros(batch_size, 3, *imgsz).to(device) # image size(1,3,320,192) BCHW iDetection
# Update model
if half:
im, model = im.half(), model.half() # to FP16
model.train() if train else model.eval() # training mode = no Detect() layer grid construction
for k, m in model.named_modules():
if isinstance(m, Conv): # assign export-friendly activations
if isinstance(m.act, nn.SiLU):
m.act = SiLU()
elif isinstance(m, Detect):
m.inplace = inplace
m.onnx_dynamic = dynamic
# m.forward = m.forward_export # assign forward (optional)
for _ in range(2):
y = model(im) # dry runs
LOGGER.info(f"\n{colorstr('PyTorch:')} starting from {file} ({file_size(file):.1f} MB)")
# Exports
if "torchscript" in include:
f = export_torchscript(model, im, file, optimize)
if "engine" in include: # TensorRT required before ONNX
f = export_engine(model, im, file, train, half, simplify, workspace, verbose)
if ("onnx" in include) or ("openvino" in include): # OpenVINO requires ONNX
f = export_onnx(model, im, file, opset, train, dynamic, simplify)
if "openvino" in include:
f = export_openvino(model, im, file)
if "coreml" in include:
_, f = export_coreml(model, im, file)
# TensorFlow Exports
if any(tf_exports):
pb, tflite, edgetpu, tfjs = tf_exports[1:]
if int8 or edgetpu: # TFLite --int8 bug https://github.com/ultralytics/yolov5/issues/5707
check_requirements(("flatbuffers==1.12",)) # required before `import tensorflow`
assert not (
tflite and tfjs
), "TFLite and TF.js models must be exported separately, please pass only one type."
model, f = export_saved_model(
model,
im,
file,
dynamic,
tf_nms=nms or agnostic_nms or tfjs,
agnostic_nms=agnostic_nms or tfjs,
topk_per_class=topk_per_class,
topk_all=topk_all,
conf_thres=conf_thres,
iou_thres=iou_thres,
) # keras model
if pb or tfjs: # pb prerequisite to tfjs
f = export_pb(model, im, file)
if tflite or edgetpu:
f = export_tflite(model, im, file, int8=int8 or edgetpu, data=data, ncalib=100)
if edgetpu:
f = export_edgetpu(model, im, file)
if tfjs:
f = export_tfjs(model, im, file)
# Finish
LOGGER.info(
f"\nExport complete ({time.time() - t:.2f}s)"
f"\nResults saved to {colorstr('bold', file.parent.resolve())}"
f"\nVisualize with https://netron.app"
f"\nDetect with `python detect.py --weights {f}`"
f" or `model = torch.hub.load('ultralytics/yolov5', 'custom', '{f}')"
f"\nValidate with `python val.py --weights {f}`"
)
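
# run() can also be called programmatically, bypassing argparse, e.g.:
#
#   run(weights=ROOT / "yolov5s.pt", include=("onnx",), imgsz=(640, 640))
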
def parse_opt():
parser = argparse.ArgumentParser()
parser.add_argument(
"--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path"
)
parser.add_argument(
"--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model.pt path(s)"
)
parser.add_argument(
"--imgsz",
"--img",
"--img-size",
nargs="+",
type=int,
default=[640, 640],
help="image (h, w)",
)
parser.add_argument("--batch-size", type=int, default=1, help="batch size")
parser.add_argument("--device", default="cpu", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
parser.add_argument("--half", action="store_true", help="FP16 half-precision export")
parser.add_argument("--inplace", action="store_true", help="set YOLOv5 Detect() inplace=True")
parser.add_argument("--train", action="store_true", help="model.train() mode")
parser.add_argument("--optimize", action="store_true", help="TorchScript: optimize for mobile")
parser.add_argument("--int8", action="store_true", help="CoreML/TF INT8 quantization")
parser.add_argument("--dynamic", action="store_true", help="ONNX/TF: dynamic axes")
parser.add_argument("--simplify", action="store_true", help="ONNX: simplify model")
parser.add_argument("--opset", type=int, default=12, help="ONNX: opset version")
parser.add_argument("--verbose", action="store_true", help="TensorRT: verbose log")
parser.add_argument("--workspace", type=int, default=4, help="TensorRT: workspace size (GB)")
parser.add_argument("--nms", action="store_true", help="TF: add NMS to model")
parser.add_argument(
"--agnostic-nms", action="store_true", help="TF: add agnostic NMS to model"
)
parser.add_argument(
"--topk-per-class", type=int, default=100, help="TF.js NMS: topk per class to keep"
)
parser.add_argument(
"--topk-all", type=int, default=100, help="TF.js NMS: topk for all classes to keep"
)
parser.add_argument("--iou-thres", type=float, default=0.45, help="TF.js NMS: IoU threshold")
parser.add_argument(
"--conf-thres", type=float, default=0.25, help="TF.js NMS: confidence threshold"
)
parser.add_argument(
"--include",
nargs="+",
default=["torchscript", "onnx"],
help="torchscript, onnx, openvino, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs",
)
opt = parser.parse_args()
print_args(FILE.stem, opt)
return opt
def main(opt):
for opt.weights in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
run(**vars(opt))
if __name__ == "__main__":
opt = parse_opt()
main(opt)