zhengrongzhang wangfangyuan committed on
Commit
5424e99
1 Parent(s): d857ef1

Upload code&model for NHWC format (#2)


- Upload 7 files (33c13362c44a2f914057b20869d026d51a400a04)


Co-authored-by: fangyuan wang <wangfangyuan@users.noreply.huggingface.co>

Files changed (3)
  1. onnx_eval.py +7 -4
  2. onnx_inference.py +4 -2
  3. yolov5s_qat.onnx +2 -2
onnx_eval.py CHANGED
@@ -3,9 +3,10 @@ import json
 import os
 import sys
 from pathlib import Path
+import torch
 import onnxruntime
 import numpy as np
-import torch
+
 from tqdm import tqdm
 from pycocotools.coco import COCO
 from pycocotools.cocoeval import COCOeval
@@ -145,8 +146,10 @@ def run(data,
         targets = targets.to(device)
         nb, _, height, width = img.shape  # batch size, channels, height, width

-        outputs = onnx_model.run(None, {onnx_model.get_inputs()[0].name: img.cpu().numpy()})
-        outputs = [torch.tensor(item).to(device) for item in outputs]
+        # outputs = onnx_model.run(None, {onnx_model.get_inputs()[0].name: img.cpu().numpy()})
+        outputs = onnx_model.run(None, {onnx_model.get_inputs()[0].name: img.permute(0, 2, 3, 1).cpu().numpy()})
+        # outputs = [torch.tensor(item).to(device) for item in outputs]
+        outputs = [torch.tensor(item).permute(0, 3, 1, 2).to(device) for item in outputs]
         outputs = post_process(outputs)
         out, train_out = outputs[0], outputs[1]

@@ -267,4 +270,4 @@ def main(opt):

 if __name__ == "__main__":
     opt = parse_opt()
-    main(opt)
+    main(opt)
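The eval change permutes the NCHW batch from the PyTorch dataloader to NHWC before the onnxruntime call, then permutes the NHWC outputs back to NCHW so the existing post_process() is untouched. A minimal sketch of that round trip, assuming a 640x640 input and 4-D NHWC outputs (the file name and shapes below are placeholders, not taken from this repo):

import onnxruntime
import torch

# Placeholder session and input; the real script builds these from its CLI options.
session = onnxruntime.InferenceSession("yolov5s_qat.onnx")
img = torch.rand(1, 3, 640, 640)  # NCHW batch, as produced by the PyTorch dataloader

# NCHW -> NHWC before feeding the NHWC-format model.
nhwc_input = img.permute(0, 2, 3, 1).cpu().numpy()
outputs = session.run(None, {session.get_inputs()[0].name: nhwc_input})

# NHWC -> NCHW so the unchanged post_process() sees the layout it expects.
outputs = [torch.tensor(o).permute(0, 3, 1, 2) for o in outputs]
print([tuple(o.shape) for o in outputs])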
onnx_inference.py CHANGED
@@ -1,7 +1,8 @@
-import onnxruntime
+
 import numpy as np
 import cv2
 import torch
+import onnxruntime
 import sys
 import pathlib
 CURRENT_DIR = pathlib.Path(__file__).parent
@@ -113,8 +114,9 @@ if __name__ == '__main__':

    img0 = cv2.imread(path)
    img = pre_process(img0)
-   onnx_input = {onnx_model.get_inputs()[0].name: img}
+   onnx_input = {onnx_model.get_inputs()[0].name: img.transpose(0, 2, 3, 1)}
    onnx_output = onnx_model.run(None, onnx_input)
+   onnx_output = [torch.tensor(item).permute(0, 3, 1, 2) for item in onnx_output]
    onnx_output = post_process(onnx_output)
    pred = non_max_suppression(
        onnx_output[0], conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det
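In the inference script the input is already a numpy array from pre_process(), so the layout change uses numpy's transpose rather than torch's permute, and the outputs are wrapped back into NCHW torch tensors for post_process() and NMS. A small sketch of both conversions, with a dummy array standing in for the preprocessed image (its 640x640 shape is an assumption):

import numpy as np
import torch

# Dummy array standing in for pre_process(img0).
img = np.zeros((1, 3, 640, 640), dtype=np.float32)

# numpy equivalent of the layout change for the model input: NCHW -> NHWC.
nhwc = img.transpose(0, 2, 3, 1)               # (1, 640, 640, 3)

# The reverse move, NHWC -> NCHW, which the script applies to each model output
# before post_process() and non_max_suppression().
nchw = torch.tensor(nhwc).permute(0, 3, 1, 2)  # (1, 3, 640, 640)
print(nhwc.shape, tuple(nchw.shape))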
yolov5s_qat.onnx CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5ba00d5f170eab6130610bb543c1f4b1e8354f4944c127e61c28beb99beddf26
-size 29141657
+oid sha256:5f05e2860614a4d10757405f5e4ad2849d380631e16915f91aa0f69597d10575
+size 29142007
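Since the .onnx file itself was replaced with an NHWC-format export (only the git-lfs pointer changes here), a quick way to verify the new layout after pulling the file locally is to print the graph's input and output shapes and check that channels sit in the last dimension:

import onnxruntime

# Assumes yolov5s_qat.onnx has been fetched via git-lfs; an NHWC model should
# report a channels-last input shape such as [1, 640, 640, 3].
session = onnxruntime.InferenceSession("yolov5s_qat.onnx")
for node in session.get_inputs():
    print("input ", node.name, node.shape)
for node in session.get_outputs():
    print("output", node.name, node.shape)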