zhengrongzhang and XiaodongWang committed
Commit 2f72c46
1 Parent(s): c36ee24

Rename onnx_inference.py to infer_onnx.py (#4)


- Rename onnx_inference.py to infer_onnx.py (c7d7a6f43f39e8f1b95ded6600786e774de9c4a1)


Co-authored-by: Xiaodong Wang <XiaodongWang@users.noreply.huggingface.co>

onnx_inference.py → infer_onnx.py RENAMED
@@ -53,9 +53,9 @@ def make_parser():
     parser = argparse.ArgumentParser("onnxruntime inference sample")
     parser.add_argument(
         "-m",
-        "--model",
+        "--onnx_model",
         type=str,
-        default="./yolov5s_qat.onnx",
+        default="./yolov5s.onnx",
         help="input your onnx model.",
     )
     parser.add_argument(
@@ -99,13 +99,13 @@ names = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', '
 
 if __name__ == '__main__':
     args = make_parser().parse_args()
-    onnx_path = args.model
+    onnx_path = args.onnx_model
     if args.ipu:
         providers = ["VitisAIExecutionProvider"]
         provider_options = [{"config_file": args.provider_config}]
-        onnx_model = onnxruntime.InferenceSession(onnx_path, providers=providers, provider_options=provider_options)
+        onnx_weight = onnxruntime.InferenceSession(onnx_path, providers=providers, provider_options=provider_options)
     else:
-        onnx_model = onnxruntime.InferenceSession(onnx_path)
+        onnx_weight = onnxruntime.InferenceSession(onnx_path)
     grid = np.load("./grid.npy", allow_pickle=True)
     anchor_grid = np.load("./anchor_grid.npy", allow_pickle=True)
     path = args.image_path
@@ -114,8 +114,8 @@ if __name__ == '__main__':
 
     img0 = cv2.imread(path)
     img = pre_process(img0)
-    onnx_input = {onnx_model.get_inputs()[0].name: img.transpose(0, 2, 3, 1)}
-    onnx_output = onnx_model.run(None, onnx_input)
+    onnx_input = {onnx_weight.get_inputs()[0].name: img.transpose(0, 2, 3, 1)}
+    onnx_output = onnx_weight.run(None, onnx_input)
     onnx_output = [torch.tensor(item).permute(0, 3, 1, 2) for item in onnx_output]
     onnx_output = post_process(onnx_output)
     pred = non_max_suppression(
@@ -137,3 +137,4 @@ if __name__ == '__main__':
     # Stream results
     im0 = annotator.result()
     cv2.imwrite(new_path, im0)
+
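For readers skimming the diff, below is a minimal, self-contained sketch of the provider selection that infer_onnx.py performs when creating the onnxruntime session. The use_ipu flag and the "./vaip_config.json" path are illustrative stand-ins for args.ipu and args.provider_config, not values taken from this repository.

    # Minimal sketch of the session setup in infer_onnx.py (illustrative only).
    import onnxruntime

    onnx_path = "./yolov5s.onnx"   # default of the renamed --onnx_model flag
    use_ipu = False                # stand-in for args.ipu

    if use_ipu:
        # Vitis AI execution provider; the config file path is an assumption,
        # the script takes it from args.provider_config.
        session = onnxruntime.InferenceSession(
            onnx_path,
            providers=["VitisAIExecutionProvider"],
            provider_options=[{"config_file": "./vaip_config.json"}],
        )
    else:
        # Fall back to onnxruntime's default (CPU) execution provider.
        session = onnxruntime.InferenceSession(onnx_path)

    # The script feeds the preprocessed image under the first input's name.
    print(session.get_inputs()[0].name)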