Shell
torch.Size([20, 10, 10])
torch.Size([2, 20, 32])
torch.Size([20, 10])
torch.Size([2, 20, 32])
/home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/onnx/utils.py:2041: UserWarning: No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input input
"No names were found for specified dynamic axes of provided input."
/home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/onnx/utils.py:2041: UserWarning: No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input hidden_prev
"No names were found for specified dynamic axes of provided input."
/home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/onnx/utils.py:2041: UserWarning: No names were found for specified dynamic axes of provided input.Automatically generated names will be applied to each dynamic axes of input output
"No names were found for specified dynamic axes of provided input."
/home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/onnx/symbolic_opset9.py:4322: UserWarning: Exporting a model to ONNX with a batch_size other than 1, with a variable length with RNN_TANH can cause an error when running the ONNX model with a different batch size. Make sure to save the model with a batch size of 1, or define the initial states (h0/c0) as inputs of the model.
+ "or define the initial states (h0/c0) as inputs of the model. "
/home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/onnx/_internal/jit_utils.py:258: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.)
_C._jit_pass_onnx_node_shape_type_inference(node, params_dict, opset_version)
/home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/onnx/utils.py:688: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.)
graph, params_dict, GLOBALS.export_onnx_opset_version
/home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/onnx/utils.py:1179: UserWarning: The shape inference of prim::Constant type is missing, so it may result in wrong shape inference for the exported graph. Please consider adding it in symbolic function. (Triggered internally at ../torch/csrc/jit/passes/onnx/shape_type_inference.cpp:1884.)
graph, params_dict, GLOBALS.export_onnx_opset_version
Exported graph: graph(%input : Float(*, 10, 10, strides=[100, 10, 1], requires_grad=0, device=cpu),
%hidden_prev.1 : Float(2, *, 32, strides=[640, 32, 1], requires_grad=1, device=cpu),
%fc.weight : Float(10, 320, strides=[320, 1], requires_grad=1, device=cpu),
%fc.bias : Float(10, strides=[1], requires_grad=1, device=cpu),
%onnx::RNN_58 : Float(1, 32, 10, strides=[320, 10, 1], requires_grad=0, device=cpu),
%onnx::RNN_59 : Float(1, 32, 32, strides=[1024, 32, 1], requires_grad=0, device=cpu),
%onnx::RNN_60 : Float(1, 64, strides=[64, 1], requires_grad=0, device=cpu),
%onnx::RNN_62 : Float(1, 32, 32, strides=[1024, 32, 1], requires_grad=0, device=cpu),
%onnx::RNN_63 : Float(1, 32, 32, strides=[1024, 32, 1], requires_grad=0, device=cpu),
%onnx::RNN_64 : Float(1, 64, strides=[64, 1], requires_grad=0, device=cpu)):
%/rnn/Transpose_output_0 : Float(10, *, 10, device=cpu) = onnx::Transpose[perm=[1, 0, 2], onnx_name="/rnn/Transpose"](%input), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%onnx::RNN_13 : Tensor? = prim::Constant(), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/Constant_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}, onnx_name="/rnn/Constant"](), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/Constant_1_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}, onnx_name="/rnn/Constant_1"](), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/Constant_2_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={1}, onnx_name="/rnn/Constant_2"](), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/Slice_output_0 : Float(1, *, 32, device=cpu) = onnx::Slice[onnx_name="/rnn/Slice"](%hidden_prev.1, %/rnn/Constant_1_output_0, %/rnn/Constant_2_output_0, %/rnn/Constant_output_0), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/RNN_output_0 : Float(10, 1, *, 32, device=cpu), %/rnn/RNN_output_1 : Float(1, *, 32, device=cpu) = onnx::RNN[activations=["Tanh"], hidden_size=32, onnx_name="/rnn/RNN"](%/rnn/Transpose_output_0, %onnx::RNN_58, %onnx::RNN_59, %onnx::RNN_60, %onnx::RNN_13, %/rnn/Slice_output_0), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/Constant_3_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={1}, onnx_name="/rnn/Constant_3"](), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/Squeeze_output_0 : Float(10, *, 32, device=cpu) = onnx::Squeeze[onnx_name="/rnn/Squeeze"](%/rnn/RNN_output_0, %/rnn/Constant_3_output_0), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/Constant_4_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}, onnx_name="/rnn/Constant_4"](), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/Constant_5_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={1}, onnx_name="/rnn/Constant_5"](), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/Constant_6_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={2}, onnx_name="/rnn/Constant_6"](), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/Slice_1_output_0 : Float(1, *, 32, device=cpu) = onnx::Slice[onnx_name="/rnn/Slice_1"](%hidden_prev.1, %/rnn/Constant_5_output_0, %/rnn/Constant_6_output_0, %/rnn/Constant_4_output_0), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/RNN_1_output_0 : Float(10, 1, *, 32, device=cpu), %/rnn/RNN_1_output_1 : Float(1, *, 32, device=cpu) = onnx::RNN[activations=["Tanh"], hidden_size=32, onnx_name="/rnn/RNN_1"](%/rnn/Squeeze_output_0, %onnx::RNN_62, %onnx::RNN_63, %onnx::RNN_64, %onnx::RNN_13, %/rnn/Slice_1_output_0), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/Constant_7_output_0 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={1}, onnx_name="/rnn/Constant_7"](), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/Squeeze_1_output_0 : Float(10, *, 32, device=cpu) = onnx::Squeeze[onnx_name="/rnn/Squeeze_1"](%/rnn/RNN_1_output_0, %/rnn/Constant_7_output_0), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/rnn/Transpose_1_output_0 : Float(*, 10, 32, strides=[320, 32, 1], requires_grad=1, device=cpu) = onnx::Transpose[perm=[1, 0, 2], onnx_name="/rnn/Transpose_1"](%/rnn/Squeeze_1_output_0), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%hidden_prev : Float(2, *, 32, strides=[640, 32, 1], requires_grad=1, device=cpu) = onnx::Concat[axis=0, onnx_name="/rnn/Concat"](%/rnn/RNN_output_1, %/rnn/RNN_1_output_1), scope: __main__.RNN::/torch.nn.modules.rnn.RNN::rnn # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/rnn.py:478:0
%/Shape_output_0 : Long(3, strides=[1], device=cpu) = onnx::Shape[onnx_name="/Shape"](%/rnn/Transpose_1_output_0), scope: __main__.RNN:: # /zengli/20230320/ao/test/test_onnx_rnn.py:25:0
%/Constant_output_0 : Long(device=cpu) = onnx::Constant[value={0}, onnx_name="/Constant"](), scope: __main__.RNN:: # /zengli/20230320/ao/test/test_onnx_rnn.py:25:0
%/Gather_output_0 : Long(device=cpu) = onnx::Gather[axis=0, onnx_name="/Gather"](%/Shape_output_0, %/Constant_output_0), scope: __main__.RNN:: # /zengli/20230320/ao/test/test_onnx_rnn.py:25:0
%onnx::Unsqueeze_50 : Long(1, strides=[1], device=cpu) = onnx::Constant[value={0}]()
%/Unsqueeze_output_0 : Long(1, strides=[1], device=cpu) = onnx::Unsqueeze[onnx_name="/Unsqueeze"](%/Gather_output_0, %onnx::Unsqueeze_50), scope: __main__.RNN::
%/Constant_1_output_0 : Long(1, strides=[1], requires_grad=0, device=cpu) = onnx::Constant[value={-1}, onnx_name="/Constant_1"](), scope: __main__.RNN::
%/Concat_output_0 : Long(2, strides=[1], device=cpu) = onnx::Concat[axis=0, onnx_name="/Concat"](%/Unsqueeze_output_0, %/Constant_1_output_0), scope: __main__.RNN:: # /zengli/20230320/ao/test/test_onnx_rnn.py:25:0
%/Reshape_output_0 : Float(*, *, strides=[320, 1], requires_grad=1, device=cpu) = onnx::Reshape[allowzero=0, onnx_name="/Reshape"](%/rnn/Transpose_1_output_0, %/Concat_output_0), scope: __main__.RNN:: # /zengli/20230320/ao/test/test_onnx_rnn.py:25:0
%output : Float(*, 10, strides=[10, 1], requires_grad=1, device=cpu) = onnx::Gemm[alpha=1., beta=1., transB=1, onnx_name="/fc/Gemm"](%/Reshape_output_0, %fc.weight, %fc.bias), scope: __main__.RNN::/torch.nn.modules.linear.Linear::fc # /home/ubuntu/anaconda3/envs/py37/lib/python3.7/site-packages/torch/nn/modules/linear.py:114:0
return (%output, %hidden_prev)
load model done.
graph torch_jit (
%input[FLOAT, input_dynamic_axes_1x10x10]
%hidden_prev.1[FLOAT, 2xhidden_prev.1_dim_1x32]
) initializers (
%fc.weight[FLOAT, 10x320]
%fc.bias[FLOAT, 10]
%onnx::RNN_58[FLOAT, 1x32x10]
%onnx::RNN_59[FLOAT, 1x32x32]
%onnx::RNN_60[FLOAT, 1x64]
%onnx::RNN_62[FLOAT, 1x32x32]
%onnx::RNN_63[FLOAT, 1x32x32]
%onnx::RNN_64[FLOAT, 1x64]
) {
%/rnn/Transpose_output_0 = Transpose[perm = [1, 0, 2]](%input)
%/rnn/Constant_output_0 = Constant[value = <Tensor>]()
%/rnn/Constant_1_output_0 = Constant[value = <Tensor>]()
%/rnn/Constant_2_output_0 = Constant[value = <Tensor>]()
%/rnn/Slice_output_0 = Slice(%hidden_prev.1, %/rnn/Constant_1_output_0, %/rnn/Constant_2_output_0, %/rnn/Constant_output_0)
%/rnn/RNN_output_0, %/rnn/RNN_output_1 = RNN[activations = ['Tanh'], hidden_size = 32](%/rnn/Transpose_output_0, %onnx::RNN_58, %onnx::RNN_59, %onnx::RNN_60, %, %/rnn/Slice_output_0)
%/rnn/Constant_3_output_0 = Constant[value = <Tensor>]()
%/rnn/Squeeze_output_0 = Squeeze(%/rnn/RNN_output_0, %/rnn/Constant_3_output_0)
%/rnn/Constant_4_output_0 = Constant[value = <Tensor>]()
%/rnn/Constant_5_output_0 = Constant[value = <Tensor>]()
%/rnn/Constant_6_output_0 = Constant[value = <Tensor>]()
%/rnn/Slice_1_output_0 = Slice(%hidden_prev.1, %/rnn/Constant_5_output_0, %/rnn/Constant_6_output_0, %/rnn/Constant_4_output_0)
%/rnn/RNN_1_output_0, %/rnn/RNN_1_output_1 = RNN[activations = ['Tanh'], hidden_size = 32](%/rnn/Squeeze_output_0, %onnx::RNN_62, %onnx::RNN_63, %onnx::RNN_64, %, %/rnn/Slice_1_output_0)
%/rnn/Constant_7_output_0 = Constant[value = <Tensor>]()
%/rnn/Squeeze_1_output_0 = Squeeze(%/rnn/RNN_1_output_0, %/rnn/Constant_7_output_0)
%/rnn/Transpose_1_output_0 = Transpose[perm = [1, 0, 2]](%/rnn/Squeeze_1_output_0)
%hidden_prev = Concat[axis = 0](%/rnn/RNN_output_1, %/rnn/RNN_1_output_1)
%/Shape_output_0 = Shape(%/rnn/Transpose_1_output_0)
%/Constant_output_0 = Constant[value = <Scalar Tensor []>]()
%/Gather_output_0 = Gather[axis = 0](%/Shape_output_0, %/Constant_output_0)
%onnx::Unsqueeze_50 = Constant[value = <Tensor>]()
%/Unsqueeze_output_0 = Unsqueeze(%/Gather_output_0, %onnx::Unsqueeze_50)
%/Constant_1_output_0 = Constant[value = <Tensor>]()
%/Concat_output_0 = Concat[axis = 0](%/Unsqueeze_output_0, %/Constant_1_output_0)
%/Reshape_output_0 = Reshape[allowzero = 0](%/rnn/Transpose_1_output_0, %/Concat_output_0)
%output = Gemm[alpha = 1, beta = 1, transB = 1](%/Reshape_output_0, %fc.weight, %fc.bias)
return %output, %hidden_prev
}
check model done.
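For reference, a minimal sketch of a script that produces this kind of output is shown below. The module definition, tensor shapes, and export arguments are reconstructed from the shapes and node names printed in the log (the original test_onnx_rnn.py is not shown), so the exact class name, hyperparameters, and input/output names should be treated as assumptions.

```python
# Sketch reconstructed from the log above (assumed hyperparameters:
# input_size=10, hidden_size=32, num_layers=2, seq_len=10, batch=20).
import torch
import torch.nn as nn
import onnx

class RNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.rnn = nn.RNN(input_size=10, hidden_size=32,
                          num_layers=2, batch_first=True)
        self.fc = nn.Linear(32 * 10, 10)   # fc.weight: [10, 320] as in the graph

    def forward(self, x, hidden_prev):
        out, hidden_prev = self.rnn(x, hidden_prev)
        out = out.reshape(out.shape[0], -1)  # matches the Shape/Gather/Concat/Reshape nodes
        return self.fc(out), hidden_prev

model = RNN()
x = torch.randn(20, 10, 10)    # torch.Size([20, 10, 10])
h0 = torch.zeros(2, 20, 32)    # torch.Size([2, 20, 32])
y, h1 = model(x, h0)
print(x.shape, h0.shape, y.shape, h1.shape, sep="\n")

# Passing bare axis indices (no axis names) in dynamic_axes triggers the
# "No names were found for specified dynamic axes" warnings seen above.
torch.onnx.export(
    model, (x, h0), "rnn.onnx",
    input_names=["input", "hidden_prev"],
    output_names=["output", "hidden_prev"],   # output names as they appear in the graph
    dynamic_axes={"input": [0], "hidden_prev": [1], "output": [0]},
    verbose=True,   # prints the "Exported graph: graph(...)" dump
)

onnx_model = onnx.load("rnn.onnx")
print("load model done.")
print(onnx.helper.printable_graph(onnx_model.graph))  # the "graph torch_jit (...)" listing
onnx.checker.check_model(onnx_model)
print("check model done.")
```

Note that the exporter lowers the two-layer `nn.RNN` into two single-layer `onnx::RNN` nodes, slicing the stacked initial hidden state per layer and concatenating the per-layer final states back into the `hidden_prev` output, which is exactly the structure visible in the exported graph above.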