Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Pb convert to tflite and quant uint8 #262

Open
fangxiaoying opened this issue Jul 21, 2021 · 0 comments
Open

Pb convert to tflite and quant uint8 #262

fangxiaoying opened this issue Jul 21, 2021 · 0 comments

Comments

@fangxiaoying
Copy link

我想在 NXP i.MX8MP(包含 NPU)上部署这个模型。为什么不直接用 saved model 转为 tflite 模型并进行 int8 量化?我在用 tf 模型的 pb 文件转换 tflite 的过程中遇到了错误。

%0 = "tfl.pad"(%arg0, %cst) : (tensor<?x240x320x3xf32>, tensor<4x2xi32>) -> tensor<?x242x322x3xf32>
%1 = "tfl.conv_2d"(%0, %cst_51, %cst_26) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<?x242x322x3xf32>, tensor<16x3x3x3xf32>, tensor<16xf32>) -> tensor<?x120x160x16xf32>
%2 = "tfl.pad"(%1, %cst) : (tensor<?x120x160x16xf32>, tensor<4x2xi32>) -> tensor<?x122x162x16xf32>
%3 = "tfl.depthwise_conv_2d"(%2, %cst_81, %cst_40) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x122x162x16xf32>, tensor<1x3x3x16xf32>, tensor<16xf32>) -> tensor<?x120x160x16xf32>
%4 = "tfl.conv_2d"(%3, %cst_52, %cst_27) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x120x160x16xf32>, tensor<32x1x1x16xf32>, tensor<32xf32>) -> tensor<?x120x160x32xf32>
%5 = "tfl.pad"(%4, %cst) : (tensor<?x120x160x32xf32>, tensor<4x2xi32>) -> tensor<?x122x162x32xf32>
%6 = "tfl.depthwise_conv_2d"(%5, %cst_82, %cst_33) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<?x122x162x32xf32>, tensor<1x3x3x32xf32>, tensor<32xf32>) -> tensor<?x60x80x32xf32>
%7 = "tfl.conv_2d"(%6, %cst_53, %cst_34) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x60x80x32xf32>, tensor<32x1x1x32xf32>, tensor<32xf32>) -> tensor<?x60x80x32xf32>
%8 = "tfl.pad"(%7, %cst) : (tensor<?x60x80x32xf32>, tensor<4x2xi32>) -> tensor<?x62x82x32xf32>
%9 = "tfl.depthwise_conv_2d"(%8, %cst_83, %cst_35) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x62x82x32xf32>, tensor<1x3x3x32xf32>, tensor<32xf32>) -> tensor<?x60x80x32xf32>
%10 = "tfl.conv_2d"(%9, %cst_54, %cst_36) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x60x80x32xf32>, tensor<32x1x1x32xf32>, tensor<32xf32>) -> tensor<?x60x80x32xf32>
%11 = "tfl.pad"(%10, %cst) : (tensor<?x60x80x32xf32>, tensor<4x2xi32>) -> tensor<?x62x82x32xf32>
%12 = "tfl.depthwise_conv_2d"(%11, %cst_84, %cst_37) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<?x62x82x32xf32>, tensor<1x3x3x32xf32>, tensor<32xf32>) -> tensor<?x30x40x32xf32>
%13 = "tfl.conv_2d"(%12, %cst_55, %cst_38) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x30x40x32xf32>, tensor<64x1x1x32xf32>, tensor<64xf32>) -> tensor<?x30x40x64xf32>
%14 = "tfl.pad"(%13, %cst) : (tensor<?x30x40x64xf32>, tensor<4x2xi32>) -> tensor<?x32x42x64xf32>
%15 = "tfl.depthwise_conv_2d"(%14, %cst_85, %cst_39) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x32x42x64xf32>, tensor<1x3x3x64xf32>, tensor<64xf32>) -> tensor<?x30x40x64xf32>
%16 = "tfl.conv_2d"(%15, %cst_56, %cst_41) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x30x40x64xf32>, tensor<64x1x1x64xf32>, tensor<64xf32>) -> tensor<?x30x40x64xf32>
%17 = "tfl.pad"(%16, %cst) : (tensor<?x30x40x64xf32>, tensor<4x2xi32>) -> tensor<?x32x42x64xf32>
%18 = "tfl.depthwise_conv_2d"(%17, %cst_86, %cst_42) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x32x42x64xf32>, tensor<1x3x3x64xf32>, tensor<64xf32>) -> tensor<?x30x40x64xf32>
%19 = "tfl.conv_2d"(%18, %cst_57, %cst_43) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x30x40x64xf32>, tensor<64x1x1x64xf32>, tensor<64xf32>) -> tensor<?x30x40x64xf32>
%20 = "tfl.pad"(%19, %cst) : (tensor<?x30x40x64xf32>, tensor<4x2xi32>) -> tensor<?x32x42x64xf32>
%21 = "tfl.depthwise_conv_2d"(%20, %cst_87, %cst_44) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x32x42x64xf32>, tensor<1x3x3x64xf32>, tensor<64xf32>) -> tensor<?x30x40x64xf32>
%22 = "tfl.conv_2d"(%21, %cst_58, %cst_45) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x30x40x64xf32>, tensor<64x1x1x64xf32>, tensor<64xf32>) -> tensor<?x30x40x64xf32>
%23 = "tfl.pad"(%22, %cst) : (tensor<?x30x40x64xf32>, tensor<4x2xi32>) -> tensor<?x32x42x64xf32>
%24 = "tfl.depthwise_conv_2d"(%23, %cst_88, %cst_46) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<?x32x42x64xf32>, tensor<1x3x3x64xf32>, tensor<64xf32>) -> tensor<?x15x20x64xf32>
%25 = "tfl.conv_2d"(%24, %cst_59, %cst_47) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x15x20x64xf32>, tensor<128x1x1x64xf32>, tensor<128xf32>) -> tensor<?x15x20x128xf32>
%26 = "tfl.pad"(%25, %cst) : (tensor<?x15x20x128xf32>, tensor<4x2xi32>) -> tensor<?x17x22x128xf32>
%27 = "tfl.depthwise_conv_2d"(%26, %cst_89, %cst_48) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x17x22x128xf32>, tensor<1x3x3x128xf32>, tensor<128xf32>) -> tensor<?x15x20x128xf32>
%28 = "tfl.depthwise_conv_2d"(%23, %cst_60, %cst_25) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x32x42x64xf32>, tensor<1x3x3x64xf32>, tensor<64xf32>) -> tensor<?x30x40x64xf32>
%29 = "tfl.conv_2d"(%28, %cst_61, %cst_90) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x30x40x64xf32>, tensor<6x1x1x64xf32>, tensor<6xf32>) -> tensor<?x30x40x6xf32>
%30 = "tfl.shape"(%29) : (tensor<?x30x40x6xf32>) -> tensor<4xi32>
%31 = "tfl.strided_slice"(%30, %cst_108, %cst_109, %cst_109) {begin_mask = 0 : i32, ellipsis_mask = 0 : i32, end_mask = 0 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 1 : i32} : (tensor<4xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor
%32 = "tfl.pack"(%31, %cst_14, %cst_1) {axis = 0 : i32, values_count = 3 : i32} : (tensor, tensor, tensor) -> tensor<3xi32>
%33 = "tfl.reshape"(%29, %32) : (tensor<?x30x40x6xf32>, tensor<3xi32>) -> tensor<?x?x2xf32>
%34 = "tfl.depthwise_conv_2d"(%23, %cst_62, %cst_21) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x32x42x64xf32>, tensor<1x3x3x64xf32>, tensor<64xf32>) -> tensor<?x30x40x64xf32>
%35 = "tfl.conv_2d"(%34, %cst_63, %cst_91) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x30x40x64xf32>, tensor<12x1x1x64xf32>, tensor<12xf32>) -> tensor<?x30x40x12xf32>
%36 = "tfl.shape"(%35) : (tensor<?x30x40x12xf32>) -> tensor<4xi32>
%37 = "tfl.strided_slice"(%36, %cst_108, %cst_109, %cst_109) {begin_mask = 0 : i32, ellipsis_mask = 0 : i32, end_mask = 0 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 1 : i32} : (tensor<4xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor
%38 = "tfl.pack"(%37, %cst_14, %cst_0) {axis = 0 : i32, values_count = 3 : i32} : (tensor, tensor, tensor) -> tensor<3xi32>
%39 = "tfl.reshape"(%35, %38) : (tensor<?x30x40x12xf32>, tensor<3xi32>) -> tensor<?x?x4xf32>
%40 = "tfl.conv_2d"(%27, %cst_64, %cst_49) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x15x20x128xf32>, tensor<128x1x1x128xf32>, tensor<128xf32>) -> tensor<?x15x20x128xf32>
%41 = "tfl.pad"(%40, %cst) : (tensor<?x15x20x128xf32>, tensor<4x2xi32>) -> tensor<?x17x22x128xf32>
%42 = "tfl.depthwise_conv_2d"(%41, %cst_92, %cst_50) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x17x22x128xf32>, tensor<1x3x3x128xf32>, tensor<128xf32>) -> tensor<?x15x20x128xf32>
%43 = "tfl.conv_2d"(%42, %cst_65, %cst_28) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x15x20x128xf32>, tensor<128x1x1x128xf32>, tensor<128xf32>) -> tensor<?x15x20x128xf32>
%44 = "tfl.pad"(%43, %cst) : (tensor<?x15x20x128xf32>, tensor<4x2xi32>) -> tensor<?x17x22x128xf32>
%45 = "tfl.depthwise_conv_2d"(%44, %cst_93, %cst_29) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<?x17x22x128xf32>, tensor<1x3x3x128xf32>, tensor<128xf32>) -> tensor<?x8x10x128xf32>
%46 = "tfl.conv_2d"(%45, %cst_66, %cst_30) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x8x10x128xf32>, tensor<256x1x1x128xf32>, tensor<256xf32>) -> tensor<?x8x10x256xf32>
%47 = "tfl.pad"(%46, %cst) : (tensor<?x8x10x256xf32>, tensor<4x2xi32>) -> tensor<?x10x12x256xf32>
%48 = "tfl.depthwise_conv_2d"(%47, %cst_94, %cst_31) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x10x12x256xf32>, tensor<1x3x3x256xf32>, tensor<256xf32>) -> tensor<?x8x10x256xf32>
%49 = "tfl.conv_2d"(%48, %cst_67, %cst_32) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x8x10x256xf32>, tensor<256x1x1x256xf32>, tensor<256xf32>) -> tensor<?x8x10x256xf32>
%50 = "tfl.pad"(%49, %cst) : (tensor<?x8x10x256xf32>, tensor<4x2xi32>) -> tensor<?x10x12x256xf32>
%51 = "tfl.depthwise_conv_2d"(%50, %cst_68, %cst_23) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x10x12x256xf32>, tensor<1x3x3x256xf32>, tensor<256xf32>) -> tensor<?x8x10x256xf32>
%52 = "tfl.conv_2d"(%51, %cst_69, %cst_95) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x8x10x256xf32>, tensor<4x1x1x256xf32>, tensor<4xf32>) -> tensor<?x8x10x4xf32>
%53 = "tfl.shape"(%52) : (tensor<?x8x10x4xf32>) -> tensor<4xi32>
%54 = "tfl.strided_slice"(%53, %cst_108, %cst_109, %cst_109) {begin_mask = 0 : i32, ellipsis_mask = 0 : i32, end_mask = 0 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 1 : i32} : (tensor<4xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor
%55 = "tfl.pack"(%54, %cst_14, %cst_1) {axis = 0 : i32, values_count = 3 : i32} : (tensor, tensor, tensor) -> tensor<3xi32>
%56 = "tfl.reshape"(%52, %55) : (tensor<?x8x10x4xf32>, tensor<3xi32>) -> tensor<?x?x2xf32>
%57 = "tfl.conv_2d"(%49, %cst_70, %cst_17) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x8x10x256xf32>, tensor<64x1x1x256xf32>, tensor<64xf32>) -> tensor<?x8x10x64xf32>
%58 = "tfl.pad"(%57, %cst) : (tensor<?x8x10x64xf32>, tensor<4x2xi32>) -> tensor<?x10x12x64xf32>
%59 = "tfl.depthwise_conv_2d"(%58, %cst_71, %cst_18) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 2 : i32, stride_w = 2 : i32} : (tensor<?x10x12x64xf32>, tensor<1x3x3x64xf32>, tensor<64xf32>) -> tensor<?x4x5x64xf32>
%60 = "tfl.conv_2d"(%59, %cst_72, %cst_22) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x4x5x64xf32>, tensor<256x1x1x64xf32>, tensor<256xf32>) -> tensor<?x4x5x256xf32>
%61 = "tfl.conv_2d"(%60, %cst_73, %cst_96) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x4x5x256xf32>, tensor<6x3x3x256xf32>, tensor<6xf32>) -> tensor<?x4x5x6xf32>
%62 = "tfl.shape"(%61) : (tensor<?x4x5x6xf32>) -> tensor<4xi32>
%63 = "tfl.strided_slice"(%62, %cst_108, %cst_109, %cst_109) {begin_mask = 0 : i32, ellipsis_mask = 0 : i32, end_mask = 0 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 1 : i32} : (tensor<4xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor
%64 = "tfl.pack"(%63, %cst_14, %cst_1) {axis = 0 : i32, values_count = 3 : i32} : (tensor, tensor, tensor) -> tensor<3xi32>
%65 = "tfl.reshape"(%61, %64) : (tensor<?x4x5x6xf32>, tensor<3xi32>) -> tensor<?x?x2xf32>
%66 = "tfl.conv_2d"(%60, %cst_74, %cst_97) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "SAME", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x4x5x256xf32>, tensor<12x3x3x256xf32>, tensor<12xf32>) -> tensor<?x4x5x12xf32>
%67 = "tfl.shape"(%66) : (tensor<?x4x5x12xf32>) -> tensor<4xi32>
%68 = "tfl.strided_slice"(%67, %cst_108, %cst_109, %cst_109) {begin_mask = 0 : i32, ellipsis_mask = 0 : i32, end_mask = 0 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 1 : i32} : (tensor<4xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor
%69 = "tfl.pack"(%68, %cst_14, %cst_0) {axis = 0 : i32, values_count = 3 : i32} : (tensor, tensor, tensor) -> tensor<3xi32>
%70 = "tfl.reshape"(%66, %69) : (tensor<?x4x5x12xf32>, tensor<3xi32>) -> tensor<?x?x4xf32>
%71 = "tfl.depthwise_conv_2d"(%50, %cst_75, %cst_19) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x10x12x256xf32>, tensor<1x3x3x256xf32>, tensor<256xf32>) -> tensor<?x8x10x256xf32>
%72 = "tfl.conv_2d"(%71, %cst_76, %cst_98) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x8x10x256xf32>, tensor<8x1x1x256xf32>, tensor<8xf32>) -> tensor<?x8x10x8xf32>
%73 = "tfl.shape"(%72) : (tensor<?x8x10x8xf32>) -> tensor<4xi32>
%74 = "tfl.strided_slice"(%73, %cst_108, %cst_109, %cst_109) {begin_mask = 0 : i32, ellipsis_mask = 0 : i32, end_mask = 0 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 1 : i32} : (tensor<4xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor
%75 = "tfl.pack"(%74, %cst_14, %cst_0) {axis = 0 : i32, values_count = 3 : i32} : (tensor, tensor, tensor) -> tensor<3xi32>
%76 = "tfl.reshape"(%72, %75) : (tensor<?x8x10x8xf32>, tensor<3xi32>) -> tensor<?x?x4xf32>
%77 = "tfl.depthwise_conv_2d"(%44, %cst_77, %cst_24) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x17x22x128xf32>, tensor<1x3x3x128xf32>, tensor<128xf32>) -> tensor<?x15x20x128xf32>
%78 = "tfl.conv_2d"(%77, %cst_78, %cst_99) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x15x20x128xf32>, tensor<4x1x1x128xf32>, tensor<4xf32>) -> tensor<?x15x20x4xf32>
%79 = "tfl.shape"(%78) : (tensor<?x15x20x4xf32>) -> tensor<4xi32>
%80 = "tfl.strided_slice"(%79, %cst_108, %cst_109, %cst_109) {begin_mask = 0 : i32, ellipsis_mask = 0 : i32, end_mask = 0 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 1 : i32} : (tensor<4xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor
%81 = "tfl.pack"(%80, %cst_14, %cst_1) {axis = 0 : i32, values_count = 3 : i32} : (tensor, tensor, tensor) -> tensor<3xi32>
%82 = "tfl.reshape"(%78, %81) : (tensor<?x15x20x4xf32>, tensor<3xi32>) -> tensor<?x?x2xf32>
%83 = "tfl.concatenation"(%33, %82, %56, %65) {axis = 1 : i32, fused_activation_function = "NONE"} : (tensor<?x?x2xf32>, tensor<?x?x2xf32>, tensor<?x?x2xf32>, tensor<?x?x2xf32>) -> tensor<?x?x2xf32>
%84 = "tfl.softmax"(%83) {beta = 1.000000e+00 : f32} : (tensor<?x?x2xf32>) -> tensor<?x?x2xf32>
%85 = "tf.StridedSlice"(%84, %cst_15, %cst_16, %cst_107) {_cloned = true, begin_mask = 0 : i64, device = "", ellipsis_mask = 1 : i64, end_mask = 0 : i64, new_axis_mask = 0 : i64, shrink_axis_mask = 2 : i64} : (tensor<?x?x2xf32>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<?x?xf32>
%86 = "tfl.greater"(%85, %cst_3) : (tensor<?x?xf32>, tensor) -> tensor<?x?xi1>
%87 = "tfl.reshape"(%86, %cst_13) : (tensor<?x?xi1>, tensor<1xi32>) -> tensor<?xi1>
%88 = "tfl.where"(%87) : (tensor<?xi1>) -> tensor<?x1xi64>
%89 = "tfl.squeeze"(%88) {squeeze_dims = [1]} : (tensor<?x1xi64>) -> tensor<?xi64>
%90 = "tfl.depthwise_conv_2d"(%44, %cst_79, %cst_20) {depth_multiplier = 1 : i32, dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "RELU", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x17x22x128xf32>, tensor<1x3x3x128xf32>, tensor<128xf32>) -> tensor<?x15x20x128xf32>
%91 = "tfl.conv_2d"(%90, %cst_80, %cst_100) {dilation_h_factor = 1 : i32, dilation_w_factor = 1 : i32, fused_activation_function = "NONE", padding = "VALID", stride_h = 1 : i32, stride_w = 1 : i32} : (tensor<?x15x20x128xf32>, tensor<8x1x1x128xf32>, tensor<8xf32>) -> tensor<?x15x20x8xf32>
%92 = "tfl.shape"(%91) : (tensor<?x15x20x8xf32>) -> tensor<4xi32>
%93 = "tfl.strided_slice"(%92, %cst_108, %cst_109, %cst_109) {begin_mask = 0 : i32, ellipsis_mask = 0 : i32, end_mask = 0 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 1 : i32} : (tensor<4xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor
%94 = "tfl.pack"(%93, %cst_14, %cst_0) {axis = 0 : i32, values_count = 3 : i32} : (tensor, tensor, tensor) -> tensor<3xi32>
%95 = "tfl.reshape"(%91, %94) : (tensor<?x15x20x8xf32>, tensor<3xi32>) -> tensor<?x?x4xf32>
%96 = "tfl.concatenation"(%39, %95, %76, %70) {axis = 1 : i32, fused_activation_function = "NONE"} : (tensor<?x?x4xf32>, tensor<?x?x4xf32>, tensor<?x?x4xf32>, tensor<?x?x4xf32>) -> tensor<?x?x4xf32>
%97 = "tfl.strided_slice"(%96, %cst_102, %cst_101, %cst_103) {begin_mask = 7 : i32, ellipsis_mask = 0 : i32, end_mask = 3 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 0 : i32} : (tensor<?x?x4xf32>, tensor<3xi32>, tensor<3xi32>, tensor<3xi32>) -> tensor<?x?x2xf32>
%98 = "tfl.mul"(%97, %cst_6) {fused_activation_function = "NONE"} : (tensor<?x?x2xf32>, tensor) -> tensor<?x?x2xf32>
%99 = "tfl.mul"(%98, %cst_8) {fused_activation_function = "NONE"} : (tensor<?x?x2xf32>, tensor<4420x2xf32>) -> tensor<?x4420x2xf32>
%100 = "tfl.add"(%99, %cst_2) {fused_activation_function = "NONE"} : (tensor<?x4420x2xf32>, tensor<4420x2xf32>) -> tensor<?x4420x2xf32>
%101 = "tfl.strided_slice"(%96, %cst_101, %cst_102, %cst_103) {begin_mask = 3 : i32, ellipsis_mask = 0 : i32, end_mask = 7 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 0 : i32} : (tensor<?x?x4xf32>, tensor<3xi32>, tensor<3xi32>, tensor<3xi32>) -> tensor<?x?x2xf32>
%102 = "tfl.mul"(%101, %cst_7) {fused_activation_function = "NONE"} : (tensor<?x?x2xf32>, tensor) -> tensor<?x?x2xf32>
%103 = "tfl.exp"(%102) : (tensor<?x?x2xf32>) -> tensor<?x?x2xf32>
%104 = "tfl.mul"(%103, %cst_8) {fused_activation_function = "NONE"} : (tensor<?x?x2xf32>, tensor<4420x2xf32>) -> tensor<?x4420x2xf32>
%105 = "tfl.div"(%104, %cst_12) {fused_activation_function = "NONE"} : (tensor<?x4420x2xf32>, tensor) -> tensor<?x4420x2xf32>
%106 = "tfl.sub"(%100, %105) {fused_activation_function = "NONE"} : (tensor<?x4420x2xf32>, tensor<?x4420x2xf32>) -> tensor<?x4420x2xf32>
%107 = "tfl.add"(%100, %105) {fused_activation_function = "NONE"} : (tensor<?x4420x2xf32>, tensor<?x4420x2xf32>) -> tensor<?x4420x2xf32>
%108 = "tfl.concatenation"(%106, %107) {axis = -1 : i32, fused_activation_function = "NONE"} : (tensor<?x4420x2xf32>, tensor<?x4420x2xf32>) -> tensor<?x4420x4xf32>
%109 = "tfl.minimum"(%108, %cst_5) : (tensor<?x4420x4xf32>, tensor) -> tensor<?x4420x4xf32>
%110 = "tfl.maximum"(%109, %cst_4) : (tensor<?x4420x4xf32>, tensor) -> tensor<?x4420x4xf32>
%111 = "tfl.concatenation"(%84, %110) {axis = 2 : i32, fused_activation_function = "NONE"} : (tensor<?x?x2xf32>, tensor<?x4420x4xf32>) -> tensor<?x4420x6xf32>
%112 = "tfl.shape"(%111) : (tensor<?x4420x6xf32>) -> tensor<3xi32>
%113 = "tfl.strided_slice"(%112, %cst_108, %cst_104, %cst_109) {begin_mask = 0 : i32, ellipsis_mask = 0 : i32, end_mask = 0 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 0 : i32} : (tensor<3xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<2xi32>
%114 = "tfl.reduce_prod"(%113, %cst_108) {keep_dims = false} : (tensor<2xi32>, tensor<1xi32>) -> tensor
%115 = "tfl.pack"(%114) {axis = 0 : i32, values_count = 1 : i32} : (tensor) -> tensor<1xi32>
%116 = "tfl.strided_slice"(%112, %cst_104, %cst_108, %cst_109) {begin_mask = 0 : i32, ellipsis_mask = 0 : i32, end_mask = 1 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 0 : i32} : (tensor<3xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor<1xi32>
%117 = "tfl.concatenation"(%115, %116) {axis = 0 : i32, fused_activation_function = "NONE"} : (tensor<1xi32>, tensor<1xi32>) -> tensor<2xi32>
%118 = "tfl.reshape"(%111, %117) : (tensor<?x4420x6xf32>, tensor<2xi32>) -> tensor<?x?xf32>
%119 = "tfl.gather"(%118, %89) {axis = 0 : i32} : (tensor<?x?xf32>, tensor<?xi64>) -> tensor<?x?xf32>
%120 = "tfl.strided_slice"(%119, %cst_105, %cst_106, %cst_107) {begin_mask = 1 : i32, ellipsis_mask = 0 : i32, end_mask = 3 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 0 : i32} : (tensor<?x?xf32>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<?x?xf32>
%121 = "tf.StridedSlice"(%119, %cst_15, %cst_16, %cst_107) {_cloned = true, begin_mask = 0 : i64, device = "", ellipsis_mask = 1 : i64, end_mask = 0 : i64, new_axis_mask = 0 : i64, shrink_axis_mask = 2 : i64} : (tensor<?x?xf32>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<?xf32>
%122 = "tf.NonMaxSuppressionV3"(%120, %121, %cst_10, %cst_9, %cst_11) {T = f32, T_threshold = f32, _cloned = true, device = ""} : (tensor<?x?xf32>, tensor<?xf32>, tensor, tensor, tensor) -> tensor<?xi32>
%123 = "tfl.gather"(%119, %122) {axis = 0 : i32} : (tensor<?x?xf32>, tensor<?xi32>) -> tensor<?x?xf32>
%124 = "tfl.shape"(%123) : (tensor<?x?xf32>) -> tensor<2xi32>
%125 = "tfl.strided_slice"(%124, %cst_108, %cst_109, %cst_109) {begin_mask = 0 : i32, ellipsis_mask = 0 : i32, end_mask = 0 : i32, new_axis_mask = 0 : i32, shrink_axis_mask = 1 : i32} : (tensor<2xi32>, tensor<1xi32>, tensor<1xi32>, tensor<1xi32>) -> tensor
%126 = "tfl.minimum"(%125, %cst_10) : (tensor, tensor) -> tensor
%127 = "tf.StridedSlice"(%123, %cst_15, %cst_16, %cst_107) {_cloned = true, begin_mask = 0 : i64, device = "", ellipsis_mask = 1 : i64, end_mask = 0 : i64, new_axis_mask = 0 : i64, shrink_axis_mask = 2 : i64} : (tensor<?x?xf32>, tensor<2xi32>, tensor<2xi32>, tensor<2xi32>) -> tensor<?xf32>
%values, %indices = "tfl.topk_v2"(%127, %126) : (tensor<?xf32>, tensor) -> (tensor<?xf32>, tensor<?xi32>)
%128 = "tfl.gather"(%123, %indices) {axis = 0 : i32} : (tensor<?x?xf32>, tensor<?xi32>) -> tensor<?x?xf32>
"std.return"(%128) : (tensor<?x?xf32>) -> ()
}) {sym_name = "main", tf.entry_function = {control_outputs = "", inputs = "input_1", outputs = "Identity"}, type = (tensor<?x240x320x3xf32>) -> tensor<?x?xf32>} : () -> ()

但我不清楚这是什么错误,导致转换没有成功。

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant