Here is the generated ResNet model.
// Standard headers used directly by this file (std::cout, std::string,
// std::vector, int64_t); they may already be pulled in transitively by the
// operator headers, but are listed explicitly for clarity.
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

#include "operators/Conv.h"
#include "operators/Relu.h"
#include "operators/Add.h"
#include "operators/GlobalAveragePool.h"
#include "operators/Reshape.h"
#include "operators/Gemm.h"

using namespace dnnc;
void usage(char** args) {
std::cout << "\nUsage: " << args[0] <<
" <datafile for input \"input\">" <<
"\n\n";
std::cout << "This model has " << 1 << " input(s):\n";
std::cout << "\t 1. \"input\" (shape 1, 3, 32, 32):\n";
std::cout << "Output(s) will be written in file(s):\n";
std::cout << "\t 1. \"output.out\" (shape 1, 10):\n";
}
int main(int argc, char** argv) {
#define BUNDLE_DIR std::string(argv[0]).substr(0,\
std::string(argv[0]).find_last_of("/")) + "/"
if ( argc < 2 || std::string(argv[1]).substr(0,2) == "-h" ) {
usage(argv);
return 1;
}
tensor<float> dnnc_input({1, 3, 32, 32});
dnnc_input.read(argv[1]);
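// Load the exported weight and bias tensors bundled next to the binary
// (all reads below resolve relative to BUNDLE_DIR).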
tensor<float> dnnc_fc_dot_weight({10, 64});
dnnc_fc_dot_weight.read(BUNDLE_DIR + "fc.weight");
tensor<float> dnnc_fc_dot_bias({10});
dnnc_fc_dot_bias.read(BUNDLE_DIR + "fc.bias");
tensor<float> dnnc_214({16, 3, 3, 3});
dnnc_214.read(BUNDLE_DIR + "214");
tensor<float> dnnc_216({16});
dnnc_216.read(BUNDLE_DIR + "216");
tensor<float> dnnc_218({16, 16, 3, 3});
dnnc_218.read(BUNDLE_DIR + "218");
tensor<float> dnnc_220({16});
dnnc_220.read(BUNDLE_DIR + "220");
tensor<float> dnnc_222({16, 16, 3, 3});
dnnc_222.read(BUNDLE_DIR + "222");
tensor<float> dnnc_224({16});
dnnc_224.read(BUNDLE_DIR + "224");
tensor<float> dnnc_226({16, 16, 3, 3});
dnnc_226.read(BUNDLE_DIR + "226");
tensor<float> dnnc_228({16});
dnnc_228.read(BUNDLE_DIR + "228");
tensor<float> dnnc_230({16, 16, 3, 3});
dnnc_230.read(BUNDLE_DIR + "230");
tensor<float> dnnc_232({16});
dnnc_232.read(BUNDLE_DIR + "232");
tensor<float> dnnc_234({16, 16, 3, 3});
dnnc_234.read(BUNDLE_DIR + "234");
tensor<float> dnnc_236({16});
dnnc_236.read(BUNDLE_DIR + "236");
tensor<float> dnnc_238({16, 16, 3, 3});
dnnc_238.read(BUNDLE_DIR + "238");
tensor<float> dnnc_240({16});
dnnc_240.read(BUNDLE_DIR + "240");
tensor<float> dnnc_242({32, 16, 3, 3});
dnnc_242.read(BUNDLE_DIR + "242");
tensor<float> dnnc_244({32});
dnnc_244.read(BUNDLE_DIR + "244");
tensor<float> dnnc_246({32, 32, 3, 3});
dnnc_246.read(BUNDLE_DIR + "246");
tensor<float> dnnc_248({32});
dnnc_248.read(BUNDLE_DIR + "248");
tensor<float> dnnc_250({32, 16, 1, 1});
dnnc_250.read(BUNDLE_DIR + "250");
tensor<float> dnnc_252({32});
dnnc_252.read(BUNDLE_DIR + "252");
tensor<float> dnnc_254({32, 32, 3, 3});
dnnc_254.read(BUNDLE_DIR + "254");
tensor<float> dnnc_256({32});
dnnc_256.read(BUNDLE_DIR + "256");
tensor<float> dnnc_258({32, 32, 3, 3});
dnnc_258.read(BUNDLE_DIR + "258");
tensor<float> dnnc_260({32});
dnnc_260.read(BUNDLE_DIR + "260");
tensor<float> dnnc_262({32, 32, 3, 3});
dnnc_262.read(BUNDLE_DIR + "262");
tensor<float> dnnc_264({32});
dnnc_264.read(BUNDLE_DIR + "264");
tensor<float> dnnc_266({32, 32, 3, 3});
dnnc_266.read(BUNDLE_DIR + "266");
tensor<float> dnnc_268({32});
dnnc_268.read(BUNDLE_DIR + "268");
tensor<float> dnnc_270({64, 32, 3, 3});
dnnc_270.read(BUNDLE_DIR + "270");
tensor<float> dnnc_272({64});
dnnc_272.read(BUNDLE_DIR + "272");
tensor<float> dnnc_274({64, 64, 3, 3});
dnnc_274.read(BUNDLE_DIR + "274");
tensor<float> dnnc_276({64});
dnnc_276.read(BUNDLE_DIR + "276");
tensor<float> dnnc_278({64, 32, 1, 1});
dnnc_278.read(BUNDLE_DIR + "278");
tensor<float> dnnc_280({64});
dnnc_280.read(BUNDLE_DIR + "280");
tensor<float> dnnc_282({64, 64, 3, 3});
dnnc_282.read(BUNDLE_DIR + "282");
tensor<float> dnnc_284({64});
dnnc_284.read(BUNDLE_DIR + "284");
tensor<float> dnnc_286({64, 64, 3, 3});
dnnc_286.read(BUNDLE_DIR + "286");
tensor<float> dnnc_288({64});
dnnc_288.read(BUNDLE_DIR + "288");
tensor<float> dnnc_290({64, 64, 3, 3});
dnnc_290.read(BUNDLE_DIR + "290");
tensor<float> dnnc_292({64});
dnnc_292.read(BUNDLE_DIR + "292");
tensor<float> dnnc_294({64, 64, 3, 3});
dnnc_294.read(BUNDLE_DIR + "294");
tensor<float> dnnc_296({64});
dnnc_296.read(BUNDLE_DIR + "296");
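// Target shape for the Reshape node that flattens the pooled features
// before the fully connected layer.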
tensor<int64_t> dnnc_206({2});
dnnc_206.read(BUNDLE_DIR + "206");
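// Graph body: stacks of 3x3 convolutions with ReLU activations and residual
// Add connections, emitted node by node from the ONNX graph.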
Conv<float, float, float> Conv_0("Conv_0");
std::vector<int32_t> Conv_0_dilations = {1,1} ;
Conv_0.setAttribute ( attr_dilations, Conv_0_dilations );
int32_t Conv_0_group = 1 ;
Conv_0.setAttribute ( attr_group, Conv_0_group );
std::vector<int32_t> Conv_0_kernel_shape = {3,3} ;
Conv_0.setAttribute ( attr_kernel_shape, Conv_0_kernel_shape );
std::vector<int32_t> Conv_0_pads = {1,1,1,1} ;
Conv_0.setAttribute ( attr_pads, Conv_0_pads );
std::vector<int32_t> Conv_0_strides = {1,1} ;
Conv_0.setAttribute ( attr_strides, Conv_0_strides );
tensor<float> dnnc_Conv_0_129 = Conv_0.compute ( dnnc_input, dnnc_214, dnnc_216);
Relu<float, float> Relu_2("Relu_2");
tensor<float> dnnc_Relu_2_131 = Relu_2.compute ( dnnc_Conv_0_129);
Conv<float, float, float> Conv_3("Conv_3");
std::vector<int32_t> Conv_3_dilations = {1,1} ;
Conv_3.setAttribute ( attr_dilations, Conv_3_dilations );
int32_t Conv_3_group = 1 ;
Conv_3.setAttribute ( attr_group, Conv_3_group );
std::vector<int32_t> Conv_3_kernel_shape = {3,3} ;
Conv_3.setAttribute ( attr_kernel_shape, Conv_3_kernel_shape );
std::vector<int32_t> Conv_3_pads = {1,1,1,1} ;
Conv_3.setAttribute ( attr_pads, Conv_3_pads );
std::vector<int32_t> Conv_3_strides = {1,1} ;
Conv_3.setAttribute ( attr_strides, Conv_3_strides );
tensor<float> dnnc_Conv_3_132 = Conv_3.compute ( dnnc_Relu_2_131, dnnc_218, dnnc_220);
Relu<float, float> Relu_5("Relu_5");
tensor<float> dnnc_Relu_5_134 = Relu_5.compute ( dnnc_Conv_3_132);
Conv<float, float, float> Conv_6("Conv_6");
std::vector<int32_t> Conv_6_dilations = {1,1} ;
Conv_6.setAttribute ( attr_dilations, Conv_6_dilations );
int32_t Conv_6_group = 1 ;
Conv_6.setAttribute ( attr_group, Conv_6_group );
std::vector<int32_t> Conv_6_kernel_shape = {3,3} ;
Conv_6.setAttribute ( attr_kernel_shape, Conv_6_kernel_shape );
std::vector<int32_t> Conv_6_pads = {1,1,1,1} ;
Conv_6.setAttribute ( attr_pads, Conv_6_pads );
std::vector<int32_t> Conv_6_strides = {1,1} ;
Conv_6.setAttribute ( attr_strides, Conv_6_strides );
tensor<float> dnnc_Conv_6_135 = Conv_6.compute ( dnnc_Relu_5_134, dnnc_222, dnnc_224);
Add<float, float, float> Add_8("Add_8");
tensor<float> dnnc_Add_8_137 = Add_8.compute ( dnnc_Conv_6_135, dnnc_Relu_2_131);
Relu<float, float> Relu_9("Relu_9");
tensor<float> dnnc_Relu_9_138 = Relu_9.compute ( dnnc_Add_8_137);
Conv<float, float, float> Conv_10("Conv_10");
std::vector<int32_t> Conv_10_dilations = {1,1} ;
Conv_10.setAttribute ( attr_dilations, Conv_10_dilations );
int32_t Conv_10_group = 1 ;
Conv_10.setAttribute ( attr_group, Conv_10_group );
std::vector<int32_t> Conv_10_kernel_shape = {3,3} ;
Conv_10.setAttribute ( attr_kernel_shape, Conv_10_kernel_shape );
std::vector<int32_t> Conv_10_pads = {1,1,1,1} ;
Conv_10.setAttribute ( attr_pads, Conv_10_pads );
std::vector<int32_t> Conv_10_strides = {1,1} ;
Conv_10.setAttribute ( attr_strides, Conv_10_strides );
tensor<float> dnnc_Conv_10_139 = Conv_10.compute ( dnnc_Relu_9_138, dnnc_226, dnnc_228);
Relu<float, float> Relu_12("Relu_12");
tensor<float> dnnc_Relu_12_141 = Relu_12.compute ( dnnc_Conv_10_139);
Conv<float, float, float> Conv_13("Conv_13");
std::vector<int32_t> Conv_13_dilations = {1,1} ;
Conv_13.setAttribute ( attr_dilations, Conv_13_dilations );
int32_t Conv_13_group = 1 ;
Conv_13.setAttribute ( attr_group, Conv_13_group );
std::vector<int32_t> Conv_13_kernel_shape = {3,3} ;
Conv_13.setAttribute ( attr_kernel_shape, Conv_13_kernel_shape );
std::vector<int32_t> Conv_13_pads = {1,1,1,1} ;
Conv_13.setAttribute ( attr_pads, Conv_13_pads );
std::vector<int32_t> Conv_13_strides = {1,1} ;
Conv_13.setAttribute ( attr_strides, Conv_13_strides );
tensor<float> dnnc_Conv_13_142 = Conv_13.compute ( dnnc_Relu_12_141, dnnc_230, dnnc_232);
Add<float, float, float> Add_15("Add_15");
tensor<float> dnnc_Add_15_144 = Add_15.compute ( dnnc_Conv_13_142, dnnc_Relu_9_138);
Relu<float, float> Relu_16("Relu_16");
tensor<float> dnnc_Relu_16_145 = Relu_16.compute ( dnnc_Add_15_144);
Conv<float, float, float> Conv_17("Conv_17");
std::vector<int32_t> Conv_17_dilations = {1,1} ;
Conv_17.setAttribute ( attr_dilations, Conv_17_dilations );
int32_t Conv_17_group = 1 ;
Conv_17.setAttribute ( attr_group, Conv_17_group );
std::vector<int32_t> Conv_17_kernel_shape = {3,3} ;
Conv_17.setAttribute ( attr_kernel_shape, Conv_17_kernel_shape );
std::vector<int32_t> Conv_17_pads = {1,1,1,1} ;
Conv_17.setAttribute ( attr_pads, Conv_17_pads );
std::vector<int32_t> Conv_17_strides = {1,1} ;
Conv_17.setAttribute ( attr_strides, Conv_17_strides );
tensor<float> dnnc_Conv_17_146 = Conv_17.compute ( dnnc_Relu_16_145, dnnc_234, dnnc_236);
Relu<float, float> Relu_19("Relu_19");
tensor<float> dnnc_Relu_19_148 = Relu_19.compute ( dnnc_Conv_17_146);
Conv<float, float, float> Conv_20("Conv_20");
std::vector<int32_t> Conv_20_dilations = {1,1} ;
Conv_20.setAttribute ( attr_dilations, Conv_20_dilations );
int32_t Conv_20_group = 1 ;
Conv_20.setAttribute ( attr_group, Conv_20_group );
std::vector<int32_t> Conv_20_kernel_shape = {3,3} ;
Conv_20.setAttribute ( attr_kernel_shape, Conv_20_kernel_shape );
std::vector<int32_t> Conv_20_pads = {1,1,1,1} ;
Conv_20.setAttribute ( attr_pads, Conv_20_pads );
std::vector<int32_t> Conv_20_strides = {1,1} ;
Conv_20.setAttribute ( attr_strides, Conv_20_strides );
tensor<float> dnnc_Conv_20_149 = Conv_20.compute ( dnnc_Relu_19_148, dnnc_238, dnnc_240);
Add<float, float, float> Add_22("Add_22");
tensor<float> dnnc_Add_22_151 = Add_22.compute ( dnnc_Conv_20_149, dnnc_Relu_16_145);
Relu<float, float> Relu_23("Relu_23");
tensor<float> dnnc_Relu_23_152 = Relu_23.compute ( dnnc_Add_22_151);
Conv<float, float, float> Conv_24("Conv_24");
std::vector<int32_t> Conv_24_dilations = {1,1} ;
Conv_24.setAttribute ( attr_dilations, Conv_24_dilations );
int32_t Conv_24_group = 1 ;
Conv_24.setAttribute ( attr_group, Conv_24_group );
std::vector<int32_t> Conv_24_kernel_shape = {3,3} ;
Conv_24.setAttribute ( attr_kernel_shape, Conv_24_kernel_shape );
std::vector<int32_t> Conv_24_pads = {1,1,1,1} ;
Conv_24.setAttribute ( attr_pads, Conv_24_pads );
std::vector<int32_t> Conv_24_strides = {2,2} ;
Conv_24.setAttribute ( attr_strides, Conv_24_strides );
tensor<float> dnnc_Conv_24_153 = Conv_24.compute ( dnnc_Relu_23_152, dnnc_242, dnnc_244);
Relu<float, float> Relu_26("Relu_26");
tensor<float> dnnc_Relu_26_155 = Relu_26.compute ( dnnc_Conv_24_153);
Conv<float, float, float> Conv_27("Conv_27");
std::vector<int32_t> Conv_27_dilations = {1,1} ;
Conv_27.setAttribute ( attr_dilations, Conv_27_dilations );
int32_t Conv_27_group = 1 ;
Conv_27.setAttribute ( attr_group, Conv_27_group );
std::vector<int32_t> Conv_27_kernel_shape = {3,3} ;
Conv_27.setAttribute ( attr_kernel_shape, Conv_27_kernel_shape );
std::vector<int32_t> Conv_27_pads = {1,1,1,1} ;
Conv_27.setAttribute ( attr_pads, Conv_27_pads );
std::vector<int32_t> Conv_27_strides = {1,1} ;
Conv_27.setAttribute ( attr_strides, Conv_27_strides );
tensor<float> dnnc_Conv_27_156 = Conv_27.compute ( dnnc_Relu_26_155, dnnc_246, dnnc_248);
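// 1x1 projection shortcut (stride 2) so the skip connection matches the
// downsampled 32-channel main path.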
Conv<float, float, float> Conv_29("Conv_29");
std::vector<int32_t> Conv_29_dilations = {1,1} ;
Conv_29.setAttribute ( attr_dilations, Conv_29_dilations );
int32_t Conv_29_group = 1 ;
Conv_29.setAttribute ( attr_group, Conv_29_group );
std::vector<int32_t> Conv_29_kernel_shape = {1,1} ;
Conv_29.setAttribute ( attr_kernel_shape, Conv_29_kernel_shape );
std::vector<int32_t> Conv_29_pads = {0,0,0,0} ;
Conv_29.setAttribute ( attr_pads, Conv_29_pads );
std::vector<int32_t> Conv_29_strides = {2,2} ;
Conv_29.setAttribute ( attr_strides, Conv_29_strides );
tensor<float> dnnc_Conv_29_158 = Conv_29.compute ( dnnc_Relu_23_152, dnnc_250, dnnc_252);
Add<float, float, float> Add_31("Add_31");
tensor<float> dnnc_Add_31_160 = Add_31.compute ( dnnc_Conv_27_156, dnnc_Conv_29_158);
Relu<float, float> Relu_32("Relu_32");
tensor<float> dnnc_Relu_32_161 = Relu_32.compute ( dnnc_Add_31_160);
Conv<float, float, float> Conv_33("Conv_33");
std::vector<int32_t> Conv_33_dilations = {1,1} ;
Conv_33.setAttribute ( attr_dilations, Conv_33_dilations );
int32_t Conv_33_group = 1 ;
Conv_33.setAttribute ( attr_group, Conv_33_group );
std::vector<int32_t> Conv_33_kernel_shape = {3,3} ;
Conv_33.setAttribute ( attr_kernel_shape, Conv_33_kernel_shape );
std::vector<int32_t> Conv_33_pads = {1,1,1,1} ;
Conv_33.setAttribute ( attr_pads, Conv_33_pads );
std::vector<int32_t> Conv_33_strides = {1,1} ;
Conv_33.setAttribute ( attr_strides, Conv_33_strides );
tensor<float> dnnc_Conv_33_162 = Conv_33.compute ( dnnc_Relu_32_161, dnnc_254, dnnc_256);
Relu<float, float> Relu_35("Relu_35");
tensor<float> dnnc_Relu_35_164 = Relu_35.compute ( dnnc_Conv_33_162);
Conv<float, float, float> Conv_36("Conv_36");
std::vector<int32_t> Conv_36_dilations = {1,1} ;
Conv_36.setAttribute ( attr_dilations, Conv_36_dilations );
int32_t Conv_36_group = 1 ;
Conv_36.setAttribute ( attr_group, Conv_36_group );
std::vector<int32_t> Conv_36_kernel_shape = {3,3} ;
Conv_36.setAttribute ( attr_kernel_shape, Conv_36_kernel_shape );
std::vector<int32_t> Conv_36_pads = {1,1,1,1} ;
Conv_36.setAttribute ( attr_pads, Conv_36_pads );
std::vector<int32_t> Conv_36_strides = {1,1} ;
Conv_36.setAttribute ( attr_strides, Conv_36_strides );
tensor<float> dnnc_Conv_36_165 = Conv_36.compute ( dnnc_Relu_35_164, dnnc_258, dnnc_260);
Add<float, float, float> Add_38("Add_38");
tensor<float> dnnc_Add_38_167 = Add_38.compute ( dnnc_Conv_36_165, dnnc_Relu_32_161);
Relu<float, float> Relu_39("Relu_39");
tensor<float> dnnc_Relu_39_168 = Relu_39.compute ( dnnc_Add_38_167);
Conv<float, float, float> Conv_40("Conv_40");
std::vector<int32_t> Conv_40_dilations = {1,1} ;
Conv_40.setAttribute ( attr_dilations, Conv_40_dilations );
int32_t Conv_40_group = 1 ;
Conv_40.setAttribute ( attr_group, Conv_40_group );
std::vector<int32_t> Conv_40_kernel_shape = {3,3} ;
Conv_40.setAttribute ( attr_kernel_shape, Conv_40_kernel_shape );
std::vector<int32_t> Conv_40_pads = {1,1,1,1} ;
Conv_40.setAttribute ( attr_pads, Conv_40_pads );
std::vector<int32_t> Conv_40_strides = {1,1} ;
Conv_40.setAttribute ( attr_strides, Conv_40_strides );
tensor<float> dnnc_Conv_40_169 = Conv_40.compute ( dnnc_Relu_39_168, dnnc_262, dnnc_264);
Relu<float, float> Relu_42("Relu_42");
tensor<float> dnnc_Relu_42_171 = Relu_42.compute ( dnnc_Conv_40_169);
Conv<float, float, float> Conv_43("Conv_43");
std::vector<int32_t> Conv_43_dilations = {1,1} ;
Conv_43.setAttribute ( attr_dilations, Conv_43_dilations );
int32_t Conv_43_group = 1 ;
Conv_43.setAttribute ( attr_group, Conv_43_group );
std::vector<int32_t> Conv_43_kernel_shape = {3,3} ;
Conv_43.setAttribute ( attr_kernel_shape, Conv_43_kernel_shape );
std::vector<int32_t> Conv_43_pads = {1,1,1,1} ;
Conv_43.setAttribute ( attr_pads, Conv_43_pads );
std::vector<int32_t> Conv_43_strides = {1,1} ;
Conv_43.setAttribute ( attr_strides, Conv_43_strides );
tensor<float> dnnc_Conv_43_172 = Conv_43.compute ( dnnc_Relu_42_171, dnnc_266, dnnc_268);
Add<float, float, float> Add_45("Add_45");
tensor<float> dnnc_Add_45_174 = Add_45.compute ( dnnc_Conv_43_172, dnnc_Relu_39_168);
Relu<float, float> Relu_46("Relu_46");
tensor<float> dnnc_Relu_46_175 = Relu_46.compute ( dnnc_Add_45_174);
Conv<float, float, float> Conv_47("Conv_47");
std::vector<int32_t> Conv_47_dilations = {1,1} ;
Conv_47.setAttribute ( attr_dilations, Conv_47_dilations );
int32_t Conv_47_group = 1 ;
Conv_47.setAttribute ( attr_group, Conv_47_group );
std::vector<int32_t> Conv_47_kernel_shape = {3,3} ;
Conv_47.setAttribute ( attr_kernel_shape, Conv_47_kernel_shape );
std::vector<int32_t> Conv_47_pads = {1,1,1,1} ;
Conv_47.setAttribute ( attr_pads, Conv_47_pads );
std::vector<int32_t> Conv_47_strides = {2,2} ;
Conv_47.setAttribute ( attr_strides, Conv_47_strides );
tensor<float> dnnc_Conv_47_176 = Conv_47.compute ( dnnc_Relu_46_175, dnnc_270, dnnc_272);
Relu<float, float> Relu_49("Relu_49");
tensor<float> dnnc_Relu_49_178 = Relu_49.compute ( dnnc_Conv_47_176);
Conv<float, float, float> Conv_50("Conv_50");
std::vector<int32_t> Conv_50_dilations = {1,1} ;
Conv_50.setAttribute ( attr_dilations, Conv_50_dilations );
int32_t Conv_50_group = 1 ;
Conv_50.setAttribute ( attr_group, Conv_50_group );
std::vector<int32_t> Conv_50_kernel_shape = {3,3} ;
Conv_50.setAttribute ( attr_kernel_shape, Conv_50_kernel_shape );
std::vector<int32_t> Conv_50_pads = {1,1,1,1} ;
Conv_50.setAttribute ( attr_pads, Conv_50_pads );
std::vector<int32_t> Conv_50_strides = {1,1} ;
Conv_50.setAttribute ( attr_strides, Conv_50_strides );
tensor<float> dnnc_Conv_50_179 = Conv_50.compute ( dnnc_Relu_49_178, dnnc_274, dnnc_276);
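// 1x1 projection shortcut (stride 2) matching the downsampled 64-channel
// main path.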
Conv<float, float, float> Conv_52("Conv_52");
std::vector<int32_t> Conv_52_dilations = {1,1} ;
Conv_52.setAttribute ( attr_dilations, Conv_52_dilations );
int32_t Conv_52_group = 1 ;
Conv_52.setAttribute ( attr_group, Conv_52_group );
std::vector<int32_t> Conv_52_kernel_shape = {1,1} ;
Conv_52.setAttribute ( attr_kernel_shape, Conv_52_kernel_shape );
std::vector<int32_t> Conv_52_pads = {0,0,0,0} ;
Conv_52.setAttribute ( attr_pads, Conv_52_pads );
std::vector<int32_t> Conv_52_strides = {2,2} ;
Conv_52.setAttribute ( attr_strides, Conv_52_strides );
tensor<float> dnnc_Conv_52_181 = Conv_52.compute ( dnnc_Relu_46_175, dnnc_278, dnnc_280);
Add<float, float, float> Add_54("Add_54");
tensor<float> dnnc_Add_54_183 = Add_54.compute ( dnnc_Conv_50_179, dnnc_Conv_52_181);
Relu<float, float> Relu_55("Relu_55");
tensor<float> dnnc_Relu_55_184 = Relu_55.compute ( dnnc_Add_54_183);
Conv<float, float, float> Conv_56("Conv_56");
std::vector<int32_t> Conv_56_dilations = {1,1} ;
Conv_56.setAttribute ( attr_dilations, Conv_56_dilations );
int32_t Conv_56_group = 1 ;
Conv_56.setAttribute ( attr_group, Conv_56_group );
std::vector<int32_t> Conv_56_kernel_shape = {3,3} ;
Conv_56.setAttribute ( attr_kernel_shape, Conv_56_kernel_shape );
std::vector<int32_t> Conv_56_pads = {1,1,1,1} ;
Conv_56.setAttribute ( attr_pads, Conv_56_pads );
std::vector<int32_t> Conv_56_strides = {1,1} ;
Conv_56.setAttribute ( attr_strides, Conv_56_strides );
tensor<float> dnnc_Conv_56_185 = Conv_56.compute ( dnnc_Relu_55_184, dnnc_282, dnnc_284);
Relu<float, float> Relu_58("Relu_58");
tensor<float> dnnc_Relu_58_187 = Relu_58.compute ( dnnc_Conv_56_185);
Conv<float, float, float> Conv_59("Conv_59");
std::vector<int32_t> Conv_59_dilations = {1,1} ;
Conv_59.setAttribute ( attr_dilations, Conv_59_dilations );
int32_t Conv_59_group = 1 ;
Conv_59.setAttribute ( attr_group, Conv_59_group );
std::vector<int32_t> Conv_59_kernel_shape = {3,3} ;
Conv_59.setAttribute ( attr_kernel_shape, Conv_59_kernel_shape );
std::vector<int32_t> Conv_59_pads = {1,1,1,1} ;
Conv_59.setAttribute ( attr_pads, Conv_59_pads );
std::vector<int32_t> Conv_59_strides = {1,1} ;
Conv_59.setAttribute ( attr_strides, Conv_59_strides );
tensor<float> dnnc_Conv_59_188 = Conv_59.compute ( dnnc_Relu_58_187, dnnc_286, dnnc_288);
Add<float, float, float> Add_61("Add_61");
tensor<float> dnnc_Add_61_190 = Add_61.compute ( dnnc_Conv_59_188, dnnc_Relu_55_184);
Relu<float, float> Relu_62("Relu_62");
tensor<float> dnnc_Relu_62_191 = Relu_62.compute ( dnnc_Add_61_190);
Conv<float, float, float> Conv_63("Conv_63");
std::vector<int32_t> Conv_63_dilations = {1,1} ;
Conv_63.setAttribute ( attr_dilations, Conv_63_dilations );
int32_t Conv_63_group = 1 ;
Conv_63.setAttribute ( attr_group, Conv_63_group );
std::vector<int32_t> Conv_63_kernel_shape = {3,3} ;
Conv_63.setAttribute ( attr_kernel_shape, Conv_63_kernel_shape );
std::vector<int32_t> Conv_63_pads = {1,1,1,1} ;
Conv_63.setAttribute ( attr_pads, Conv_63_pads );
std::vector<int32_t> Conv_63_strides = {1,1} ;
Conv_63.setAttribute ( attr_strides, Conv_63_strides );
tensor<float> dnnc_Conv_63_192 = Conv_63.compute ( dnnc_Relu_62_191, dnnc_290, dnnc_292);
Relu<float, float> Relu_65("Relu_65");
tensor<float> dnnc_Relu_65_194 = Relu_65.compute ( dnnc_Conv_63_192);
Conv<float, float, float> Conv_66("Conv_66");
std::vector<int32_t> Conv_66_dilations = {1,1} ;
Conv_66.setAttribute ( attr_dilations, Conv_66_dilations );
int32_t Conv_66_group = 1 ;
Conv_66.setAttribute ( attr_group, Conv_66_group );
std::vector<int32_t> Conv_66_kernel_shape = {3,3} ;
Conv_66.setAttribute ( attr_kernel_shape, Conv_66_kernel_shape );
std::vector<int32_t> Conv_66_pads = {1,1,1,1} ;
Conv_66.setAttribute ( attr_pads, Conv_66_pads );
std::vector<int32_t> Conv_66_strides = {1,1} ;
Conv_66.setAttribute ( attr_strides, Conv_66_strides );
tensor<float> dnnc_Conv_66_195 = Conv_66.compute ( dnnc_Relu_65_194, dnnc_294, dnnc_296);
Add<float, float, float> Add_68("Add_68");
tensor<float> dnnc_Add_68_197 = Add_68.compute ( dnnc_Conv_66_195, dnnc_Relu_62_191);
Relu<float, float> Relu_69("Relu_69");
tensor<float> dnnc_Relu_69_198 = Relu_69.compute ( dnnc_Add_68_197);
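// Classification head: global average pooling, reshape (flatten), and the
// final fully connected (Gemm) layer producing the 10 class scores.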
GlobalAveragePool<float, float> GlobalAveragePool_70("GlobalAveragePool_70");
tensor<float> dnnc_GlobalAveragePool_70_199 = GlobalAveragePool_70.compute ( dnnc_Relu_69_198);
Reshape<float, float, int64_t> Reshape_80("Reshape_80");
tensor<float> dnnc_Reshape_80_207 = Reshape_80.compute ( dnnc_GlobalAveragePool_70_199, dnnc_206);
Gemm<float, float, float> Gemm_81("Gemm_81");
float Gemm_81_alpha = 1.000000 ;
Gemm_81.setAttribute ( attr_alpha, Gemm_81_alpha );
float Gemm_81_beta = 1.000000 ;
Gemm_81.setAttribute ( attr_beta, Gemm_81_beta );
int32_t Gemm_81_transB = 1 ;
Gemm_81.setAttribute ( attr_transB, Gemm_81_transB );
tensor<float> dnnc_Gemm_81_output = Gemm_81.compute ( dnnc_Reshape_80_207, dnnc_fc_dot_weight, dnnc_fc_dot_bias);
// Write the output tensor in a file.
dnnc_Gemm_81_output.write("output.out");
return 0;
}
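For reference, here is a minimal sketch (not part of the generated output) of how output.out could be inspected afterwards. It assumes tensor<float>::write stores the ten scores as plain whitespace-separated text; if the format turns out to be binary, the reader would need to be adjusted accordingly.

#include <fstream>
#include <iostream>
#include <vector>

int main() {
  // Parse the scores written by the generated model.
  std::ifstream in("output.out");
  std::vector<float> scores;
  float value;
  while (in >> value)
    scores.push_back(value);
  if (scores.empty()) {
    std::cerr << "could not parse output.out\n";
    return 1;
  }
  // Report the index of the highest score, i.e. the predicted class.
  size_t best = 0;
  for (size_t i = 1; i < scores.size(); ++i)
    if (scores[i] > scores[best])
      best = i;
  std::cout << "predicted class: " << best << " (score " << scores[best] << ")\n";
  return 0;
}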
Thanks again for the fast response! Much appreciated.
I just pulled the latest version of deepC and ran the ResNet model again. This time I get a different error:
running DNNC graph sanity check.
Writing C++ file source-out//model.cpp
python3: cppCodeGen.cpp:422: std::string dnnc::cppCodeGen::writeUnaryOperator(dnnc::opNode &, std::vector<node *> &, std::vector<node *> &): Assertion `ins.size() == 1 && outs.size() == 1' failed.
Aborted
Did you make any changes to the model itself? Or have there been some additional commits in the meantime?
I forgot to push. Doing it now.
Please pull again in 10 mins.
Indeed! That fixed the issue - super 👍
Some operators that are actually implemented still produce "operator xxx is not supported yet" messages in the generated C++ code. For example, the attached model yields several "Relu not supported" comments in the generated code:
model.zip