tensorlayer / TensorLayer

Deep Learning and Reinforcement Learning Library for Scientists and Engineers
http://tensorlayerx.com

Converting SRGAN to .pb #1125

Open jz-li666 opened 3 years ago

jz-li666 commented 3 years ago

Source repository: https://github.com/tensorlayer/srgan

I want to convert the `get_G` network from `model.py` to a `.pb` file. I am not using the pre-trained weights; I want to export the model directly to `.pb`. However, I get `AttributeError: module 'tensorflow.python.framework.ops' has no attribute '_TensorLike'`. I'm not familiar with the TensorLayer code, please help.

My code (export to `.pb`):

```python
import tensorflow as tf
# convert_variables_to_constants_v2 is needed for freezing the graph
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2

G = get_G()  # get_G comes from model.py in the srgan repo
input_signature = tf.TensorSpec([1, 128, 128, 3])
output_tensor = G.infer.get_concrete_function(x=input_signature)
frozen_func = convert_variables_to_constants_v2(output_tensor)
frozen_func.graph.as_graph_def()
pb_dir = r'srgan.pb'
tf.io.write_graph(graph_or_graph_def=frozen_func.graph, logdir="models/pb", name=pb_dir, as_text=False)
print('done')
```

Laicheng0830 commented 3 years ago

The `AttributeError` can be solved in either of the following ways:

1. Downgrade TensorFlow to TensorFlow 2.0.
2. Modify line 213 of `tensorlayer/models/core.py`, changing

   ```python
   if isinstance(check_argu, tf_ops._TensorLike) or tf_ops.is_dense_tensor_like(check_argu):
   ```

   to

   ```python
   if isinstance(check_argu, (tf.Tensor, tf.SparseTensor, tf.Variable)) or tf_ops.is_dense_tensor_like(check_argu):
   ```
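(Editorial aside, not from the thread: before patching the installed package, the replacement condition from option 2 can be tried on its own to confirm that the `(tf.Tensor, tf.SparseTensor, tf.Variable)` tuple covers the objects you expect TensorLayer to pass through that check. `looks_like_tensor` below is just an illustrative name.)

```python
# Minimal sketch, assuming TensorFlow 2.x where ops._TensorLike was removed.
import tensorflow as tf

def looks_like_tensor(obj):
    """Stand-in for the removed tf_ops._TensorLike isinstance check."""
    return isinstance(obj, (tf.Tensor, tf.SparseTensor, tf.Variable))

print(looks_like_tensor(tf.constant([1.0])))   # True
print(looks_like_tensor(tf.Variable([1.0])))   # True
print(looks_like_tensor([1.0]))                # False: a plain Python list
```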

jz-li666 commented 3 years ago

The problem can be solved as follows: the SRGAN source code uses the static model API, which has compatibility problems; it can be rewritten as a dynamic model.

Versions: TensorFlow 2.0; tensorlayer 2.23.

Code:

```python
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import (Input, Conv2d, BatchNorm2d, Elementwise, SubpixelConv2d, Flatten, Dense)
from tensorlayer.models import Model
from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2


class MyG(Model):

    def __init__(self):
        super().__init__()
        w_init = tf.random_normal_initializer(stddev=0.02)
        g_init = tf.random_normal_initializer(1., 0.02)
        # first conv layer
        self.conv1 = Conv2d(in_channels=3, n_filter=64, filter_size=(3, 3), strides=(1, 1), act=tf.nn.relu, padding='SAME', W_init=w_init)
        # 16 residual blocks
        self.block0 = self.get_block()
        self.block1 = self.get_block()
        self.block2 = self.get_block()
        self.block3 = self.get_block()
        self.block4 = self.get_block()
        self.block5 = self.get_block()
        self.block6 = self.get_block()
        self.block7 = self.get_block()
        self.block8 = self.get_block()
        self.block9 = self.get_block()
        self.block10 = self.get_block()
        self.block11 = self.get_block()
        self.block12 = self.get_block()
        self.block13 = self.get_block()
        self.block14 = self.get_block()
        self.block15 = self.get_block()
        self.conv2 = Conv2d(in_channels=64, n_filter=64, filter_size=(3, 3), strides=(1, 1), padding='SAME', W_init=w_init, b_init=None)
        self.bn2 = BatchNorm2d(num_features=64, gamma_init=g_init)
        # upsampling tail: conv -> subpixel(x2) -> conv -> subpixel(x2) -> conv(tanh)
        layer_list = []
        layer_list.append(Conv2d(in_channels=64, n_filter=256, filter_size=(3, 3), strides=(1, 1), padding='SAME', W_init=w_init))
        layer_list.append(SubpixelConv2d(in_channels=256, scale=2, n_out_channels=None, act=tf.nn.relu))
        layer_list.append(Conv2d(in_channels=64, n_filter=256, filter_size=(3, 3), strides=(1, 1), act=None, padding='SAME', W_init=w_init))
        layer_list.append(SubpixelConv2d(in_channels=256, scale=2, n_out_channels=None, act=tf.nn.relu))
        layer_list.append(Conv2d(in_channels=64, n_filter=3, filter_size=(3, 3), strides=(1, 1), act=tf.nn.tanh, padding='SAME', W_init=w_init))
        self.blockend = tl.layers.LayerList(layer_list)

    def get_block(self):
        # one residual block: conv -> bn(relu) -> conv -> bn
        w_init = tf.random_normal_initializer(stddev=0.02)
        g_init = tf.random_normal_initializer(1., 0.02)
        layer_list = []
        layer_list.append(Conv2d(in_channels=64, n_filter=64, filter_size=(3, 3), strides=(1, 1), padding='SAME', W_init=w_init, b_init=None))
        layer_list.append(BatchNorm2d(num_features=64, act=tf.nn.relu, gamma_init=g_init))
        layer_list.append(Conv2d(in_channels=64, n_filter=64, filter_size=(3, 3), strides=(1, 1), padding='SAME', W_init=w_init, b_init=None))
        layer_list.append(BatchNorm2d(num_features=64, gamma_init=g_init))
        block = tl.layers.LayerList(layer_list)
        return block

    def forward(self, x):
        x = self.conv1.forward(x)
        # each block is applied with a residual (skip) connection
        x = self.block0.forward(x) + x
        x = self.block1.forward(x) + x
        x = self.block2.forward(x) + x
        x = self.block3.forward(x) + x
        x = self.block4.forward(x) + x
        x = self.block5.forward(x) + x
        x = self.block6.forward(x) + x
        x = self.block7.forward(x) + x
        x = self.block8.forward(x) + x
        x = self.block9.forward(x) + x
        x = self.block10.forward(x) + x
        x = self.block11.forward(x) + x
        x = self.block12.forward(x) + x
        x = self.block13.forward(x) + x
        x = self.block14.forward(x) + x
        x = self.block15.forward(x) + x
        x = self.conv2.forward(x)
        x = self.bn2.forward(x)
        x = self.blockend.forward(x)
        return x

    @tf.function(experimental_relax_shapes=True)
    def infer(self, x):
        return self.forward(x)


myg = MyG()
myg.eval()
input_signature = tf.TensorSpec([1, 128, 128, 3])
concrete_function = myg.infer.get_concrete_function(x=input_signature)
frozen_graph = convert_variables_to_constants_v2(concrete_function)
frozen_graph_def = frozen_graph.graph.as_graph_def()
tf.io.write_graph(graph_or_graph_def=frozen_graph_def, logdir="./", name="srgan.pb", as_text=False)
```
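(Editorial aside, not from the thread: one way to sanity-check the exported file is to reload the frozen GraphDef and run it on a dummy low-resolution input. The tensor names `x:0` and `Identity:0` below are assumptions; print `frozen_graph.inputs` and `frozen_graph.outputs` at export time to get the actual names.)

```python
# Sketch of reloading and running the frozen srgan.pb produced above.
import numpy as np
import tensorflow as tf

# Load the frozen GraphDef written by tf.io.write_graph.
with tf.io.gfile.GFile("srgan.pb", "rb") as f:
    graph_def = tf.compat.v1.GraphDef()
    graph_def.ParseFromString(f.read())

def wrap_frozen_graph(graph_def, inputs, outputs):
    # Standard TF2 pattern: import the GraphDef into a wrapped function
    # and prune it down to the chosen input/output tensors.
    def _imports_graph_def():
        tf.compat.v1.import_graph_def(graph_def, name="")
    wrapped = tf.compat.v1.wrap_function(_imports_graph_def, [])
    return wrapped.prune(
        tf.nest.map_structure(wrapped.graph.as_graph_element, inputs),
        tf.nest.map_structure(wrapped.graph.as_graph_element, outputs),
    )

# "x:0" / "Identity:0" are placeholder tensor names (assumptions).
infer = wrap_frozen_graph(graph_def, inputs="x:0", outputs="Identity:0")

lr = np.random.rand(1, 128, 128, 3).astype(np.float32)  # dummy low-res image
sr = infer(tf.constant(lr))
print(sr.shape)  # a 4x SRGAN generator should produce (1, 512, 512, 3)
```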