Commit 87f24eea authored by Jonathan Juhl

Delete models.py

parent 6f43a18f
from super_clas_sortem import super_class
from tensorflow.keras import Model
import tensorflow as tf
from tensorflow.keras.layers import Flatten, LeakyReLU, Conv2DTranspose, Dense, Conv2D, UpSampling2D, Conv3D, Conv3DTranspose, UpSampling3D, ReLU
# Watersheed_Layer is used by fit_layer below; it is assumed to live in utils_sortem alongside the other custom layers.
from utils_sortem import Spectral_norm, Instance_norm, transform_3D, Watersheed_Layer
class fit_layer(Model):
    def __init__(self):
        super(fit_layer, self).__init__()
        self.watersheed_layer = Watersheed_Layer()

    def call(self, mean_matrix, mean_bias, variance_matrix, variance_bias):
        loss_value = self.watersheed_layer(mean_matrix, mean_bias, variance_matrix, variance_bias)
        return loss_value
class Cluster_Layer(Model):
    def __init__(self):
        super(Cluster_Layer, self).__init__()
        self.dense_mean = Dense(4**3)
        self.dense_var = Dense(1)
        self.dense_mean_angel = Dense(2)
        self.dense_var_angel = Dense(1)

    def call(self, catagorial_variable):
        # Reparameterisation-style sampling of a latent volume and two viewing angles
        # from the categorical input.
        catagorial_variable = Flatten()(catagorial_variable)
        mean = self.dense_mean(catagorial_variable)
        s = tf.shape(mean)
        batch = s[0]
        length = s[1]
        epsilon = tf.random.normal(shape=[batch, length])
        logvar = self.dense_var(catagorial_variable)
        out = epsilon * logvar + mean
        mean_angel = self.dense_mean_angel(catagorial_variable)
        logvar_angel = self.dense_var_angel(catagorial_variable)
        epsilon_angel = tf.random.normal(shape=[batch, 2])
        out_angel = tf.nn.softsign(epsilon_angel * logvar_angel + mean_angel)
        return out, out_angel
def AdaIn_3D(inputs, s1, b1):
    b = tf.shape(inputs)[0]
    w = tf.shape(inputs)[1]
    h = tf.shape(inputs)[2]
    d = tf.shape(inputs)[3]
    c = tf.shape(inputs)[4]
    inputs = Flatten()(inputs)
    mean = tf.reduce_mean(inputs, axis=1, keepdims=True)
    std = tf.math.reduce_std(inputs, axis=1, keepdims=True)
    inputs = tf.reshape((inputs - mean) / std, [b, w, h, d, c])
    out = tf.transpose(s1 * tf.transpose(inputs, perm=[1, 2, 3, 0, 4]) + b1, perm=[3, 0, 1, 2, 4])
    return out
def AdaIn_2D(inputs, s1, b1):
    b = tf.shape(inputs)[0]
    w = tf.shape(inputs)[1]
    h = tf.shape(inputs)[2]
    c = tf.shape(inputs)[3]
    inputs = Flatten()(inputs)
    mean = tf.reduce_mean(inputs, axis=1, keepdims=True)
    std = tf.math.reduce_std(inputs, axis=1, keepdims=True)
    inputs = tf.reshape((inputs - mean) / std, [b, w, h, c])
    out = tf.transpose(s1 * tf.transpose(inputs, perm=[1, 2, 0, 3]) + b1, perm=[2, 0, 1, 3])
    return out
class Double_Dense(Model):
    def __init__(self, channels):
        super(Double_Dense, self).__init__()
        self.dense_1 = Dense(channels)
        self.dense_2 = Dense(channels)

    def call(self, x):
        x = Flatten()(x)
        s = self.dense_1(x)
        c = self.dense_2(x)
        return s, c
class Generator_AdaIN_Noise(Model):
    # 2-D generator in which the AdaIN scale and bias at every upsampling stage are predicted from z.
    def __init__(self, gf_dim=64):
        super(Generator_AdaIN_Noise, self).__init__()
        self.gf_dim = gf_dim
        self.zmap_0 = Double_Dense(self.gf_dim * 8)
        self.h0 = AdaIn_2D
        self.h0_a = LeakyReLU()
        self.h1 = Conv2DTranspose(self.gf_dim * 8, 3, strides=2, padding='SAME')
        self.z_map_1 = Double_Dense(self.gf_dim * 8)
        self.h1_aI = AdaIn_2D
        self.h1_a = LeakyReLU()
        self.h2 = Conv2DTranspose(self.gf_dim * 16, 3, strides=2, padding='SAME')
        self.z_map_2 = Double_Dense(self.gf_dim * 16)
        self.h2_aI = AdaIn_2D
        self.h2_a = LeakyReLU()
        self.h5 = Conv2DTranspose(self.gf_dim * 4, 4, strides=2, padding='SAME')
        self.z_map_4 = Double_Dense(self.gf_dim * 4)
        self.h6 = AdaIn_2D
        self.h6_a = LeakyReLU()
        self.h7 = Conv2DTranspose(self.gf_dim * 2, 4, strides=2, padding='SAME')
        self.z_map_5 = Double_Dense(self.gf_dim * 2)
        self.h7_in = AdaIn_2D
        self.h7_a = LeakyReLU()
        self.h8 = Conv2DTranspose(self.gf_dim, 4, strides=2, padding='SAME')
        self.z_map_6 = Double_Dense(self.gf_dim)
        self.h8_in = AdaIn_2D
        self.h8_a = LeakyReLU()
        self.h9 = Conv2D(1, 4, activation='tanh', padding='SAME')

    def call(self, z):
        a, b = self.zmap_0(z)
        x = self.h0(z, a, b)
        x = self.h0_a(x)
        x = self.h1(x)
        a, b = self.z_map_1(z)
        x = self.h1_aI(x, a, b)
        x = self.h1_a(x)
        x = self.h2(x)
        a, b = self.z_map_2(z)
        x = self.h2_aI(x, a, b)
        x = self.h2_a(x)
        x = self.h5(x)
        a, b = self.z_map_4(z)
        x = self.h6(x, a, b)
        x = self.h6_a(x)
        x = self.h7(x)
        a, b = self.z_map_5(z)
        x = self.h7_in(x, a, b)
        x = self.h7_a(x)
        x = self.h8(x)
        a, b = self.z_map_6(z)
        x = self.h8_in(x, a, b)
        x = self.h8_a(x)
        x = self.h9(x)
        return x
class Generator_AdaIN_res128(Model):
    def __init__(self, gf_dim=64):
        super(Generator_AdaIN_res128, self).__init__()
        self.trans_3d = transform_3D()
        self.cluster = Cluster_Layer()
        self.gf_dim = gf_dim
        self.h0 = Conv3DTranspose(self.gf_dim * 8, 3, strides=2, padding='SAME')
        self.zmap_0 = Double_Dense(self.gf_dim * 8)
        self.h0_aI = AdaIn_3D
        self.h0_a = LeakyReLU()
        self.h1 = Conv3DTranspose(self.gf_dim * 4, 3, strides=2, padding='SAME')
        self.z_map_1 = Double_Dense(self.gf_dim * 4)
        self.h1_aI = AdaIn_3D
        self.h1_a = LeakyReLU()
        # =============================================================================================================
        # h2_rotated = transform_3D(h2, view_in, 16, 16)
        self.h2_proj1 = Conv3D(self.gf_dim * 2, 3, padding='SAME')
        self.h2_proj1_a = LeakyReLU()
        self.h2_proj2 = Conv3D(self.gf_dim * 2, 3, padding='SAME')
        self.h2_proj2_a = LeakyReLU()
        # =============================================================================================================
        # Collapsing depth dimension
        # h2_2d = tf.reshape(h2_proj2, [batch_size, s_h4, s_w4, s_d4 * self.gf_dim])
        # 1x1 convolution
        # =============================================================================================================
        self.h4 = Conv2D(self.gf_dim * 16, 1)
        self.h4_a = LeakyReLU()
        self.h5 = Conv2DTranspose(self.gf_dim * 8, 4, strides=2, padding='SAME')
        self.z_map_4 = Double_Dense(self.gf_dim * 8)
        self.h6 = AdaIn_2D
        self.h6_a = LeakyReLU()
        self.h7 = Conv2DTranspose(self.gf_dim * 2, 4, strides=2, padding='SAME')
        self.z_map_5 = Double_Dense(self.gf_dim * 2)
        self.h7_in = AdaIn_2D
        self.h7_a = LeakyReLU()
        self.h8 = Conv2DTranspose(self.gf_dim, 4, strides=2, padding='SAME')
        self.z_map_6 = Double_Dense(self.gf_dim)
        self.h8_in = AdaIn_2D
        self.h8_a = LeakyReLU()
        self.h9 = Conv2D(1, 4, activation='tanh', padding='SAME')

    def call(self, catagorial, phi):
        z, angels = self.cluster(catagorial)
        psi, rho = tf.split(angels, 2, axis=1)
        z = tf.reshape(z, [-1, 4, 4, 4, 1])
        a, b = self.zmap_0(z)
        x = self.h0(z)
        x = self.h0_aI(x, a, b)
        x = self.h0_a(x)
        x = self.h1(x)
        a, b = self.z_map_1(z)
        x = self.h1_aI(x, a, b)
        x = self.h1_a(x)
        x = self.trans_3d(x, tf.squeeze(psi), phi, tf.squeeze(rho))
        x = self.h2_proj1(x)
        x = self.h2_proj1_a(x)
        x = self.h2_proj2(x)
        x = self.h2_proj2_a(x)
        s = tf.shape(x)
        x = tf.reshape(x, [s[0], s[1], s[2], s[3] * s[4]])
        x = self.h4(x)
        x = self.h4_a(x)
        x = self.h5(x)
        a, b = self.z_map_4(z)
        x = self.h6(x, a, b)
        x = self.h6_a(x)
        x = self.h7(x)
        a, b = self.z_map_5(z)
        x = self.h7_in(x, a, b)
        x = self.h7_a(x)
        x = self.h8(x)
        a, b = self.z_map_6(z)
        x = self.h8_in(x, a, b)
        x = self.h8_a(x)
        x = self.h9(x)
        return x
class Discriminator_AdaIN_res128(Model):
    def __init__(self, laten_z_dims, df_dim=64):
        super(Discriminator_AdaIN_res128, self).__init__()
        self.num_parts = laten_z_dims
        self.df_dim = df_dim
        self.instance_norm_0 = Instance_norm(True)
        self.h0 = Spectral_norm(self.df_dim * 4, strides=2)
        self.h0_a = ReLU()
        self.dh0 = Dense(1)
        self.h1 = Spectral_norm(self.df_dim * 8, strides=2)
        self.instance_norm_1 = Instance_norm(True)
        self.dh1 = Dense(1)
        self.h1_a = ReLU()
        self.h2 = Spectral_norm(self.df_dim * 16, strides=2)
        self.instance_norm_2 = Instance_norm(True)
        self.dh2 = Dense(1)
        self.h2_a = ReLU()
        self.h3 = Spectral_norm(self.df_dim * 32, strides=2)
        self.instance_norm_3 = Instance_norm(True)
        self.h3_a = ReLU()
        self.dh3 = Dense(1)
        self.h4_a = ReLU()
        # Returning logits to determine whether the images are real or fake
        self.dense_out = Dense(1)
        self.act_out = ReLU()
        self.encode = Dense(128)
        self.predict = Dense(self.num_parts)

    def style(self, x, h1_mean, h1_var):
        h1_mean = Flatten()(h1_mean)
        h1_var = Flatten()(h1_var)
        d_h1_style = tf.concat([h1_mean, h1_var], 0)
        return d_h1_style

    def call(self, x):
        x = self.h0(x)
        x, h1_mean, h1_var = self.instance_norm_0(x)
        d_h1_style = self.style(x, h1_mean, h1_var)
        d_logist_0 = self.dh0(d_h1_style)
        d_sigmoid_0 = tf.nn.sigmoid(d_logist_0)
        x = self.h0_a(x)
        x = self.h1(x)
        x, h2_mean, h2_var = self.instance_norm_1(x)
        d_h2_style = self.style(x, h2_mean, h2_var)
        d_logist_1 = self.dh1(d_h2_style)
        d_sigmoid_1 = tf.nn.sigmoid(d_logist_1)
        x = self.h1_a(x)
        x = self.h2(x)
        x, h3_mean, h3_var = self.instance_norm_2(x)
        d_h3_style = self.style(x, h3_mean, h3_var)
        d_logist_2 = self.dh2(d_h3_style)
        d_sigmoid_2 = tf.nn.sigmoid(d_logist_2)
        x = self.h2_a(x)
        x = self.h3(x)
        x, h4_mean, h4_var = self.instance_norm_3(x)
        d_h4_style = self.style(x, h4_mean, h4_var)
        d_logist_3 = self.dh3(d_h4_style)
        d_sigmoid_3 = tf.nn.sigmoid(d_logist_3)
        x = self.h3_a(x)
        x = Flatten()(x)
        h5 = self.dense_out(x)
        encode = self.encode(x)
        latent_out = self.act_out(x)
        cont_vars = self.predict(latent_out)
        return tf.nn.sigmoid(h5), h5, cont_vars, d_sigmoid_0, d_sigmoid_1, d_sigmoid_2, d_sigmoid_3
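

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original module): it only illustrates
# the call signatures of the models defined above. The batch size, the width
# of the categorical input and the shape of the rotation angle `phi` are
# assumptions for illustration; transform_3D, Spectral_norm, Instance_norm
# and Watersheed_Layer must be provided by utils_sortem for this to run.
if __name__ == '__main__':
    batch = 2
    catagorial = tf.random.normal([batch, 10])   # assumed flat categorical/latent code
    phi = tf.zeros([batch])                      # assumed per-sample in-plane rotation angle

    generator = Generator_AdaIN_res128(gf_dim=64)
    fake = generator(catagorial, phi)            # expected shape: [batch, 128, 128, 1], tanh range

    discriminator = Discriminator_AdaIN_res128(laten_z_dims=10, df_dim=64)
    prob, logits, cont_vars, s0, s1, s2, s3 = discriminator(fake)
    print(fake.shape, logits.shape, cont_vars.shape)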