Commit 8b361ec5 authored by Jonathan Juhl

add utils

parent e5649037
@@ -18,20 +18,20 @@ def loss_gen(D_logits_,predict_z,z,predict_angels,angels):
     g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(D_logits_, tf.ones_like(D_logits_)))+loss_latent(predict_z,z)+loss_angels(predict_angels,angels)
     return g_loss
-def loss_disc(d_h1_r,d_h1_f,d_h2_r,d_h2_f,d_h3_r,d_h3_f,d_h4_r,d_h4_,D_logits,D_logits_fake,predict_z,z,predict_angels,angels):
+def loss_disc(d_h1_r,d_h1_f,d_h2_r,d_h2_f,d_h3_r,d_h3_f,d_h4_r,d_h4_f,D_logits,D_logits_fake,predict_z,z,predict_angels,angels):
     z = tf.cast(z,tf.float32)
-    d_h1_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(d_h1_r, tf.ones_like(d_h1_r))) \
-                + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(d_h1_f, tf.zeros_like(d_h1_f)))
-    d_h2_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(d_h2_r, tf.ones_like(d_h2_r))) \
-                + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(d_h2_f, tf.zeros_like(d_h2_f)))
-    d_h3_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(d_h3_r, tf.ones_like(d_h3_r))) \
-                + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(d_h3_f, tf.zeros_like(d_h3_f)))
-    d_h4_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(d_h4_r, tf.ones_like(d_h4_r))) \
-                + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(d_h4_f, tf.zeros_like(d_h4_f)))
+    d_h1_loss = tf.reduce_mean(sigmoid_cross_entropy_with_logits(d_h1_r, tf.ones_like(d_h1_r))) \
+                + tf.reduce_mean(sigmoid_cross_entropy_with_logits(d_h1_f, tf.zeros_like(d_h1_f)))
+    d_h2_loss = tf.reduce_mean(sigmoid_cross_entropy_with_logits(d_h2_r, tf.ones_like(d_h2_r))) \
+                + tf.reduce_mean(sigmoid_cross_entropy_with_logits(d_h2_f, tf.zeros_like(d_h2_f)))
+    d_h3_loss = tf.reduce_mean(sigmoid_cross_entropy_with_logits(d_h3_r, tf.ones_like(d_h3_r))) \
+                + tf.reduce_mean(sigmoid_cross_entropy_with_logits(d_h3_f, tf.zeros_like(d_h3_f)))
+    d_h4_loss = tf.reduce_mean(sigmoid_cross_entropy_with_logits(d_h4_r, tf.ones_like(d_h4_r))) \
+                + tf.reduce_mean(sigmoid_cross_entropy_with_logits(d_h4_f, tf.zeros_like(d_h4_f)))
-    d_loss_real = tf.reduce_mean(sigmoid_cross_entropy_with_logits(D_logits, tf.ones_like(D_logits)))
-    d_loss_fake = tf.reduce_mean(sigmoid_cross_entropy_with_logits(D_logits_fake, tf.zeros_like(D_logits_fake)))
+    d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(D_logits, tf.ones_like(D_logits)))
+    d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(D_logits_fake, tf.zeros_like(D_logits_fake)))
     d_loss = d_h1_loss+d_h2_loss+d_h3_loss+d_h4_loss+d_loss_real+d_loss_fake+loss_latent(predict_z,z)+loss_angels(predict_angels,angels)
     return d_loss
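These losses mix bare `sigmoid_cross_entropy_with_logits(...)` calls with positional `tf.nn.sigmoid_cross_entropy_with_logits(...)` calls. Since TensorFlow 1.0 the `tf.nn` function expects the keyword arguments `labels=` and `logits=` (a positional call either errors or treats the first argument as `labels`), so the bare name presumably refers to a local wrapper defined elsewhere in the file. A minimal sketch of such a wrapper, assuming the `(logits, targets)` call order used above; the wrapper and its argument names are assumptions, not confirmed by this commit:

```python
import tensorflow as tf

def sigmoid_cross_entropy_with_logits(logits, targets):
    """Hypothetical wrapper assumed by the loss code above: it keeps the
    (logits, targets) call order while using TensorFlow's keyword arguments."""
    return tf.nn.sigmoid_cross_entropy_with_logits(labels=targets, logits=logits)

# Real samples are pushed towards label 1, generated samples towards label 0:
# d_loss_real = tf.reduce_mean(sigmoid_cross_entropy_with_logits(D_logits, tf.ones_like(D_logits)))
# d_loss_fake = tf.reduce_mean(sigmoid_cross_entropy_with_logits(D_logits_fake, tf.zeros_like(D_logits_fake)))
```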
@@ -174,10 +174,11 @@ class Instance_norm(tf.keras.layers.Layer):
     def call(self,x):
-        mean, variance = tf.nn.moments(x, axes=[1, 2], keep_dims=True)
+        mean, variance = tf.nn.moments(x, axes=[1, 2])
         epsilon = 1e-5
         inv = 1/tf.sqrt(variance + epsilon)
-        normalized = (input - mean) * inv
+        normalized = tf.transpose((tf.transpose(x,perm=[1,2,0,3]) - mean) * inv,perm=[2,0,1,3])
         if self.return_mean:
             return self.scale * normalized + self.offset, mean, variance
         else:
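Dropping `keep_dims=True` is what forces the transpose trick in the added line: without it, `tf.nn.moments` over axes `[1, 2]` returns statistics of shape `[N, C]`, which do not broadcast against an `[N, H, W, C]` input. In TensorFlow 2.x the argument is simply spelled `keepdims`, which keeps the statistics broadcastable and avoids the transposes. A minimal sketch under that assumption; only the class name and the `scale`/`offset` weights are taken from the diff (the `return_mean` branch is omitted), the rest is illustrative:

```python
import tensorflow as tf

class Instance_norm(tf.keras.layers.Layer):
    """Sketch of instance normalisation with keepdims=True (TF 2.x spelling),
    so mean/variance stay broadcastable and no transposes are needed."""
    def __init__(self, epsilon=1e-5, **kwargs):
        super().__init__(**kwargs)
        self.epsilon = epsilon

    def build(self, input_shape):
        channels = input_shape[-1]
        self.scale = self.add_weight("scale", shape=[channels], initializer="ones")
        self.offset = self.add_weight("offset", shape=[channels], initializer="zeros")

    def call(self, x):
        # Per-sample, per-channel statistics over the spatial axes H and W.
        mean, variance = tf.nn.moments(x, axes=[1, 2], keepdims=True)
        inv = tf.math.rsqrt(variance + self.epsilon)
        return self.scale * (x - mean) * inv + self.offset
```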
@@ -191,29 +192,30 @@ class Spectral_norm(tf.keras.layers.Layer):
         self.strides = strides
         self.kernels = kernels
     def build(self,input_shape):
-        self.w = self.add_weight("offset",
+        self.w = self.add_weight("kernel",
             shape=[self.kernels,self.kernels,input_shape[-1],self.channels])
-        self.u = self.add_weight("offset",
-            shape=[tf.math.reduce_prod(input_shape)*self.channels])
-        self.bias = self.add_weight("offset",
-            shape=[1,input_shape[-1]])
+        self.u = self.add_weight("spec_kernel",
+            shape=[1,self.channels], initializer=tf.keras.initializers.TruncatedNormal(), trainable=False)
+        self.bias = self.add_weight("offset",
+            shape=[1,self.channels])
+        self.inp_shape = input_shape[-1]
     def call(self,x):
         w_shape = tf.shape(x)
         x = tf.reshape(x, [-1, w_shape[-1]])
         w = tf.reshape(self.w, [-1, self.channels])
-        v_ = tf.matmul(self.u,w,transpose_b=True)
-        v_hat = tf.math.l2_normalize(v_)
-        u_ = tf.matmul(v_hat,w)
-        u_hat = tf.math.l2_normalize(u_)
+        v_ = tf.matmul(self.u,tf.reshape(self.w,[-1]),transpose_b=True)
+        v_hat = tf.math.l2_norm(v_)
+        u_ = tf.matmul(v_hat, w)
+        u_hat = tf.math.l2_norm(u_)
+        print(v_hat,u_hat);exit()
         sigma = tf.matmul(tf.matmul(v_hat, w), tf.transpose(u_hat))
         w_norm = w / sigma
-        w_norm = tf.reshape(w_norm, w_shape)
-        conv = tf.nn.conv2d(input_, w_norm, strides=[1, self.strides, self.strides, 1], padding='SAME')+self.bias
+        w_norm = tf.reshape(w_norm, [self.kernels,self.kernels,self.inp_shape,self.channels])
+        conv = tf.nn.conv2d(x, w_norm, strides=[1, self.strides, self.strides, 1], padding='SAME')+self.bias
         return conv
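The added power-iteration lines still contain a `print(...);exit()` debug stub, call `tf.math.l2_norm` (which does not exist; the normalisation op is `tf.math.l2_normalize`), and pass a rank-1 `tf.reshape(self.w,[-1])` to `tf.matmul`, which requires rank-2 operands, so the layer cannot run as committed. For reference, a minimal sketch of the usual one-step power iteration for a spectrally normalised convolution; the class and weight shapes mirror the diff, everything else is an illustrative assumption rather than the author's code:

```python
import tensorflow as tf

class Spectral_norm(tf.keras.layers.Layer):
    """Sketch of a spectrally normalised conv layer using one power-iteration
    step per call; assumes the standard algorithm, not this commit's exact code."""
    def __init__(self, channels, kernels, strides, **kwargs):
        super().__init__(**kwargs)
        self.channels, self.kernels, self.strides = channels, kernels, strides

    def build(self, input_shape):
        self.w = self.add_weight("kernel",
            shape=[self.kernels, self.kernels, input_shape[-1], self.channels])
        self.u = self.add_weight("u", shape=[1, self.channels],
            initializer=tf.keras.initializers.TruncatedNormal(), trainable=False)
        self.bias = self.add_weight("bias", shape=[self.channels], initializer="zeros")

    def call(self, x):
        w = tf.reshape(self.w, [-1, self.channels])                        # [k*k*in, out]
        v = tf.math.l2_normalize(tf.matmul(self.u, w, transpose_b=True))   # [1, k*k*in]
        u = tf.math.l2_normalize(tf.matmul(v, w))                          # [1, out]
        self.u.assign(u)                        # carry the power-iteration state forward
        sigma = tf.matmul(tf.matmul(v, w), u, transpose_b=True)            # approx. largest singular value
        w_norm = tf.reshape(w / sigma, tf.shape(self.w))
        return tf.nn.conv2d(x, w_norm, strides=[1, self.strides, self.strides, 1],
                            padding="SAME") + self.bias
```

Keeping `u` as a non-trainable weight, as the added `trainable=False` line does, lets the power-iteration estimate improve across training steps instead of restarting from random noise on every call.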