Commit 9ec83b9f authored by Jonathan Juhl

corrected log file

parent ecfe932b
import tensorflow as tf
import numpy as np
import umap
from os.path import join, isfile
from trainer_sortem import Trainer
from mrc_loader_sortem import mrc_loader
class GAN_NERF():
@@ -10,7 +10,7 @@ class GAN_NERF():
self.args = args
dic = {32:1,64:2,128:3,256:4,512:5}
self.predict_steps = int(np.ceil(args['number_particles']/(args['num_gpus']*args['batch_size'])))
self.dic = dic
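# snap the requested particle size to the nearest supported resolution (32-512);
# dic maps that resolution to an index into trainer.Encoder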
l = np.asarray([32,64,128,256,512])
self.args['resize'] = l[np.argmin(np.abs(l-self.args['size']))]
l_list = []
@@ -52,17 +52,20 @@ class GAN_NERF():
predict_generator = mrc_loader(args).pred_generate()
output_generator = mrc_loader(args).pred_generate()
if args['num_gpus'] > 1:
strategy = tf.distribute.MirroredStrategy(devices= gpu_list )
self.generator = strategy.experimental_distribute_dataset( generator)
self.generator_pred = strategy.experimental_distribute_dataset( predict_generator )
self.output_generator = strategy.experimental_distribute_dataset( output_generator )
else:
strategy = tf.distribute.OneDeviceStrategy(device=gpu_list[0])
self.generator = strategy.experimental_distribute_dataset( generator )
self.generator_pred = strategy.experimental_distribute_dataset( predict_generator )
self.output_generator = strategy.experimental_distribute_dataset( output_generator )
args['strategy'] = strategy
self.trainer = Trainer(args)
self.train()
@@ -72,7 +75,7 @@ class GAN_NERF():
print('Begin training: ', '-' * 60)
current_step = self.trainer.step_variable
gen = iter(self.generator)
pred = iter(self.generator_pred)
for i in range(int(current_step)):
# resume the data stream where it left off,
# so that resumed training does not revisit the same batches
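# a minimal sketch of the fast-forward, assuming the loop body (outside this
# hunk) only discards batches consumed in the previous run:
#     next(gen)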
@@ -101,27 +104,52 @@ class GAN_NERF():
self.trainer.distributed_training_step(params)
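# every args['record'] steps, extract feature vectors from roughly umap_t_size
# particles and log them via write_summaries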
if (i % self.args['record']) == 0:
if self.args['num_gpus'] == 1:
-self.single_device_model_maker.model_maker()
+features = []
+current_shape = params['image'].numpy().shape[1]
+for kk in range(int(np.ceil(self.args['umap_t_size']/self.args['batch_size']))):
+data = next(pred)
+features.append(self.args['strategy'].run(self.trainer.get_features,
+args=(data,params['alpha'],self.trainer.Encoder[int(params['index'])],current_shape)))
+self.trainer.write_summaries(features)
else:
-self.multi_device_model_maker.model_maker()
-self.trainer.write_summaries()
+data = next(pred)
+features = []
+for kk in range(int(np.ceil(self.args['umap_t_size']/(self.args['num_gpus']*self.args['batch_size'])))):
+features.append(self.args['strategy'].run(self.trainer.get_features,
+args=(data,params['alpha'],self.trainer.Encoder[int(params['index'])],current_shape)).reduce())
+self.trainer.write_summaries(features)
if (i % self.args['save_model']) == 0:
self.trainer.save_checkpoint()
self.trainer.save_best_model()
def over_cluster(self):
self.trainer.load_best_model()
self.trainer.sparse_water_sheed_algorithm()
def predict(self):
pred = iter(self.output_generator)
output_vectors = []
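# compute the per-particle feature vectors only if they are not already cached on disk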
if not isfile(join(self.args['results'],'final_featur_vectors.npy')):
if self.args['num_gpus'] > 1:
for kk in range(int(np.ceil(self.args['depth']/(self.args['num_gpus']*self.args['batch_size'])))):
data = next(pred)
output_vectors.append(self.args['strategy'].run(self.trainer.get_features,args=(data,params['alpha'],self.trainer.Encoder[self.dic[self.args['resize']]],current_shape)).reduce())
else:
for kk in range(int(np.ceil(self.args['depth']/self.args['batch_size']))):
data = next(pred)
output_vectors.append(self.args['strategy'].run(self.trainer.get_features,args=(data,params['alpha'],self.trainer.Encoder[self.dic[self.args['resize']]],current_shape)))
np.save(join(self.args['results'],'final_featur_vectors.npy'),output_vectors)
labels,umap_output,collect_centers = pred_umap(self.args,output_vectors)  # assumes the vectors computed above are the intended feature input
if not isfile(join(self.args['results'],'final_labels.npy')):
np.save(join(self.args['results'],'final_labels.npy'),labels)
np.save(join(self.args['results'],'final_umap_output.npy'),umap_output)
np.save(join(self.args['results'],'final_collect_centers.npy'),collect_centers)
self.trainer.load_best_model()
#self.trainer.model_maker()
bools = isfile(join(self.args['results'],'over_cluster.npy'))
if bools:
labels = np.load(join(self.args['results'],'over_cluster.npy'))
clusters = []
for i in range(self.predict_steps):
image = next(self.generator_pred )
@@ -33,7 +33,7 @@ class ED_Maker(Model):
self.final_layer = final_layer
self.fromRGB.reverse()
self.start = self.img_size_to_layer[shape]
self.flatten = Flatten()
self.lays = layers
def call(self,input,alpha):
@@ -51,7 +51,7 @@ class ED_Maker(Model):
x = layer(x)
-out = tf.squeeze(self.final_layer(x))
+out = self.flatten(self.final_layer(x))
@@ -61,7 +61,8 @@ class ED_Maker(Model):
v = []
for i in self.lays[:self.start ]:
v+= i.trainable_variables
-return v+self.fromRGB[self.start-1].trainable_variables+self.fromRGB[ int(self.start) ].trainable_variables
+k = self.final_layer.trainable_variables
+return v+k+self.fromRGB[self.start-1].trainable_variables+self.fromRGB[ int(self.start) ].trainable_variables
class ResidualConvBlock(Layer):
def __init__(self,inplanes, planes, kernel_size=3, stride=1, downsample=False, groups=1):
@@ -167,7 +168,7 @@ class Ray_maker(Layer):
s = tf.shape(coordinates)[1]
powered = tf.expand_dims(tf.pow(tf.cast(2.0,dtype=coordinates.dtype),tf.cast(tf.range(self.init_L),dtype=coordinates.dtype)),axis=0)
-out = tf.matmul(coordinates,powered,coordinates.dtype)
+out = tf.matmul(coordinates,powered,transpose_a=True)
out = tf.reshape(out,[-1,s,self.init_L*3])
@@ -212,9 +213,9 @@ class Ray_maker(Layer):
a,b = tf.split(z,2,axis=-1)
a = tf.squeeze(a)
b = tf.squeeze(b)
-fourie = tf.complex(tf.cast(a,tf.float32),tf.cast(b,tf.float32))
-return tf.squeeze(fourie)
+return a,b
class Noise_Maker(Model):
@@ -64,10 +64,8 @@ class mrc_loader:
image = tf.io.decode_raw(ins,tf.float32)
image = tf.reshape(image,[self.df_keys['size'],self.df_keys['size'],1])
image = tf.image.per_image_standardization(image)
-minima = tf.reduce_min(image)
-tmp = image+minima
-image = tmp/tf.reduce_max(tmp)
-return image
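# after standardization, shift and rescale the image so its intensities lie in [0, 1]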
+image = image-tf.reduce_min(image)
+return image/tf.reduce_max(image)
data = tf.data.FixedLengthRecordDataset(self.df_keys['mrc_paths'],self.df_keys['bpr'],num_parallel_reads=self.df_keys['num_cpus'], header_bytes=1024).map(pred_image,self.df_keys['num_cpus']).batch(self.df_keys['batch_size']).repeat()
return data
@@ -75,3 +73,25 @@ class mrc_loader:
# def generator_model(self,input_vectors,):
class Grid_Maker:
def __init__(self,kwargs,means):
self.kwargs = kwargs  # grid_generator reads m_batch_size from here
self.size = kwargs['size']
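# X covers only the non-negative half of frequency space (rfft-style layout),
# while Y and Z span the full [-0.5, 0.5] range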
x = tf.linspace(0.0,0.5,int(self.size/2)+1)
y = tf.linspace(-0.5,0.5,self.size)
z = tf.linspace(-0.5,0.5,self.size)
X,Y,Z = tf.meshgrid(x,y,z)
self.X = X
self.Y = Y
self.Z = Z
self.mean_size = means.shape[0]
def grid_generator(self):
x_slice = tf.data.Dataset.from_tensor_slices(self.X)
y_slice = tf.data.Dataset.from_tensor_slices(self.Y)
z_slice = tf.data.Dataset.from_tensor_slices(self.Z)
return tf.data.Dataset.zip((x_slice,y_slice,z_slice)).repeat(self.mean_size).batch(self.kwargs['m_batch_size'])
@@ -7,11 +7,52 @@ from os.path import join
from tensorflow.keras import Model
# Clustering Analysis via Deep Generative Models With Mixture Models
from tensorflow.keras.layers import Layer
import umap
import hdbscan
from sklearn.cluster import KMeans
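# recovers the tilt and psi Euler angles (in degrees) and the in-plane 2x2 rotation
# from a row-normalized 3x3 rotation matrix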
def get_parameters(self,normed_rotation,translation):  # NOTE: the translation argument is assumed; the body below uses it
normed_rotation,_ = tf.linalg.normalize(normed_rotation,axis=1)
translate_x = 2*translation[:,0]*self.ny
translate_y = 2*translation[:,1]*self.nx
tilt = tf.math.atan2(normed_rotation[:,2,1],normed_rotation[:,2,2])*180/(np.pi)
psi = tf.math.atan2(-normed_rotation[:,2,0],tf.sqrt(tf.pow(normed_rotation[:,2,1],2)+tf.pow(normed_rotation[:,2,2],2)))*180/(np.pi)
euler = tf.math.atan2(normed_rotation[:,1,0],normed_rotation[:,0,0])*180/(np.pi)
inplane = tf.reshape(tf.stack([tf.cos(euler),-tf.sin(euler),tf.sin(euler),tf.cos(euler)],axis=-1),[-1,2,2])
return tilt,psi,inplane
class Make_2D_Transform(tf.keras.layers.Layer):
def __init__(self,width):
super(Make_2D_Transform, self).__init__()
self.width = width
fourier = np.fft.fftfreq(width)
x = tf.linspace(0.0,float(width)-1,width)
self.F_X,self.X = np.meshgrid(x,fourier)
def call(self,real,imaginary):
s_r = tf.reverse(real,axis=[1])
s_i = tf.reverse(imaginary,axis=[1])
s_r = tf.concat([tf.expand_dims(s_r[:,0,:],axis=1),tf.reverse(s_r[:,1:,:],axis=[1])],axis=1)
s_im = tf.concat([tf.expand_dims(s_i[:,0,:],axis=1),tf.reverse(s_i[:,1:,:],axis=[1])],axis=1)
a = tf.concat([real,s_r[:,:,1:-1]],axis=2)
b = tf.concat([imaginary,-s_im[:,:,1:-1]],axis=2)
A = tf.cast(tf.cos(2*np.pi*self.F_X*self.X),tf.float32)/self.width
B = tf.cast(tf.sin(2*np.pi*self.F_X*self.X),tf.float32)/self.width
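# A and B hold the cosine/sine DFT kernels; multiplying on both sides effectively
# applies a real-valued 2-D inverse Fourier transform to the Hermitian-extended (a, b) pair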
a_1 = tf.matmul(A,a)-tf.matmul(B,b)
b_1 = tf.matmul(B,a)+tf.matmul(A,b)
a_out = tf.matmul(a_1,A)-tf.matmul(b_1,B)
return a_out
class DiversityLoss(tf.keras.losses.Loss):
def call(self,z,x):
-nominator = (tf.norm(tf.math.roll(x,1,axis=0)-x))
-denominator = (tf.norm(tf.math.roll(z,1,axis=0)-z))
+nominator = (tf.norm(tf.roll(x,1,axis=0)-x))
+denominator = (tf.norm(tf.roll(z,1,axis=0)-z))
s = -tf.math.minimum(nominator/denominator,10**(-4))
return s
class AverageBlurPooling2D(Layer):
@@ -105,12 +146,12 @@ class Eval_CTF(tf.keras.layers.Layer):
return tf.cast(ctf,tf.complex64)
-def gradient_penalty(self, discriminate,x_real, x_gen):
-epsilon = tf.random.uniform([x.shape[0], 1, 1, 1], 0.0, 1.0)
+def gradient_penalty(discriminate,x_real, x_gen,alpha):
+epsilon = tf.random.uniform([x_real.shape[0], 1, 1, 1], 0.0, 1.0)
x_hat = epsilon *x_real + (1 - epsilon) * x_gen
with tf.GradientTape() as t:
t.watch(x_hat)
-d_hat = discriminate(x_hat)
+d_hat = discriminate(x_hat,alpha)
gradients = t.gradient(d_hat, x_hat)
ddx = tf.sqrt(tf.reduce_sum(gradients ** 2, axis=[1, 2]))
d_regularizer = tf.reduce_mean((ddx - 1.0) ** 2)
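# WGAN-GP style penalty: the discriminator's gradient norm at interpolated samples is pushed towards 1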
@@ -118,37 +159,39 @@ def gradient_penalty(self, discriminate,x_real, x_gen):
class Discriminator_Loss(tf.keras.losses.Loss):
def __init__(self):
-self.loss_real = tf.keras.losses.BinaryCrossentropy(from_logits=True)
-self.loss_fake = tf.keras.losses.BinaryCrossentropy(from_logits=True)
-self.loss_tilde = tf.keras.losses.BinaryCrossentropy(from_logits=True)
-def call(self,x_real, D_real, y_pred):
-D_fake,D_tilde,D_real,reg = y_pred
-real_loss = self.loss_real(tf.ones_like(D_real),D_real)
-tilde_loss = self.loss_fake(tf.zeros_like(D_tilde),D_tilde)
-fake_loss = self.loss_tilde(tf.zeros_like(D_tilde),D_fake)
+super(Discriminator_Loss, self).__init__()
+def call(self,D_real, y_pred):
+D_tilde,D_fake = y_pred
-return real_loss+tilde_loss+fake_loss+reg
+tilde_loss = tf.nn.sigmoid_cross_entropy_with_logits(tf.zeros_like(D_tilde),D_tilde)
+real_loss = tf.nn.sigmoid_cross_entropy_with_logits(tf.ones_like(D_real),D_real)
+fake_loss = tf.nn.sigmoid_cross_entropy_with_logits(tf.zeros_like(D_tilde),D_fake)
+gan_loss = real_loss+tilde_loss+fake_loss
+return gan_loss
class Generator_Loss(tf.keras.losses.Loss):
def __init__(self):
-self.loss_mse = tf.keras.losses.MSE()
-self.loss_tilde = tf.keras.losses.BinaryCrossentropy(from_logits=True)
-self.loss_real = tf.keras.losses.BinaryCrossentropy(from_logits=True)
-def call(self, real_image, y_pred):
-D_tilde,D_fake,tilde_image = y_pred
-ll_loss = self.loss_mse(real_image,tilde_image)
-d_fake = self.loss_tilde(tf.ones_like(D_fake),D_fake)
-d_real = self.loss_real(tf.ones_like(D_tilde),D_tilde)
-return d_fake+d_real+0.1*ll_loss
+super(Generator_Loss, self).__init__()
+def call(self, D_real, y_pred):
+tilde_image,real_image, D_tilde,D_fake = y_pred
+real_loss = tf.nn.sigmoid_cross_entropy_with_logits(tf.ones_like(D_real),D_real)
+tilde_loss = tf.nn.sigmoid_cross_entropy_with_logits(tf.zeros_like(D_tilde),D_tilde)
+fake_loss = tf.nn.sigmoid_cross_entropy_with_logits(tf.zeros_like(D_tilde),D_fake)
+gan_loss = real_loss+tilde_loss+fake_loss
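# Gaussian log-likelihood (unit variance) of the reconstruction, weighted by 0.1 in the return value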
+c = -0.5 * tf.math.log(2 * np.pi)
+multiplier = 1.0 / (2.0 * 1)
+tmp = tf.square(tilde_image - real_image)
+tmp *= -multiplier
+tmp += c
+return 0.1 * tf.reduce_sum(tmp,axis=0) - gan_loss
class Encoder_Loss(tf.keras.losses.Loss):
def __init__(self):
-self.loss_mse = tf.keras.losses.MSE()
-self.loss_tilde = tf.keras.losses.BinaryCrossentropy(from_logits=True)
-self.loss_real = tf.keras.losses.BinaryCrossentropy(from_logits=True)
+super(Encoder_Loss, self).__init__()
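# KL divergence between N(mu, exp(log_var)) and a standard normal prior, summed over all dimensions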
def kl(self,mu,log_var):
return -0.5 * tf.reduce_sum(1 + log_var - tf.pow(mu, 2) - tf.exp(log_var))
@@ -173,14 +216,16 @@ class Poisson_Measure(tf.keras.layers.Layer):
super(Poisson_Measure, self).__init__()
def call(self,input_image,noise_lam):
# compute approximate Poisson noise and add it to the image
input_image = tf.expand_dims(input_image,axis=-1)
# rescale the image so that the argument of the square root is non-negative
mean = tf.constant([0.0],dtype=input_image.dtype)
scale = tf.constant([2.0],dtype=input_image.dtype)
_noise_scale = tf.cast(tf.sqrt(1. / noise_lam),dtype=input_image.dtype)
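# the noise amplitude grows roughly like sqrt(intensity / noise_lam), mimicking shot noise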
eps = tf.random.uniform(shape=tf.shape(input_image),dtype=input_image.dtype)
noise = (eps * _noise_scale *
tf.sqrt((tf.stop_gradient(input_image)- mean) / scale + 0.5)) * scale
output = input_image + noise
return output
@@ -196,12 +241,18 @@ def make_umap(args,feature_vectors):
min_cluster_size=args['minimum_size'],
).fit_predict(umap_output)
-index = np.unique(labels)
+unique_labels = np.unique(labels)
+unique_labels = unique_labels[np.greater(unique_labels,-1)]
collect_centers = []
-for i in index:
-bools = np.equal(i,labels)
-selected_features = np.take_along_axis(feature_vectors,bools,axis=0)
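# for every HDBSCAN cluster (noise label -1 excluded), refine it into args['frames'] k-means centers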
+for i in unique_labels:
+selected_features = umap_output[np.equal(i,labels)]
collect_centers.append(KMeans(n_clusters=args['frames']).fit(selected_features).cluster_centers_)  # assumed intent: fit on the cluster's own members
-return np.concatenate(collect_centers)
+return labels,umap_output,np.concatenate(collect_centers)
class Make_Grids(tf.keras.layers.Layer):
def __init__(self):