Commit bcb915e9 authored by Jonathan Juhl's avatar Jonathan Juhl
Browse files

removed tensorflow_addons

parent a48ba7b2
import numpy as np
from super_class import super_class
from .super_clas_sortem import super_class
from os.path import join,getsize,isdir,isfile,dirname,basename
from os import listdir,rename
from fac import DynAE
from .fac_sortem import DynAE
from os import listdir,mkdir
import glob
......@@ -13,9 +13,10 @@ class control_flow:
def __init__(self,num_gpus,gpu_list,num_cpus,num_clusters,star,deep_NN_batch_size,deep_NN_prediction_size,workdir,half_precision,max_particles,epochs,pca,transfer_learning,verbose):
def __init__(self,num_gpus,gpu_list,num_cpus,num_clusters,star,deep_NN_batch_size,workdir,half_precision,max_particles,epochs,transfer_learning,verbose):
self.workdir = workdir
self.verbose = verbose
if not isdir(workdir): # check if all dirs are their
mkdir(workdir)
if not isdir(join(workdir,'gui_display')):
......@@ -51,26 +52,49 @@ class control_flow:
with open(z, newline='') as csvfile:
reader = list(csvfile)
header = list(filter(lambda x: '_rln' in x,reader))
header = list(filter(lambda x: '_rln' == x[0:4] or 'loop_' == x.strip(),reader))
header = [i.split()[0] for i in header]
heads = []
head = []
for i in header:
if 'loop_' == i:
head = []
heads.append(head)
else:
head.append(i)
take_this = []
for i in heads:
if '_rlnImageName' in i:
take_this = i
header = take_this
name = header.index('_rlnImageName')
if bool(verbose):
if self.verbose:
try:
class_num = header.index('_rlnClassNumber')
except:
verbose = False
print("the --log true cannot be run")
self.verbose = False
print("the --log true cannot be run as _rlnClassNumber i missing ")
for row in reader:
#current_name = '/emcc/misser11/New_Project/Assets/ParticleStacks/particle_stack_0.mrc'
if len(header)== len(row.split()):
# try:
#current_name = '/emcc/misser11/New_Project/Assets/ParticleStacks/particle_stack_1.mrc'
current_name = row.split()[name].split('@')[1]
current_id = row.split()[name].split('@')[0]
......@@ -79,7 +103,7 @@ class control_flow:
counter.append(count)
names.append(join(c,current_name))
count = 0
elif len(names) == 0:
......@@ -87,24 +111,27 @@ class control_flow:
count +=1
else:
count +=1
if bool(verbose):
if self.verbose:
labels_list.append(int(row.split()[class_num]))
#except: pass
counter.append(count)
np.save(join(join(self.workdir, 'particle_stack_dir'),'labels'),np.asarray(labels_list))
tmp_length = np.asarray(counter).sum()
np.save(join(join(self.workdir, 'particle_stack_dir'),'labels'),np.asarray(labels_list))
return tmp_length,names
depth,mrc_paths = get_star_file_parameters(star_files,star)
length,bytes_pr_record = self.get_parameters(mrc_paths)
DynAE( mrc_paths,
workdir,
length,
......@@ -116,13 +143,10 @@ class control_flow:
gpu_list,
half_precision,
deep_NN_batch_size,
deep_NN_prediction_size,
transfer_learning,
max_particles,
epochs,
verbose,
pca
)
verbose)
......
This diff is collapsed.
import argparse
from execute_sortem import control_flow
from .execute_sortem import control_flow
def main():
parser = argparse.ArgumentParser(description='Run sortinator.')
......@@ -27,10 +26,12 @@ def main():
parser.add_argument('--log', type=str,default="False",help='log all possible values to file (loss, pca_components,NMI,Recall,false positives,false negatives.')
parser.add_argument('--num_classes',type=int,default=4,help='the number of classes to perform the fine tuning.')
parser.add_argument('--num_classes',type=int,default=4,help='the number subparts in the refinement process.')
args = parser.parse_args()
num_gpus = args.num_gpus
if args.gpu_list == None:
gpu_list = ['/GPU:0']
else:
......@@ -43,25 +44,27 @@ def main():
num_cpus = args.num_cpus
star = args.star
train_light = args.tr
if train_light =='False':
train_light = False
else:
train_light = True
deep_NN_batch_size = args.ab
epochs = args.epochs
output = args.o
half_precision = args.f16
pca = args.pca
verbose = args.log
max_particles = args.mp
num_clusters = args.num_classes
use_cluster_head = args.use_cluster_head
if use_cluster_head == 'True':
use_cluster_head = True
if train_light =='False':
train_light = False
else:
train_light = True
if verbose == 'True':
verbose = True
else:
use_cluster_head = False
verbose = False
if half_precision == 'False':
half_precision = False
......@@ -74,18 +77,16 @@ def main():
num_clusters,
star,
deep_NN_batch_size,
deep_NN_prediction_size,
output,
half_precision,
max_particles,
epochs,
pca,
train_light,
verbose)
if __name__ == "__main__":
#if __name__ == "__main__":
main()
main()
\ No newline at end of file
# Clustering-evaluation helpers: expose short aliases for the two
# scikit-learn scores used to compare predicted cluster assignments
# against reference labels.
import numpy as np
from sklearn.metrics import normalized_mutual_info_score, adjusted_rand_score
# nmi(labels_true, labels_pred) -> float in [0, 1]; 1.0 means identical clusterings.
nmi = normalized_mutual_info_score
# ari(labels_true, labels_pred) -> float; 1.0 is a perfect match,
# values near 0.0 indicate chance-level agreement.
ari = adjusted_rand_score
......
......@@ -5,7 +5,6 @@ from os import listdir
from random import sample
import tensorflow as tf
import numpy as np
import tensorflow_addons as tfa
class mrc_loader:
......@@ -92,7 +91,7 @@ class mrc_loader:
alphas = tf.random.uniform([1])
images =tf.cast( tf.squeeze(images), self.precision )
images = tf.squeeze(tfa.image.rotate(tf.expand_dims(images,axis=2),alphas*2*3.1415))
x = tf.cast(tf.random.uniform([1],minval = minval, maxval = maxval,dtype=tf.float32)*self.image_size,tf.int32)
y = tf.cast(tf.random.uniform([1],minval = minval, maxval = maxval,dtype=tf.float32)*self.image_size,tf.int32)
......@@ -100,7 +99,7 @@ class mrc_loader:
croped_image = tf.expand_dims(croped_image,axis=2)
resized_image = tf.image.resize(croped_image, [rescale,rescale],method='lanczos5')
resized_image = tf.image.resize(croped_image, [rescale,rescale])
resized_image = tf.image.random_brightness(resized_image,0.1,0.6)
resized_image = tf.image.random_contrast(resized_image,0.1,0.6)
......
......@@ -3,7 +3,6 @@ from os.path import join,isdir,getsize,isfile
from os import mkdir,listdir
import tensorflow as tf
import numpy as np
from H_reader import HF_rader
class super_class:
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment