SortEM (Jonathan Juhl)

Commit 434e9c23 authored Apr 07, 2021 by Jonathan Juhl

added relative path and removed voltage bug

parent eb6e00c2
Changes 4
execute_sortem.py  View file @ 434e9c23
import numpy as np
import os
from super_clas_sortem import super_class
from os.path import join, getsize, isdir, isfile, dirname, basename
from os import listdir, rename
...
...
@@ -10,6 +11,8 @@ from os import listdir,mkdir
import glob
from os.path import isabs
from fac_sortem import DynAE

class control_flow:
...
...
@@ -49,9 +52,11 @@ class control_flow:
        heads = []
        head = []
        counter = 0
        for i in header:
            if 'loop_' == i:
                counter += 1
                head = []
                heads.append(head)
            else:
...
...
@@ -59,16 +64,20 @@ class control_flow:
        take_this = []
        for i in heads:
            if '_rlnVoltage' in i:
                take_that = i
            if '_rlnImageName' in i:
                take_this = i
        header = take_this
        name = header.index('_rlnImageName')
        voltage = header.index('_rlnVoltage')
        voltage = take_that.index('_rlnVoltage')
        defocusU = header.index('_rlnDefocusU')
        defocusV = header.index('_rlnDefocusV')
        defocusAngle = header.index('_rlnDefocusAngle')
        abberation = header.index('_rlnSphericalAberration')
        amp_contrast = header.index('_rlnAmplitudeContrast')
        abberation = take_that.index('_rlnSphericalAberration')
        amp_contrast = take_that.index('_rlnAmplitudeContrast')
        phase_shift = header.index('_rlnPhaseShift')
        ctf_params = []
...
...
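The two hunks above collect the STAR file's loop_ blocks and turn the RELION label names into column indices; per the commit message ("removed voltage bug"), the voltage, spherical-aberration and amplitude-contrast lookups now appear to be taken from the block that actually contains _rlnVoltage (take_that) rather than from the image-name block. A minimal, self-contained sketch of the label-to-index idea, on a made-up header (not the repository's parser):

# Illustrative only: map RELION labels to column indices in a STAR loop_ block.
header = ['_rlnImageName', '_rlnDefocusU', '_rlnDefocusV',
          '_rlnDefocusAngle', '_rlnPhaseShift']
columns = {label: header.index(label) for label in header}

row = '000001@stack.mrcs 14000.0 13500.0 25.0 0.0'
defocus_u = float(row.split()[columns['_rlnDefocusU']])  # -> 14000.0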
@@ -82,6 +91,13 @@ class control_flow:
        print("the --log true cannot be run as _rlnClassNumber is missing")
        for row in reader:
            if len(take_that) == len(row.split()) and counter == 2:
                abberation_d = float(row.split()[abberation])
                amp_contrast_d = float(row.split()[amp_contrast])
                V = float(row.split()[voltage])
                electron_volts = (1.23*10**3)/np.sqrt(V*(V*10**(-7)*1.96 + 1))
                counter = 0
            if len(header) == len(row.split()):
...
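The electron_volts expression has the shape of the relativistically corrected electron wavelength used in CTF calculations, lambda = h / sqrt(2*m*e*V*(1 + e*V/(2*m*c^2))); with V in volts this is commonly written lambda[A] = 12.2643 / sqrt(V*(1 + 0.97845e-6*V)). The constants in the diff (1.23e3 and 1.96e-7) differ, presumably reflecting a different unit convention. A sketch of the textbook form, for comparison only:

import numpy as np

def electron_wavelength_angstrom(voltage_volts):
    # Relativistic electron wavelength, e.g. 300 kV -> ~0.0197 Angstrom.
    return 12.2643 / np.sqrt(voltage_volts * (1.0 + 0.97845e-6 * voltage_volts))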
@@ -95,16 +111,18 @@ class control_flow:
                names.append(join(c, current_name))
                if self.verbose:
                    labels_list.append(int(row.split()[class_num]))
                V = float(row.split()[voltage])
                electron_volts = (1.23*10**3)/np.sqrt(V*(V*10**(-7)*1.96 + 1))
                abberation_d = float(row.split()[abberation])
                amp_contrast_d = float(row.split()[amp_contrast])
                if counter == 1:
                    V = float(row.split()[voltage])
                    electron_volts = (1.23*10**3)/np.sqrt(V*(V*10**(-7)*1.96 + 1))
                    abberation_d = float(row.split()[abberation])
                    amp_contrast_d = float(row.split()[amp_contrast])
                    counter = 0
                ctf_params.append([float(row.split()[phase_shift]), float(row.split()[defocusU]),
                                   float(row.split()[defocusV]), float(row.split()[defocusAngle])])
                current_id = row.split()[name].split('@')[0]
        np.save(join(self.particle_stack_dir, 'depth.npy'), f)
        np.save(join(self.particle_stack_dir, 'names.npy'), names)
        np.save(join(self.particle_stack_dir, 'electron_volts.npy'), electron_volts)
...
...
@@ -115,18 +133,20 @@ class control_flow:
            return f, np.unique(names)

        dir_path = os.path.dirname(os.path.realpath(__file__))
        if not isinstance(self.star, list) and isdir(star):
            star_files = glob.glob(self.star)
            if star_files == []:
                print("no star files in directory. You must point to at least one star file to run Sortinator.")
                exit()
            star_files = [join(star, i) for i in star_files]
            star_files = [join(dir_path, join(star, i)) for i in star_files]
        else:
            star_files = self.star
            star_files = [join(dir_path, i) for i in self.star]
        if not isfile(join(self.particle_stack_dir, 'depth.npy')):
            depth, mrc_paths = get_star_file_parameters(star_files)
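The hunk above appears to implement the "added relative path" part of the commit message: STAR file paths are now joined onto the directory of the running script, so relative inputs resolve regardless of the current working directory. A minimal sketch of that idea, using a hypothetical resolve_star_paths helper (not part of SortEM) and the isabs helper imported at the top of the file:

# Hypothetical helper: resolve STAR file paths relative to the script
# directory, leaving absolute paths untouched.
from os.path import isabs, join, dirname, realpath

def resolve_star_paths(star_paths):
    script_dir = dirname(realpath(__file__))
    return [p if isabs(p) else join(script_dir, p) for p in star_paths]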
...
...
fac_sortem.py  View file @ 434e9c23
...
...
@@ -132,7 +132,7 @@ class DynAE(super_class):
        # tmp = tf.expand_dims(tf.sqrt(tf.abs(ctf_applied_image)),axis=-1)
        # generated_image = tf.expand_dims(fake_model,axis=-1) #+ sigma*tmp
        #print(generated_image);exit()
        h5_sig_f, h5, predict_z, d_h1_f, d_h2_f, d_h3_f, d_h4_f, predict_angels = self.D(fake_model)
        h5_sig_f, h5, predict_z, d_h1_f, d_h2_f, d_h3_f, d_h4_f, predict_angels = self.D(fake_projections)
        h5_sig_r, h5, _, d_h1_r, d_h2_r, d_h3_r, d_h4_r, predict_angels_real = self.D(x_real)
        loss_tot = loss_disc(d_h1_r, d_h1_f, d_h2_r, d_h2_f, d_h3_r, d_h3_f, d_h4_r, d_h4_f,
                             h5_sig_r, h5_sig_f, predict_z, z, predict_angels, angels)
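For orientation, the pattern in this hunk is: run the discriminator on the generated projections and on the real images, then combine the outputs into a discriminator loss (the loss_disc call above also passes intermediate feature maps and the latent and angle predictions). The hinge formulation below is only an illustrative assumption, not the loss_disc defined in this repository:

import tensorflow as tf

# Illustrative hinge-style discriminator loss over final logits only.
def hinge_disc_loss(logits_real, logits_fake):
    loss_real = tf.reduce_mean(tf.nn.relu(1.0 - logits_real))
    loss_fake = tf.reduce_mean(tf.nn.relu(1.0 + logits_fake))
    return loss_real + loss_fake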
...
...
models.py  View file @ 434e9c23
...
...
@@ -15,15 +15,13 @@ def AdaIn_3D(inputs,s1,b1):
    d = tf.shape(inputs)[3]
    c = tf.shape(inputs)[4]
    tmp = Flatten()(inputs)
    inputs = Flatten()(inputs)
    mean = tf.reduce_mean(tmp, axis=1)
    std = tf.math.reduce_std(tmp, axis=1)
    out = s1*((inputs - mean)/std) + b1
    mean = tf.reduce_mean(inputs, axis=1, keepdims=True)
    std = tf.math.reduce_std(inputs, axis=1, keepdims=True)
    inputs = tf.reshape(((inputs - mean)/std), [b, w, h, d, c])
    out = tf.transpose(s1*tf.transpose(inputs, perm=[1, 2, 3, 0, 4]) + b1, perm=[3, 0, 1, 2, 4])
    out = tf.reshape(out, [b, w, h, d, c])
    return out

def AdaIn_2D(inputs, s1, b1):
...
...
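Both AdaIn_3D above and AdaIn_2D below implement adaptive instance normalization: normalize each sample's activations, then scale and shift them with style parameters s1 and b1 (produced elsewhere in the model, e.g. by the Double_Dense layers). The classic formulation normalizes per channel over the spatial dimensions, whereas the code in this commit flattens each sample and normalizes over all of its activations at once. A minimal self-contained 2D sketch of the classic form (shapes and names here are illustrative, not the repository's):

import tensorflow as tf

def adain_2d_sketch(x, s1, b1, eps=1e-5):
    # x: [batch, height, width, channels]; s1, b1: [batch, channels] style scale/shift.
    mean = tf.reduce_mean(x, axis=[1, 2], keepdims=True)      # per-sample, per-channel mean
    std = tf.math.reduce_std(x, axis=[1, 2], keepdims=True)   # per-sample, per-channel std
    normalized = (x - mean) / (std + eps)
    # Broadcast the style parameters over the spatial dimensions.
    return s1[:, None, None, :] * normalized + b1[:, None, None, :]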
@@ -33,12 +31,13 @@ def AdaIn_2D(inputs,s1,b1):
    c = tf.shape(inputs)[3]
    inputs = Flatten()(inputs)
    mean = tf.reduce_mean(inputs, axis=1)
    std = tf.math.reduce_std(inputs, axis=1)
    out = s1*((inputs - mean)/std) + b1
    out = tf.reshape(out, [b, w, h, c])
    mean = tf.reduce_mean(inputs, axis=1, keepdims=True)
    std = tf.math.reduce_std(inputs, axis=1, keepdims=True)
    inputs = tf.reshape(((inputs - mean)/std), [b, w, h, c])
    out = tf.transpose(s1*tf.transpose(inputs, perm=[1, 2, 0, 3]) + b1, perm=[2, 0, 1, 3])
    return out

class Double_Dense(Model):
    def __init__(self, channels):
...
...
@@ -140,10 +139,10 @@ class Generator_AdaIN_Noise(Model):
class Generator_AdaIN_res128(Model):
    def __init__(self, gf_dim=32):
        self.gf_dim = gf_dim
        super(Generator_AdaIN_res128, self).__init__()
        self.trans_3d = transform_3D()
        self.gf_dim = gf_dim
        self.h0 = Conv3DTranspose(self.gf_dim*8, 3, strides=2)
        self.zmap_0 = Double_Dense(self.gf_dim*8)
...
...
@@ -202,29 +201,30 @@ class Generator_AdaIN_res128(Model):
    def call(self, x, psi, phi, rho):
        a, b = self.zmap_0(x)
        x = self.h0(x)
    def call(self, z, psi, phi, rho):
        a, b = self.zmap_0(z)
        x = self.h0(z)
        x = self.h0_aI(x, a, b)
        x = self.h0_a(x)
        x = self.h1(x)
        a, b = self.z_map_1(x)
        print(x, a, b), exit()
        a, b = self.z_map_1(z)
        x = self.h1_aI(x, a, b)
        a, b = self.z_map_2(x)
        a, b = self.z_map_2(z)
        x = self.h2_aI(x, a, b)
        x = self.h2(x)
        x = transform_3D(x, psi, phi, rho)
        x = self.trans_3d(x, psi, phi, rho)
        x = self.h2_proj1(x)
        x = self.h2_proj1_a(x)
        x = self.h2_proj2(x)
        x = self.h2_proj2_a(x)
        b, w, h, d, c = tf.shape(x)
        x = tf.reshape(x, [b, w, h, d*c])
        s = tf.shape(x)
        x = tf.reshape(x, [s[0], s[1], s[2], s[3]*s[4]])
        x = self.h4(x)
        x = self.h4_a(x)
...
...
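A small detail in the call() hunk above: one variant unpacks the dynamic shape into Python variables (b, w, h, d, c = tf.shape(x)), which relies on the tensor's rank being statically known, while the other keeps the shape as a single tensor and indexes it, so every dimension stays a scalar tensor and the reshape remains graph friendly. A minimal sketch of the second pattern (assumed, not taken from the repository):

import tensorflow as tf

@tf.function
def flatten_last_two(x):
    # Keep the dynamic shape as a tensor and index it instead of unpacking it.
    s = tf.shape(x)
    return tf.reshape(x, [s[0], s[1], s[2], s[3] * s[4]])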
@@ -244,6 +244,7 @@ class Generator_AdaIN_res128(Model):
        x = self.h8_a(x)
        x = self.h9(x)
        return x

class Discriminator_AdaIN_res128(Model):
...
...
utils_sortem.py  View file @ 434e9c23
...
...
@@ -3,7 +3,6 @@ import tensorflow as tf
import numpy as np
from os.path import join
import tensorflow_addons as tfa
import mrcfile
import matplotlib.pyplot as plt
import math
from tensorflow.keras.layers import Dense, LeakyReLU
...
...
@@ -58,23 +57,11 @@ class transform_3D(tf.keras.layers.Layer):
        beta = 2*3.1415*tf.cast(beta, tf.float32)
        gamma = 2*3.1415*tf.cast(gamma, tf.float32)
        voxels = tf.transpose(voxels, perm=[1, 2, 3, 0, 4])
        dimensions = tf.shape(voxels)[0]
        x = tf.cast(tf.range(-tf.cast(tf.floor(tf.cast(dimensions, tf.float32)/2.0), tf.int32),
                             tf.cast(tf.cast(dimensions, tf.float32) - tf.floor(tf.cast(dimensions, tf.float32)/2.0), tf.int32)),
                    tf.float32)
        X, Y, Z = tf.meshgrid(x, x, x)
        tmp = tf.reshape(voxels, [-1])
        X = tf.reshape(X, [-1]); Y = tf.reshape(Y, [-1]); Z = tf.reshape(Z, [-1])
        X_mass = tf.round(tf.reduce_mean(X*tmp))
        Y_mass = tf.round(tf.reduce_mean(Y*tmp))
        Z_mass = tf.round(tf.reduce_mean(Z*tmp))
        coordinates = tf.stack([X, Y, Z], axis=0)
        kernel = tf.cast(coordinates, tf.float32)
        channels = tf.shape(voxels)[-1]
        batchdim = tf.shape(voxels)[-2]
        rotation_matrix_x = tf.stack([tf.constant(1.0), tf.constant(0.0), tf.constant(0.0),
                                      tf.constant(0.0), tf.cos(alpha), -tf.sin(alpha),
...
...
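The collapsed lines presumably finish building the three Euler rotation matrices; the hunk below then composes them and applies the result to the coordinate kernel. For reference, a compact NumPy sketch of composing rotations about the x, y and z axes (illustrative only; the layer itself builds the matrices with TensorFlow ops):

import numpy as np

def rotation_matrix(alpha, beta, gamma):
    # Rotation about x, then y, then z, composed as Rx @ Ry @ Rz.
    ca, sa = np.cos(alpha), np.sin(alpha)
    cb, sb = np.cos(beta), np.sin(beta)
    cg, sg = np.cos(gamma), np.sin(gamma)
    Rx = np.array([[1, 0, 0], [0, ca, -sa], [0, sa, ca]])
    Ry = np.array([[cb, 0, sb], [0, 1, 0], [-sb, 0, cb]])
    Rz = np.array([[cg, -sg, 0], [sg, cg, 0], [0, 0, 1]])
    return Rx @ Ry @ Rz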
@@ -93,26 +80,26 @@ class transform_3D(tf.keras.layers.Layer):
        rotation_matrix_y = tf.reshape(rotation_matrix_y, (3, 3))
        rotation_matrix_z = tf.reshape(rotation_matrix_z, (3, 3))
        s = tf.matmul(rotation_matrix_x, tf.matmul(rotation_matrix_y, rotation_matrix_z))
        r = tf.matmul(tf.matmul(rotation_matrix_x, tf.matmul(rotation_matrix_y, rotation_matrix_z)), kernel)
        r = tf.matmul(tf.matmul(rotation_matrix_x, tf.matmul(rotation_matrix_y, rotation_matrix_z)), self.kernel)
        x, y, z = tf.split(r, 3, axis=0)
        X = tf.reshape(x, [-1])
        Y = tf.reshape(y, [-1])
        Z = tf.reshape(z, [-1])
        X_lower = tf.math.floor(X) - X_mass  # tf.clip_by_value(tf.math.floor(X),0,self.dimensions)
        X_upper = tf.math.ceil(X) - X_mass  # tf.clip_by_value(tf.math.ceil(X),0,self.dimensions)
        X_lower = tf.math.floor(X)  # tf.clip_by_value(tf.math.floor(X),0,self.dimensions)
        X_upper = tf.math.ceil(X)  # tf.clip_by_value(tf.math.ceil(X),0,self.dimensions)
        Y_lower = tf.math.floor(Y) - Y_mass  # tf.clip_by_value(tf.math.floor(Y),0,self.dimensions)
        Y_upper = tf.math.ceil(Y) - Y_mass  # tf.clip_by_value(tf.math.ceil(Y),0,self.dimensions)
        Y_lower = tf.math.floor(Y)  # tf.clip_by_value(tf.math.floor(Y),0,self.dimensions)
        Y_upper = tf.math.ceil(Y)  # tf.clip_by_value(tf.math.ceil(Y),0,self.dimensions)
        Z_lower = tf.math.floor(Z) - Z_mass  # tf.clip_by_value(tf.math.floor(Z),0,self.dimensions)
        Z_upper = tf.math.ceil(Z) - Z_mass  # tf.clip_by_value(tf.math.ceil(Z),0,self.dimensions)
        Z_lower = tf.math.floor(Z)  # tf.clip_by_value(tf.math.floor(Z),0,self.dimensions)
        Z_upper = tf.math.ceil(Z)  # tf.clip_by_value(tf.math.ceil(Z),0,self.dimensions)
        x_d = (X - X_lower + 0.001)/(X_upper - X_lower + 0.001)
        y_d = (Y - Y_lower + 0.001)/(Y_upper - Y_lower + 0.001)
        z_d = (Z - Z_lower + 0.001)/(Z_upper - Z_lower + 0.001)
        coord_000 = tf.stack([X_lower, Y_lower, Z_lower], axis=1) + tf.cast(tf.floor(dimensions/2), tf.float32)
        coord_001 = tf.stack([X_lower, Y_lower, Z_upper], axis=1) + tf.cast(tf.floor(dimensions/2), tf.float32)
        coord_011 = tf.stack([X_lower, Y_upper, Z_upper], axis=1) + tf.cast(tf.floor(dimensions/2), tf.float32)
...
...
@@ -129,7 +116,11 @@ class transform_3D(tf.keras.layers.Layer):
        c100 = tf.gather_nd(voxels, tf.cast(coord_100, tf.int32))
        c010 = tf.gather_nd(voxels, tf.cast(coord_010, tf.int32))
        c110 = tf.gather_nd(voxels, tf.cast(coord_110, tf.int32))
        x_d = tf.expand_dims(tf.expand_dims(x_d, axis=1), axis=1)
        y_d = tf.expand_dims(tf.expand_dims(y_d, axis=1), axis=1)
        z_d = tf.expand_dims(tf.expand_dims(z_d, axis=1), axis=1)
        c00 = c000*(1 - x_d) + c100*x_d
        c01 = c001*(1 - x_d) + c101*x_d
        c10 = c010*(1 - x_d) + c110*x_d
...
...
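The c00/c01/c10 lines above and the c0/c1/c lines below are the standard trilinear interpolation cascade: blend the eight voxel corners along x, then y, then z, using the fractional offsets x_d, y_d and z_d. A standalone NumPy sketch of the same arithmetic (names mirror the hunk; this is not the layer's actual code):

import numpy as np

def trilinear(corners, x_d, y_d, z_d):
    # corners[i][j][k] is the voxel value at offset (i, j, k) from the lower corner.
    c00 = corners[0][0][0]*(1 - x_d) + corners[1][0][0]*x_d
    c01 = corners[0][0][1]*(1 - x_d) + corners[1][0][1]*x_d
    c10 = corners[0][1][0]*(1 - x_d) + corners[1][1][0]*x_d
    c11 = corners[0][1][1]*(1 - x_d) + corners[1][1][1]*x_d
    c0 = c00*(1 - y_d) + c10*y_d
    c1 = c01*(1 - y_d) + c11*y_d
    return c0*(1 - z_d) + c1*z_d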
@@ -137,13 +128,18 @@ class transform_3D(tf.keras.layers.Layer):
        c0 = c00*(1 - y_d) + c10*y_d
        c1 = c01*(1 - y_d) + c11*y_d
        c = c0*(1 - z_d) + c1*z_d
        return tf.reduce_sum(tf.reshape(c, [dimensions, dimensions, dimensions]), axis=0)
        c = tf.transpose(c, perm=[1, 0, 2])
        out = tf.reshape(c, [batchdim, dimensions, dimensions, dimensions, channels])
        return out

    def call(self, voxels, alpha, beta, gamma):
        image = tf.expand_dims(tf.map_fn(lambda x: self.rotation_map(tf.squeeze(voxels), x[0], x[1], x[2]),
                                         [alpha, beta, gamma], dtype=tf.float32), axis=-1)
        s = tf.shape(image)
        image = tf.reshape(image, [s[0]*s[1], s[2], s[3], s[4], s[5]])
        return image
...
...
@@ -312,11 +308,13 @@ def apply_ctf(image,ctf_params,gamma,spherical_abberation,w_2):
    return tf.cast(image, tf.float32)

def z_maker(model, num_linear_params):
    k = np.random.random(num_linear_params)
    #k = k/np.sum(k)
    z = np.dot(model, k)
    s = np.cbrt(z.shape[0]).astype(np.int32)
    z = np.reshape(z, [1, s, s, s, 1])
    z = np.concatenate([z]*10, axis=0)
    return z, k
...
...
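z_maker draws random linear-combination weights k, mixes the columns of model into a single vector, and reshapes that vector into a cubic volume repeated ten times along the batch axis (it therefore assumes the vector length is a perfect cube). A hypothetical usage sketch; the basis matrix here is random, whereas in SortEM the model argument comes from elsewhere in the pipeline:

import numpy as np
# assuming: from utils_sortem import z_maker

# Hypothetical input: 8 basis volumes of 32**3 voxels, flattened into columns.
basis = np.random.random((32**3, 8))
z, k = z_maker(basis, num_linear_params=8)
# z should have shape (10, 32, 32, 32, 1) and k shape (8,).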