Jonathan Juhl / SortEM · Commits

Commit 90bb6c10, authored May 20, 2021 by Jonathan Juhl
Commit message: sortem.py
Parent commit: 6f250433
Changes: 6 files
execute_sortem.py
...
...
@@ -64,21 +64,22 @@ class control_flow:
         take_this = []
         for i in heads:
-            if '_rlnVoltage' in i:
-                take_that = i
+            if self.ctf:
+                if '_rlnVoltage' in i:
+                    take_that = i
             if '_rlnImageName' in i:
                 take_this = i
         header = take_this
         name = header.index('_rlnImageName')
-        voltage = take_that.index('_rlnVoltage')
-        defocusU = header.index('_rlnDefocusU')
-        defocusV = header.index('_rlnDefocusV')
-        defocusAngle = header.index('_rlnDefocusAngle')
-        abberation = take_that.index('_rlnSphericalAberration')
-        amp_contrast = take_that.index('_rlnAmplitudeContrast')
-        phase_shift = header.index('_rlnPhaseShift')
+        if self.ctf:
+            voltage = take_that.index('_rlnVoltage')
+            defocusU = header.index('_rlnDefocusU')
+            defocusV = header.index('_rlnDefocusV')
+            defocusAngle = header.index('_rlnDefocusAngle')
+            abberation = take_that.index('_rlnSphericalAberration')
+            amp_contrast = take_that.index('_rlnAmplitudeContrast')
+            phase_shift = header.index('_rlnPhaseShift')
         ctf_params = []
         if self.verbose:
...
...
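The `.index()` lookups in the hunk above recover column positions from the `_rln*` labels of a RELION STAR `loop_` header. A self-contained sketch of that pattern (hypothetical header and data row, not the repository's parser):

heads = ['_rlnImageName #1', '_rlnVoltage #2', '_rlnDefocusU #3']   # hypothetical loop_ labels
labels = [h.split()[0] for h in heads]
name = labels.index('_rlnImageName')       # column holding the particle image reference
voltage = labels.index('_rlnVoltage')      # column holding the acceleration voltage
row = '000001@stack.mrcs 300.0 12000.5'    # hypothetical data row
print(row.split()[name], float(row.split()[voltage]))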
@@ -91,13 +92,14 @@ class control_flow:
             print("the --log true cannot be run as _rlnClassNumber is missing")
         for row in reader:
-            if len(take_that) == len(row.split()) and counter == 2:
-                abberation_d = float(row.split()[abberation])
-                amp_contrast_d = float(row.split()[amp_contrast])
-                V = float(row.split()[voltage])
-                electron_volts = (1.23*10**3)/np.sqrt(V*(V*10**(-7)*1.96+1))
-                counter = 0
+            if self.ctf:
+                if len(take_that) == len(row.split()) and counter == 2:
+                    abberation_d = float(row.split()[abberation])
+                    amp_contrast_d = float(row.split()[amp_contrast])
+                    V = float(row.split()[voltage])
+                    electron_volts = (1.23*10**3)/np.sqrt(V*(V*10**(-7)*1.96+1))
+                    counter = 0
             if len(header) == len(row.split()):
...
...
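The `electron_volts` expression above derives an electron-wavelength-like quantity from the acceleration voltage V. For comparison, a standalone helper using the commonly cited relativistic de Broglie constants (a sketch; the commit's constants, 1.23e3 and 1.96e-7, differ from these):

import numpy as np

def electron_wavelength_pm(V):
    # Relativistic electron wavelength in picometres, for V in volts.
    return 1226.39 / np.sqrt(V * (1.0 + 0.97845e-6 * V))

print(round(electron_wavelength_pm(300e3), 3))  # ~1.969 pm at 300 kV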
@@ -113,23 +115,27 @@ class control_flow:
                 labels_list.append(int(row.split()[class_num]))
             if counter == 1:
-                V = float(row.split()[voltage])
-                abberation_d = float(row.split()[abberation])
-                amp_contrast_d = float(row.split()[amp_contrast])
+                if self.verbose:
+                    V = float(row.split()[voltage])
+                    abberation_d = float(row.split()[abberation])
+                    amp_contrast_d = float(row.split()[amp_contrast])
                 counter = 0
-            ctf_params.append([float(row.split()[phase_shift]), float(row.split()[defocusU]), float(row.split()[defocusV]), float(row.split()[defocusAngle])])
+            if self.ctf:
+                ctf_params.append([float(row.split()[phase_shift]), float(row.split()[defocusU]), float(row.split()[defocusV]), float(row.split()[defocusAngle])])
             current_id = row.split()[name].split('@')[0]
         np.save(join(self.particle_stack_dir, 'depth.npy'), f)
         np.save(join(self.particle_stack_dir, 'names.npy'), names)
-        np.save(join(self.particle_stack_dir, 'electron_volts.npy'), V)
-        np.save(join(self.particle_stack_dir, 'spherical_abberation.npy'), abberation_d)
-        np.save(join(self.particle_stack_dir, 'amplitude_contrast.npy'), amp_contrast_d)
-        np.save(join(self.particle_stack_dir, 'ctf_params.npy'), np.asarray(ctf_params))
-        np.save(join(self.particle_stack_dir, 'labels.npy'), np.asarray(labels_list))
+        if self.ctf:
+            np.save(join(self.particle_stack_dir, 'electron_volts.npy'), V)
+            np.save(join(self.particle_stack_dir, 'spherical_abberation.npy'), abberation_d)
+            np.save(join(self.particle_stack_dir, 'amplitude_contrast.npy'), amp_contrast_d)
+            np.save(join(self.particle_stack_dir, 'ctf_params.npy'), np.asarray(ctf_params))
+        if self.verbose:
+            np.save(join(self.particle_stack_dir, 'labels.npy'), np.asarray(labels_list))
         return f, np.unique(names)
...
...
@@ -163,7 +169,8 @@ class control_flow:
         DynAE(parameter_file_path, mrc_paths)
-        final_labels = np.load(join(self.refined, 'final_labels.npy'))
+        final_labels = np.load(join(self.refined, 'labels.npy'))
         self.write_star_file(star_files, final_labels)

     def add_params(self, parameter_file_path, current_image, binary, num_particles, width):
...
...
fac_sortem.py

This diff is collapsed.
main_sortem.py
...
...
@@ -44,6 +44,16 @@ def main():
     parser.add_argument('--ctf', type=str, default='True', help='Use CTF parameters for the model.')
     parser.add_argument('--noise', type=str, default='True', help='Use the noise generator for the model.')
+    parser.add_argument('--median_noise', type=int, default=10000, help='Number of random vectors used for fitting.')
+    parser.add_argument('--interpolation_count', type=int, default=10, help='Number of images to interpolate.')
+    parser.add_argument('--angular_samples', type=int, default=10000, help='Samples used to build the 2D histogram.')
+    parser.add_argument('--feature_samples', type=int, default=2000, help='Samples used to build the 2D histogram of the z-features.')
+    parser.add_argument('--angular_clusters', type=int, default=8, help='Number of Gaussians to draw from n^2.')
     args = parser.parse_args()
...
...
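For reference, a minimal sketch of how the newly added flags parse (same names, types, and defaults as the diff; the surrounding parser is reduced to these five arguments):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--median_noise', type=int, default=10000)
parser.add_argument('--interpolation_count', type=int, default=10)
parser.add_argument('--angular_samples', type=int, default=10000)
parser.add_argument('--feature_samples', type=int, default=2000)
parser.add_argument('--angular_clusters', type=int, default=8)

args = parser.parse_args(['--angular_clusters', '16'])
print(args.angular_clusters, args.median_noise)  # 16 10000 (unset flags keep their defaults)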
@@ -74,7 +84,7 @@ def main():
     width = 0
     s1 = ' '.join(gpu_list)
-    l = [args.ab, args.pb, args.num_parts, args.num_cpus, args.num_gpus, args.vi, ' '.join(gpu_list), args.f16, args.verbose, args.epochs, args.mp, ' '.join(args.star), args.lr, args.angels, args.ctf, args.noise, 'current_image', binary, num_particles, width]
+    l = [args.ab, args.pb, args.num_parts, args.num_cpus, args.num_gpus, args.vi, ' '.join(gpu_list), args.f16, args.verbose, args.epochs, args.mp, ' '.join(args.star), args.lr, args.angels, args.ctf, args.noise, args.median_noise, args.interpolation_count, args.angular_samples, args.angular_clusters, args.feature_samples, 'current_image', binary, num_particles, width]
     if not isdir(args.o):
         mkdir(args.o)
...
...
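The entries of `l` are consumed positionally as `parameters[...]` in super_clas_sortem.py (see the hunk for that file below). A sketch of the 0-based index correspondence implied by this diff:

# Position in `l` -> attribute assigned in super_class.__init__
PARAM_MAP = {
    16: ('args.median_noise',        'self.batch_size_mean'),
    17: ('args.interpolation_count', 'self.interpolation_num_samples'),
    18: ('args.angular_samples',     'self.batch_size_angels'),
    19: ('args.angular_clusters',    'self.angular_cluster'),
    20: ('args.feature_samples',     'self.feature_batch'),
    22: ('binary',                   'self.bytes_pr_record'),
    23: ('num_particles',            'self.depth'),
    24: ('width',                    'self.width'),
}
for idx, (src, dst) in sorted(PARAM_MAP.items()):
    print(f'parameters[{idx}]: {src} -> {dst}')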
models_sortem.py (new file, mode 100644)
from super_clas_sortem import super_class
from tensorflow.keras import Model
import tensorflow as tf
from tensorflow.keras.layers import (Flatten, LeakyReLU, Conv2DTranspose, Dense, Conv2D,
                                     UpSampling2D, Conv3D, Conv3DTranspose, UpSampling3D, ReLU)
from utils_sortem import Spectral_norm, Instance_norm, transform_3D, Watersheed_Layer
class Fit_Layer(Model):

    def __init__(self, mean_matrix, variance_matrix, num_parts):
        super(Fit_Layer, self).__init__()
        self.watersheed_layer = Watersheed_Layer(mean_matrix, variance_matrix, num_parts)

    def call(self, noise):
        diff = self.watersheed_layer(noise)
        return diff
class Cluster_Layer(Model):

    def __init__(self, latent_dim, initializer_mean, initializer_variance):
        super(Cluster_Layer, self).__init__()
        self.dense_mean = Dense(latent_dim, use_bias=False)
        self.dense_var = Dense(1, use_bias=False)
        self.dense_mean_angel = Dense(2, kernel_initializer=tf.keras.initializers.Constant(initializer_mean), use_bias=False)
        self.dense_var_angel = Dense(1, kernel_initializer=tf.keras.initializers.Constant(initializer_variance), use_bias=False)

    def call(self, catagorial_variable, catagorial_angel):
        catagorial_variable = Flatten()(catagorial_variable)
        mean = self.dense_mean(catagorial_variable)
        s = tf.shape(mean)
        batch = s[0]
        length = s[1]
        epsilon = tf.random.normal(shape=[batch, length])
        logvar = self.dense_var(catagorial_variable)
        out = epsilon * logvar + mean
        mean_angel = self.dense_mean_angel(catagorial_angel)
        logvar_angel = self.dense_var_angel(catagorial_angel)
        s = tf.shape(catagorial_angel)
        epsilon_angel = tf.random.normal(shape=[s[0], 2])
        out_angel = epsilon_angel * logvar_angel + mean_angel
        return tf.transpose(out), out_angel
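# Note on the sampling above (an aside, not part of the committed file):
# `out = epsilon * logvar + mean` uses the Dense output directly as the noise
# scale. The textbook VAE reparameterization instead treats that output as a
# log-variance, which keeps the scale positive:
#     sample = mean + tf.exp(0.5 * logvar) * epsilon
# Both forms give a differentiable stochastic sample; they differ only in how
# the scale is parameterized.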
def AdaIn_3D(inputs, s1, b1):
    b = tf.shape(inputs)[0]
    w = tf.shape(inputs)[1]
    h = tf.shape(inputs)[2]
    d = tf.shape(inputs)[3]
    c = tf.shape(inputs)[4]
    inputs = Flatten()(inputs)
    mean = tf.reduce_mean(inputs, axis=1, keepdims=True)
    std = tf.math.reduce_std(inputs, axis=1, keepdims=True)
    inputs = tf.reshape(((inputs - mean) / std), [b, w, h, d, c])
    out = tf.transpose(s1 * tf.transpose(inputs, perm=[1, 2, 3, 0, 4]) + b1, perm=[3, 0, 1, 2, 4])
    return out


def AdaIn_2D(inputs, s1, b1):
    b = tf.shape(inputs)[0]
    w = tf.shape(inputs)[1]
    h = tf.shape(inputs)[2]
    c = tf.shape(inputs)[3]
    inputs = Flatten()(inputs)
    mean = tf.reduce_mean(inputs, axis=1, keepdims=True)
    std = tf.math.reduce_std(inputs, axis=1, keepdims=True)
    inputs = tf.reshape(((inputs - mean) / std), [b, w, h, c])
    out = tf.transpose(s1 * tf.transpose(inputs, perm=[1, 2, 0, 3]) + b1, perm=[2, 0, 1, 3])
    return out
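# What AdaIn_2D/AdaIn_3D compute (an aside, not part of the committed file):
# each sample's flattened activations are standardized to zero mean and unit
# variance, then re-styled with a per-sample scale s1 and offset b1 of shape
# [batch, channels] predicted from z. The transpose pair is one way to
# broadcast [batch, channels] against [batch, w, h, channels]; an equivalent
# formulation is:
#     out = norm * s1[:, None, None, :] + b1[:, None, None, :]
# Note that the standardization runs over the whole flattened sample (closer
# to layer normalization) rather than per channel, as canonical AdaIN does.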
class Double_Dense(Model):

    def __init__(self, channels):
        super(Double_Dense, self).__init__()
        self.dense_1 = Dense(channels)
        self.dense_2 = Dense(channels)

    def call(self, x):
        x = Flatten()(x)
        s = self.dense_1(x)
        c = self.dense_2(x)
        return s, c
class Generator_AdaIN_Noise(Model):

    def __init__(self, gf_dim=64):
        super(Generator_AdaIN_Noise, self).__init__()
        self.gf_dim = gf_dim
        self.zmap_0 = Double_Dense(self.gf_dim * 8)
        self.h0_aI = AdaIn_2D
        self.h0_a = LeakyReLU()
        self.h1 = Conv2DTranspose(self.gf_dim * 4, 3, strides=2, padding='SAME')
        self.z_map_1 = Double_Dense(self.gf_dim * 4)
        self.h1_aI = AdaIn_2D
        self.h1_a = LeakyReLU()
        self.h2 = Conv2DTranspose(self.gf_dim * 2, 3, strides=2, padding='SAME')
        self.z_map_2 = Double_Dense(self.gf_dim * 2)
        self.h2_aI = AdaIn_2D
        self.h2_a = LeakyReLU()
        # ============================================================
        # h2_rotated = transform_3D(h2, view_in, 16, 16)
        self.h2_proj1 = Conv2DTranspose(self.gf_dim, 3, padding='SAME')
        self.h2_proj1_a = LeakyReLU()
        self.h2_proj2 = Conv2DTranspose(self.gf_dim, 3, padding='SAME')
        self.h2_proj2_a = LeakyReLU()
        # ============================================================
        # Collapsing depth dimension
        # h2_2d = tf.reshape(h2_proj2, [batch_size, s_h4, s_w4, s_d4 * self.gf_dim])
        # 1x1 convolution
        # ============================================================
        self.h3 = Conv2DTranspose(self.gf_dim * 8, 1)
        self.h3_a = LeakyReLU()
        self.h4 = Conv2DTranspose(self.gf_dim * 4, 4, strides=2, padding='SAME')
        self.z_map_4 = Double_Dense(self.gf_dim * 4)
        self.h4_aI = AdaIn_2D
        self.h4_a = LeakyReLU()
        self.h5 = Conv2DTranspose(self.gf_dim, 4, strides=2, padding='SAME')
        self.z_map_5 = Double_Dense(self.gf_dim)
        self.h5_aI = AdaIn_2D
        self.h5_a = LeakyReLU()
        self.h6 = Conv2DTranspose(self.gf_dim // 2, 4, strides=2, padding='SAME')
        self.z_map_6 = Double_Dense(self.gf_dim // 2)
        self.h6_aI = AdaIn_2D
        self.h6_a = LeakyReLU()
        self.h7 = Conv2DTranspose(1, 4, activation='tanh', padding='SAME')

    def call(self, z):
        z = tf.reshape(z, [-1, 4, 4, 1])
        a, b = self.zmap_0(z)
        # h0
        x = self.h0_aI(z, a, b)
        x = self.h0_a(x)
        x = self.h1(x)
        a, b = self.z_map_1(z)
        x = self.h1_aI(x, a, b)
        x = self.h1_a(x)
        x = self.h2(x)
        a, b = self.z_map_2(z)
        x = self.h2_aI(x, a, b)
        x = self.h2_a(x)
        x = self.h2_proj1(x)
        x = self.h2_proj1_a(x)
        x = self.h2_proj2(x)
        x = self.h2_proj2_a(x)
        x = self.h3(x)
        x = self.h3_a(x)
        x = self.h4(x)
        a, b = self.z_map_4(z)
        x = self.h4_aI(x, a, b)
        x = self.h4_a(x)
        x = self.h5(x)
        a, b = self.z_map_5(z)
        x = self.h5_aI(x, a, b)
        x = self.h5_a(x)
        x = self.h6(x)
        a, b = self.z_map_6(z)
        x = self.h6_aI(x, a, b)
        x = self.h6_a(x)
        x = self.h7(x)
        return x
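# Shape walk (a sketch derived from the layer definitions above, assuming a
# 16-dimensional z and the default gf_dim=64; each stride-2 'SAME' transposed
# convolution doubles the spatial size):
#   z reshaped  [b,   4,   4,   1]
#   h1          [b,   8,   8, 256]
#   h2          [b,  16,  16, 128]
#   h2_proj1/2  [b,  16,  16,  64]
#   h3 (1x1)    [b,  16,  16, 512]
#   h4          [b,  32,  32, 256]
#   h5          [b,  64,  64,  64]
#   h6          [b, 128, 128,  32]
#   h7 (tanh)   [b, 128, 128,   1]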
class Generator_AdaIN_res128(Model):

    def __init__(self, image_width, gf_dim=64):
        super(Generator_AdaIN_res128, self).__init__()
        self.gf_dim = gf_dim
        self.zmap_0 = Double_Dense(self.gf_dim * 8)
        self.h0_aI = AdaIn_3D
        self.h0_a = LeakyReLU()
        self.h1 = Conv3DTranspose(self.gf_dim * 4, 3, strides=2, padding='SAME')
        self.z_map_1 = Double_Dense(self.gf_dim * 4)
        self.h1_aI = AdaIn_3D
        self.h1_a = LeakyReLU()
        self.h2 = Conv3DTranspose(self.gf_dim * 2, 3, strides=2, padding='SAME')
        self.z_map_2 = Double_Dense(self.gf_dim * 2)
        self.h2_aI = AdaIn_3D
        self.h2_a = LeakyReLU()
        # ============================================================
        # h2_rotated = transform_3D(h2, view_in, 16, 16)
        self.trans_3d = transform_3D(image_width)
        self.h2_proj1 = Conv3DTranspose(self.gf_dim, 3, padding='SAME')
        self.h2_proj1_a = LeakyReLU()
        self.h2_proj2 = Conv3DTranspose(self.gf_dim, 3, padding='SAME')
        self.h2_proj2_a = LeakyReLU()
        # ============================================================
        # Collapsing depth dimension
        # h2_2d = tf.reshape(h2_proj2, [batch_size, s_h4, s_w4, s_d4 * self.gf_dim])
        # 1x1 convolution
        # ============================================================
        self.h3 = Conv2DTranspose(self.gf_dim * 8, 1)
        self.h3_a = LeakyReLU()
        self.h4 = Conv2DTranspose(self.gf_dim * 4, 4, strides=2, padding='SAME')
        self.z_map_4 = Double_Dense(self.gf_dim * 4)
        self.h4_aI = AdaIn_2D
        self.h4_a = LeakyReLU()
        self.h5 = Conv2DTranspose(self.gf_dim, 4, strides=2, padding='SAME')
        self.z_map_5 = Double_Dense(self.gf_dim)
        self.h5_aI = AdaIn_2D
        self.h5_a = LeakyReLU()
        self.h6 = Conv2DTranspose(self.gf_dim // 2, 4, strides=2, padding='SAME')
        self.z_map_6 = Double_Dense(self.gf_dim // 2)
        self.h6_aI = AdaIn_2D
        self.h6_a = LeakyReLU()
        self.h7 = Conv2DTranspose(1, 4, activation='tanh', padding='SAME')

    def call(self, z_vector, angels, phi, t_x_pr_count, t_y_pr_count, t_z_pr_count):
        psi, rho = tf.split(angels, 2, axis=1)
        z = tf.reshape(z_vector, [-1, 4, 4, 4, 1])
        a, b = self.zmap_0(z)
        # h0
        x = self.h0_aI(z, a, b)
        x = self.h0_a(x)
        x = self.h1(x)
        a, b = self.z_map_1(z)
        x = self.h1_aI(x, a, b)
        x = self.h1_a(x)
        x = self.h2(x)
        a, b = self.z_map_2(z)
        x = self.h2_aI(x, a, b)
        x = self.h2_a(x)
        x = self.trans_3d(x, tf.squeeze(psi), phi, tf.squeeze(rho), t_x_pr_count, t_y_pr_count, t_z_pr_count)
        x = self.h2_proj1(x)
        x = self.h2_proj1_a(x)
        x = self.h2_proj2(x)
        x = self.h2_proj2_a(x)
        s = tf.shape(x)
        x = tf.reshape(x, [s[0], s[1], s[2], s[3] * s[4]])
        x = self.h3(x)
        x = self.h3_a(x)
        x = self.h4(x)
        a, b = self.z_map_4(z)
        x = self.h4_aI(x, a, b)
        x = self.h4_a(x)
        x = self.h5(x)
        a, b = self.z_map_5(z)
        x = self.h5_aI(x, a, b)
        x = self.h5_a(x)
        x = self.h6(x)
        a, b = self.z_map_6(z)
        x = self.h6_aI(x, a, b)
        x = self.h6_a(x)
        x = self.h7(x)
        return x
class Discriminator_AdaIN_res128(Model):

    def __init__(self, laten_z_dims, df_dim=64):
        super(Discriminator_AdaIN_res128, self).__init__()
        self.num_parts = laten_z_dims
        self.df_dim = df_dim
        self.h0 = Conv2D(self.df_dim, 3)
        self.dh0 = Dense(1)
        self.h0_a = ReLU()
        self.instance_norm_1 = Instance_norm(True)
        self.h1 = Spectral_norm(self.df_dim * 2, strides=2)
        self.dh1 = Dense(1)
        self.h1_a = ReLU()
        self.h2 = Spectral_norm(self.df_dim * 4, strides=2)
        self.instance_norm_2 = Instance_norm(True)
        self.dh2 = Dense(1)
        self.h2_a = ReLU()
        self.h3 = Spectral_norm(self.df_dim * 8, strides=2)
        self.instance_norm_3 = Instance_norm(True)
        self.dh3 = Dense(1)
        self.h3_a = ReLU()
        self.h4 = Spectral_norm(self.df_dim * 16, strides=2)
        self.instance_norm_4 = Instance_norm(True)
        self.dh4 = Dense(1)
        self.h4_a = ReLU()
        # Returning logits to determine whether the images are real or fake
        self.dense_out = Dense(1)
        self.act_out = ReLU()
        self.encode = Dense(64)
        self.predict = Dense(self.num_parts, activation='softmax')

    def style(self, x, h1_mean, h1_var):
        h1_mean = Flatten()(h1_mean)
        h1_var = Flatten()(h1_var)
        d_h1_style = tf.concat([h1_mean, h1_var], 0)
        return d_h1_style

    def call(self, x):
        x = self.h0(x)
        x = self.h0_a(x)
        x = self.h1(x)
        x, h1_mean, h1_var = self.instance_norm_1(x)
        d_h1_style = self.style(x, h1_mean, h1_var)
        d_logist_0 = self.dh0(d_h1_style)
        x = self.h1_a(x)
        x = self.h2(x)
        x, h2_mean, h2_var = self.instance_norm_2(x)
        d_h2_style = self.style(x, h2_mean, h2_var)
        d_logist_1 = self.dh1(d_h2_style)
        x = self.h2_a(x)
        x = self.h3(x)
        x, h3_mean, h3_var = self.instance_norm_3(x)
        d_h3_style = self.style(x, h3_mean, h3_var)
        d_logist_2 = self.dh2(d_h3_style)
        x = self.h3_a(x)
        x = self.h4(x)
        x, h4_mean, h4_var = self.instance_norm_4(x)
        d_h4_style = self.style(x, h4_mean, h4_var)
        d_logist_3 = self.dh3(d_h4_style)
        x = self.h4_a(x)
        x = Flatten()(x)
        h5 = self.dense_out(x)
        encode = self.encode(x)
        latent_out = self.act_out(encode)
        cont_vars = self.predict(latent_out)
        return h5, cont_vars, d_logist_0, d_logist_1, d_logist_2, d_logist_3, encode
\ No newline at end of file
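The discriminator scores per-layer "style" statistics (the mean and variance returned by each Instance_norm) with small Dense(1) heads alongside the main real/fake logit. A self-contained sketch of that idea in plain TensorFlow (stand-in tensors, not the repository's Instance_norm; note the committed style() concatenates along axis 0 rather than a feature axis):

import tensorflow as tf

x = tf.random.normal([2, 16, 16, 64])                     # stand-in feature map
mean, var = tf.nn.moments(x, axes=[1, 2], keepdims=True)  # per-sample, per-channel stats
style = tf.concat([tf.reshape(mean, [2, -1]),
                   tf.reshape(var, [2, -1])], axis=1)     # [batch, 2 * channels]
style_logit = tf.keras.layers.Dense(1)(style)             # one style score per sample
print(style_logit.shape)                                  # (2, 1)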
super_clas_sortem.py
...
...
@@ -77,11 +77,16 @@ class super_class:
         else:
             self.noise = False
+        self.batch_size_mean = int(parameters[16])
+        self.interpolation_num_samples = int(parameters[17])
+        self.batch_size_angels = int(parameters[18])
-        self.bytes_pr_record = int(parameters[17])  # the number of bytes per image
-        self.depth = int(parameters[18])  # the number of bytes per image
-        self.width = int(parameters[19])  # particle image size
+        self.angular_cluster = int(parameters[19])
+        self.feature_batch = int(parameters[20])
+        self.bytes_pr_record = int(parameters[22])  # the number of bytes per image
+        self.depth = int(parameters[23])  # the number of bytes per image
+        self.width = int(parameters[24])  # particle image size
         if not isdir(self.work_dir):
...
...
utils_sortem.py
...
...
@@ -12,19 +12,33 @@ from tensorflow.keras.layers import Dense,LeakyReLU,Flatten
 class Watersheed_Layer(tf.keras.layers.Layer):

-    def __init__(self):
+    def __init__(self, mean_matrix, variance_matrix, num_parts):
         super(Watersheed_Layer, self).__init__()
+        self.mean_matrix = mean_matrix
+        self.mean_bias = variance_matrix
+        self.num_parts = num_parts

     def build(self, input_shape):
-        self.kernel = self.add_weight("offset", shape=[input_shape[0], input_shape[1]], trainable=True)
+        self.kernel = self.add_weight("offset", shape=[self.num_parts, input_shape[-1]], trainable=True, initializer=tf.keras.initializers.Constant(self.mean_matrix))

     def call(self, mean_matrix, mean_bias, variance_matrix, variance_bias):
         self.mean = self.add_weight("offset",