Commit 6aeb6464 authored by Christian Marius Lillelund

Made changes to cases: unify the Complete/Compliance/Alarm/Fall pipelines around ATS-only embeddings, rename the 2020 parser to a raw loader, and hoist shared settings and screening accumulation

parent 62093e04
@@ -4,15 +4,15 @@
 target_name: "Alarm"
 model_path: models/alarm/embeddings
 threshold_weeks: 8
 threshold_training: 10
+use_real_ats_names: False
+ats_resolution: 10
 # Embedding Hyperparams --------------------------------------
 train_ratio: 0.8
 batch_size: 32
 num_epochs: 10
-verbose: False
+verbose: True
 network_layers: [128]
 optimizer: "Adam"
@@ -27,10 +27,4 @@ features_to_scale: ['Gender_Male', 'Gender_Female', 'BirthYear',
 #
 features: ['Gender_Male', 'Gender_Female', 'BirthYear',
-           'Cluster', 'LoanPeriod']
-# Settings for dataset -------------------------------------------------
-#
-use_real_ats_names: False
-ats_resolution: 50
\ No newline at end of file
+           'Cluster', 'LoanPeriod']
\ No newline at end of file
@@ -4,15 +4,15 @@
 target_name: "Complete"
 model_path: models/complete/embeddings
 threshold_weeks: 8
 threshold_training: 10
+use_real_ats_names: False
+ats_resolution: 10
 # Embedding Hyperparams --------------------------------------
 train_ratio: 0.8
 batch_size: 32
 num_epochs: 5
-verbose: False
+verbose: True
 network_layers: [128]
 optimizer: "Adam"
@@ -27,10 +27,4 @@ features_to_scale: ['Gender_Male', 'Gender_Female', 'BirthYear',
 #
 features: ['Gender_Male', 'Gender_Female', 'BirthYear',
-           'Cluster', 'LoanPeriod']
-# Settings for dataset -------------------------------------------------
-#
-use_real_ats_names: False
-ats_resolution: 50
\ No newline at end of file
+           'Cluster', 'LoanPeriod']
\ No newline at end of file
@@ -4,15 +4,15 @@
 target_name: "Compliance"
 model_path: models/compliance/embeddings
 threshold_weeks: 8
 threshold_training: 10
+use_real_ats_names: False
+ats_resolution: 10
 # Embedding Hyperparams --------------------------------------
 train_ratio: 0.8
 batch_size: 32
-num_epochs: 10
-verbose: False
+num_epochs: 5
+verbose: True
 network_layers: [128]
 optimizer: "Adam"
@@ -27,10 +27,4 @@ features_to_scale: ['Gender_Male', 'Gender_Female', 'BirthYear',
 #
 features: ['Gender_Male', 'Gender_Female', 'BirthYear',
-           'Cluster', 'LoanPeriod']
-# Settings for dataset -------------------------------------------------
-#
-use_real_ats_names: False
-ats_resolution: 50
\ No newline at end of file
+           'Cluster', 'LoanPeriod']
\ No newline at end of file
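Note: across alarm_emb.yaml, complete_emb.yaml and compliance_emb.yaml this commit moves `use_real_ats_names` and `ats_resolution` out of a trailing "Settings for dataset" block into the main dataset section, drops the resolution from 50 to 10, and turns on verbose training. The scripts further down read these files with `yaml.safe_load`; a minimal sketch of that consumption pattern (the directory layout and helper name here are illustrative, not the repo's code):

```python
# Sketch: how a per-case config is loaded downstream; mirrors the
# yaml.safe_load calls visible in the scripts in this commit.
from pathlib import Path
import yaml

def load_case_settings(configs_dir: Path, case: str) -> dict:
    # e.g. case = "alarm" -> configs/alarm_emb.yaml
    with open(Path.joinpath(configs_dir, f'{case}_emb.yaml'), 'r') as stream:
        return yaml.safe_load(stream)

settings = load_case_settings(Path('configs'), 'alarm')
print(settings['ats_resolution'])  # 10 after this commit, was 50
```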
@@ -2,7 +2,7 @@
 # Settings for data scripts -------------------------------------------------
 #
-ats_delimiter: 6
+ats_delimiter: 8
 threshold_weeks: 8
 threshold_training: 10
 fall_exercise_threshold: 3
......
 ---
 # Dataset Stuff -------------------------------------------------
 #
 target_name: "Fall"
 model_path: models/fall/embeddings
+fall_period_months: 6
+use_real_ats_names: False
+ats_resolution: 10
 # Embedding Hyperparams --------------------------------------
 train_ratio: 0.8
 batch_size: 32
-num_epochs_ats: 10
-num_epochs_ex: 5
-verbose: False
+num_epochs: 10
+verbose: True
 network_layers: [128]
 optimizer: "Adam"
 # Settings for data loader -------------------------------------------------
 #
-features_to_normalize: ['BirthYear', 'Cluster', 'LoanPeriod', 'NumberSplit',
-                        'NumberScreening', 'NumberWeeks', 'MeanEvaluation',
-                        'NumberTraining', 'NumberTrainingWeek',
-                        'TimeBetweenTraining', 'NumberWeeksNoTraining',
-                        'Needs', 'Physics', 'NumberAts', 'NumberEx']
-features_to_scale: ['Gender_Male', 'Gender_Female', 'BirthYear', 'Cluster',
-                    'LoanPeriod', 'NumberSplit', 'NumberScreening',
-                    'NumberWeeks', 'MeanEvaluation',
-                    'NumberTraining', 'NumberTrainingWeek',
-                    'TimeBetweenTraining', 'NumberWeeksNoTraining',
-                    'Needs', 'Physics', 'NumberAts', 'NumberEx']
+features_to_normalize: ['BirthYear', 'Cluster', 'LoanPeriod', 'NumberAts']
+features_to_scale: ['Gender_Male', 'Gender_Female', 'BirthYear',
+                    'Cluster', 'LoanPeriod', 'NumberAts']
 # Settings for data script -------------------------------------------------
 #
-features: ['Gender_Male', 'Gender_Female', 'BirthYear', 'Cluster',
-           'LoanPeriod', 'NumberSplit', 'NumberScreening', 'NumberWeeks',
-           'MeanEvaluation', 'NumberTraining', 'NumberTrainingWeek',
-           'TimeBetweenTraining', 'NumberWeeksNoTraining', 'Needs', 'Physics']
-# Settings for dataset -------------------------------------------------
-#
-use_real_ats_names: False
-ats_resolution: 50
-ex_resolution: 9
-risk_period_months: 6
\ No newline at end of file
+features: ['Gender_Male', 'Gender_Female', 'BirthYear',
+           'Cluster', 'LoanPeriod']
\ No newline at end of file
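Note: fall_emb.yaml now mirrors the other three case configs. The exercise-specific knobs (`num_epochs_ats`, `num_epochs_ex`, `ex_resolution`) collapse into a single `num_epochs`, `risk_period_months` gives way to `fall_period_months` near the top, and the feature lists shrink to the same five demographic/loan features used by Complete, Compliance and Alarm, plus `NumberAts` for normalization and scaling.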
[diff collapsed: one file's changes are not shown]
 #!/usr/bin/env python
-from tools import file_writer, parser, cleaner
+from tools import file_writer, raw_loader, cleaner
 import paths as pt

 def main():
-    parser20 = parser.Parser2020()
-    ats = parser20.parse_assistive_aids(pt.PATHS_2020[0], pt.RAW_DATA_DIR_2020)
-    td = parser20.parse_training_done(pt.PATHS_2020[1], pt.RAW_DATA_DIR_2020)
-    sc = parser20.parse_screening_content(pt.PATHS_2020[1], pt.RAW_DATA_DIR_2020)
-    tc = parser20.parse_training_cancelled(pt.PATHS_2020[1], pt.RAW_DATA_DIR_2020)
-    ss = parser20.parse_status_set(pt.PATHS_2020[1], pt.RAW_DATA_DIR_2020)
-    ic = parser20.parse_iso_classes('isoall.txt', pt.REFERENCES_DIR)
+    loader2020 = raw_loader.RawLoader2020()
+    ats = loader2020.load_assistive_aids(pt.PATHS_2020[0], pt.RAW_DATA_DIR_2020)
+    td = loader2020.load_training_done(pt.PATHS_2020[1], pt.RAW_DATA_DIR_2020)
+    sc = loader2020.load_screening_content(pt.PATHS_2020[1], pt.RAW_DATA_DIR_2020)
+    tc = loader2020.load_training_cancelled(pt.PATHS_2020[1], pt.RAW_DATA_DIR_2020)
+    ss = loader2020.load_status_set(pt.PATHS_2020[1], pt.RAW_DATA_DIR_2020)
+    ic = loader2020.load_iso_classes('isoall.txt', pt.REFERENCES_DIR)
     cleaner2020 = cleaner.Cleaner2020()
     patient_data = td[['CitizenId', 'Gender', 'BirthYear']].drop_duplicates(keep='first')
......
@@ -9,12 +9,14 @@ from kmodes import kmodes
 from pathlib import Path
 from tools import file_reader, file_writer, preprocessor

+ATS_RESOLUTION = 50

 def main():
     df = file_reader.read_csv(pt.INTERIM_DATA_DIR, 'screenings.csv',
                               converters={'CitizenId': str})
-    df = preprocessor.split_cat_columns(df, col_to_split='Ats', tag='Ats', resolution=10)
+    df = preprocessor.split_cat_columns(df, col_to_split='Ats', tag='Ats', resolution=ATS_RESOLUTION)
-    cols_ats = [str(i)+'Ats' for i in range(1, 11)]
+    cols_ats = [str(i)+'Ats' for i in range(1, ATS_RESOLUTION+1)]
     header_list = ['CitizenId'] + cols_ats
     df = df[header_list]
......
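Note: the magic numbers 10 and 11 become a module-level `ATS_RESOLUTION = 50`, so the clustering script keeps the full 50-column split even though the case configs now use a resolution of 10. For orientation, a hedged sketch of what `preprocessor.split_cat_columns` plausibly does; the real helper lives in tools/preprocessor.py and may differ:

```python
# Sketch: split a comma-separated 'Ats' string into '1Ats'..'<resolution>Ats'
# columns, truncating or padding with '0' as needed. Illustrative only.
import pandas as pd

def split_cat_columns(df: pd.DataFrame, col_to_split: str, tag: str,
                      resolution: int) -> pd.DataFrame:
    parts = df[col_to_split].str.split(',', expand=True)
    # keep/pad to exactly `resolution` columns, '0' marking empty slots
    parts = parts.reindex(columns=range(resolution)).fillna('0')
    parts.columns = [f'{i}{tag}' for i in range(1, resolution + 1)]
    return pd.concat([df.drop(columns=[col_to_split]), parts], axis=1)

df = pd.DataFrame({'CitizenId': ['1'], 'Ats': ['093307,120606']})
print(split_cat_columns(df, 'Ats', 'Ats', resolution=3))
#   CitizenId    1Ats    2Ats 3Ats
# 0         1  093307  120606    0
```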
@@ -82,31 +82,19 @@ def make_fall_count():
         settings = yaml.safe_load(stream)
     label_name = 'Fall'
-    ex = {str(i)+'Ex':str for i in range(1, settings['ex_resolution']+1)}
     ats = {str(i)+'Ats':str for i in range(1, settings['ats_resolution']+1)}
-    converters = {**ex, **ats}
-    df = file_reader.read_csv(pt.PROCESSED_DATA_DIR, f'fall.csv', converters=converters)
+    df = file_reader.read_csv(pt.PROCESSED_DATA_DIR, f'fall.csv', converters=ats)
     num_cols = embedder.get_numerical_cols(df, label_name)
-    # Extract exercises
-    cols_ex = [str(i)+'Ex' for i in range(1, settings['ex_resolution']+1)]
-    unique_ex = [df[f'{i}Ex'].unique() for i in range(1, settings['ex_resolution']+1)]
-    unique_ex = list(set(np.concatenate(unique_ex)))
-    df_ex = preprocessor.extract_cat_count(df, unique_ex, cols_ex, 'Ex_')
     # Extract ats
     cols_ats = [str(i)+'Ats' for i in range(1, settings['ats_resolution']+1)]
     unique_ats = [df[f'{i}Ats'].unique() for i in range(1, settings['ats_resolution']+1)]
     unique_ats = list(set(np.concatenate(unique_ats)))
     df_ats = preprocessor.extract_cat_count(df, unique_ats, cols_ats, 'Ats_')
-    # Merge dataframes
-    df = pd.concat([df, df_ex, df_ats], axis=1)
-    ex_columns = ['Ex_' + ex for ex in unique_ex]
-    ats_columns = ['Ats_' + ats for ats in unique_ats]
-    df = df[num_cols + ex_columns + ats_columns + [label_name]]
-    df = df.drop(['Ex_0', 'Ats_0'], axis=1)
+    df_ats = df_ats.drop(['Ats_0'], axis=1)
+    df = df.drop(cols_ats, axis=1)
+    df = pd.concat([df.drop(label_name, axis=1), df_ats, df[[label_name]]], axis=1)
     file_writer.write_csv(df, pt.PROCESSED_DATA_DIR, f'{label_name.lower()}_count.csv')

 if __name__ == "__main__":
......
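Note: make_fall_count() no longer builds exercise counts; only the ATS counts survive, and the padding column `Ats_0` is dropped from df_ats before the final concat. A hedged sketch of what `preprocessor.extract_cat_count` plausibly computes, for orientation (illustrative, not the repo's code):

```python
# Sketch: per-row occurrence counts of each unique code across the
# '1Ats'..'NAts' columns, returned as 'Ats_<code>' columns.
# 'Ats_0' counts the padding slots, hence the drop above.
import pandas as pd

def extract_cat_count(df, unique_vals, cols, prefix):
    counts = {f'{prefix}{val}': df[cols].eq(val).sum(axis=1)
              for val in unique_vals}
    return pd.DataFrame(counts, index=df.index)

df = pd.DataFrame({'1Ats': ['A', 'B'], '2Ats': ['A', '0']})
print(extract_cat_count(df, ['A', 'B', '0'], ['1Ats', '2Ats'], 'Ats_'))
#    Ats_A  Ats_B  Ats_0
# 0      2      0      0
# 1      0      1      1
```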
@@ -22,34 +22,17 @@ def main(ats_resolution: int = None):
         if ats_resolution == None:
             ats_resolution = settings['ats_resolution']
-        if label_name == "Fall":
-            ex_resolution = settings['ex_resolution']
-        if label_name in ["Complete", "Compliance", "Alarm"]:
-            ats = {str(i)+'Ats':str for i in range(1, ats_resolution+1)}
-            df = file_reader.read_csv(pt.PROCESSED_DATA_DIR,
-                                      f'{label_name.lower()}.csv',
-                                      converters=ats)
-        else:
-            ex = {str(i)+'Ex':str for i in range(1, ex_resolution+1)}
-            ats = {str(i)+'Ats':str for i in range(1, ats_resolution+1)}
-            converters = {**ex, **ats}
-            df = file_reader.read_csv(pt.PROCESSED_DATA_DIR,
-                                      f'{label_name.lower()}.csv',
-                                      converters=converters)
+        ats = {str(i)+'Ats':str for i in range(1, ats_resolution+1)}
+        df = file_reader.read_csv(pt.PROCESSED_DATA_DIR,
+                                  f'{label_name.lower()}.csv',
+                                  converters=ats)
-        if label_name in ["Complete", "Compliance", "Alarm"]:
-            emb_cols = df.filter(regex='((\d+)[Ats])\w+', axis=1)
-            n_numerical_cols = df.shape[1] - emb_cols.shape[1] - 1
-            df_to_enc = df.iloc[:,n_numerical_cols:]
-            ats_cols = [str(i)+'Ats' for i in range(1, ats_resolution+1)]
-            df = df.drop(ats_cols, axis=1)
-        else:
-            ats_cols = [str(i)+'Ats' for i in range(1, ats_resolution+1)]
-            ex_cols = [str(i)+'Ex' for i in range(1, ex_resolution+1)]
-            df_ats_to_enc = df.filter(regex=f'Fall|((\d+)[Ats])\w+', axis=1)
-            df_ex_to_enc = df.filter(regex=f'Fall|((\d+)[Ex])\w+', axis=1)
-            df = df.drop(ats_cols + ex_cols, axis=1)
+        emb_cols = df.filter(regex='((\d+)[Ats])\w+', axis=1)
+        n_numerical_cols = df.shape[1] - emb_cols.shape[1] - 1
+        df_to_enc = df.iloc[:,n_numerical_cols:]
+        ats_cols = [str(i)+'Ats' for i in range(1, ats_resolution+1)]
+        df = df.drop(ats_cols, axis=1)
         # Load embedded config
         with open(Path.joinpath(pt.CONFIGS_DIR,
@@ -58,44 +41,18 @@ def main(ats_resolution: int = None):
         # Encode dataframe given params
         model_path = Path.joinpath(pt.ROOT_DIR, emb_cfg['model_path'])
-        if label_name in ["Complete", "Compliance", "Alarm"]:
-            df_enc = encode_dataframe(df=df_to_enc,
-                                      target_name=emb_cfg['target_name'],
-                                      batch_size=emb_cfg['batch_size'],
-                                      train_ratio=emb_cfg['train_ratio'],
-                                      epochs=emb_cfg['num_epochs'],
-                                      optimizer=emb_cfg['optimizer'],
-                                      network_layers=emb_cfg['network_layers'],
-                                      verbose=emb_cfg['verbose'],
-                                      model_path=model_path)
-        else:
-            ats_enc = encode_dataframe(df=df_ats_to_enc,
-                                       target_name=emb_cfg['target_name'],
-                                       batch_size=emb_cfg['batch_size'],
-                                       train_ratio=emb_cfg['train_ratio'],
-                                       epochs=emb_cfg['num_epochs_ats'],
-                                       optimizer=emb_cfg['optimizer'],
-                                       network_layers=emb_cfg['network_layers'],
-                                       verbose=emb_cfg['verbose'],
-                                       model_path=model_path)
-            ex_enc = encode_dataframe(df=df_ex_to_enc,
-                                      target_name=emb_cfg['target_name'],
-                                      batch_size=emb_cfg['batch_size'],
-                                      train_ratio=emb_cfg['train_ratio'],
-                                      epochs=emb_cfg['num_epochs_ex'],
-                                      optimizer=emb_cfg['optimizer'],
-                                      network_layers=emb_cfg['network_layers'],
-                                      verbose=emb_cfg['verbose'],
-                                      model_path=model_path)
+        df_enc = encode_dataframe(df=df_to_enc,
+                                  target_name=emb_cfg['target_name'],
+                                  batch_size=emb_cfg['batch_size'],
+                                  train_ratio=emb_cfg['train_ratio'],
+                                  epochs=emb_cfg['num_epochs'],
+                                  optimizer=emb_cfg['optimizer'],
+                                  network_layers=emb_cfg['network_layers'],
+                                  verbose=emb_cfg['verbose'],
+                                  model_path=model_path)
         df_rand = pd.DataFrame(np.random.rand(len(df),1), columns=['Rand']) # add random var
-        if label_name in ["Complete", "Compliance", "Alarm"]:
-            df = pd.concat([df.drop(label_name, axis=1), df_rand, df_enc, df.pop(label_name)], axis=1)
-        else:
-            df = pd.concat([df.drop(label_name, axis=1), df_rand, ats_enc, ex_enc,
-                            df.pop(label_name)], axis=1)
+        df = pd.concat([df.drop(label_name, axis=1), df_rand, df_enc, df.pop(label_name)], axis=1)
         file_writer.write_csv(df, pt.PROCESSED_DATA_DIR, f'{label_name.lower()}_emb.csv')

 def encode_dataframe(df, target_name, batch_size, train_ratio, epochs,
def encode_dataframe(df, target_name, batch_size, train_ratio, epochs,
......
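Note: with the Fall special case gone, every label goes through one `encode_dataframe` call driven by the case's single `num_epochs`. The function body isn't shown beyond its signature; as orientation, here is a minimal sketch of the entity-embedding idea it appears to implement, assuming a TensorFlow/Keras backend (the configs' `optimizer: "Adam"` and `network_layers: [128]` point that way). Illustrative only, not the repo's implementation:

```python
# Sketch: learn one Embedding per integer-encoded categorical column by
# training against the binary case target, then return the learned
# (vocab_size, dim) matrices that replace the raw codes.
import numpy as np
import tensorflow as tf

def fit_entity_embeddings(X_cat, y, epochs=10, batch_size=32):
    n_rows, n_cols = X_cat.shape
    inputs, embedded = [], []
    for j in range(n_cols):
        card = int(X_cat[:, j].max()) + 1        # vocabulary size of column j
        dim = min(50, (card + 1) // 2)           # common embedding-size heuristic
        inp = tf.keras.Input(shape=(1,))
        emb = tf.keras.layers.Embedding(card, dim)(inp)
        embedded.append(tf.keras.layers.Flatten()(emb))
        inputs.append(inp)
    x = embedded[0] if n_cols == 1 else tf.keras.layers.Concatenate()(embedded)
    x = tf.keras.layers.Dense(128, activation='relu')(x)   # network_layers: [128]
    out = tf.keras.layers.Dense(1, activation='sigmoid')(x)
    model = tf.keras.Model(inputs, out)
    model.compile(optimizer='Adam', loss='binary_crossentropy')
    model.fit([X_cat[:, j] for j in range(n_cols)], y,
              epochs=epochs, batch_size=batch_size, verbose=0)
    return [layer.get_weights()[0] for layer in model.layers
            if isinstance(layer, tf.keras.layers.Embedding)]

X = np.random.randint(0, 20, size=(256, 3))   # toy integer-encoded ATS columns
y = np.random.randint(0, 2, size=(256,))
print([e.shape for e in fit_entity_embeddings(X, y, epochs=2)])
```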
@@ -11,6 +11,13 @@ def main(ats_resolution: int = None):
                                  converters={'CitizenId': str, 'Cluster': int})
     screenings = file_reader.read_csv(pt.INTERIM_DATA_DIR, 'screenings.csv',
                                       converters={'CitizenId': str})
+    with open(Path.joinpath(pt.CONFIGS_DIR, f'complete_emb.yaml'), 'r') as stream:
+        settings = yaml.safe_load(stream)
+    df = screenings.copy()
+    df['Cluster'] = clusters['Cluster']
+    accumulated_screenings = labeler.accumulate_screenings(df, settings)
     for label_name in ['Complete', 'Compliance', 'Alarm', 'Fall']:
         # Load settings for target
@@ -19,28 +26,22 @@ def main(ats_resolution: int = None):
         if ats_resolution == None:
             ats_resolution = settings['ats_resolution']
-        if label_name == "Fall":
-            ex_resolution = settings['ex_resolution']
         features = settings['features']
-        df = screenings.copy()
-        df['Cluster'] = clusters['Cluster']
         # Encode target label
         if label_name == 'Complete':
-            df = labeler.make_complete_label(df, settings)
+            df = labeler.make_complete_label(accumulated_screenings, settings)
         elif label_name == 'Compliance':
-            df = labeler.make_compliance_label(df, settings)
+            df = labeler.make_compliance_label(accumulated_screenings, settings)
         elif label_name == 'Alarm':
-            df = labeler.make_alarm_label(df, settings)
-            df['Ats'] = df['Ats'].apply(lambda x: x.replace('222718', '0'))
+            df = labeler.make_alarm_label(accumulated_screenings, settings)
+            df = df.replace({'Ats': {'22271812': '0', '22271813':'0',
+                                     '22271814': '0', '22271816': '0'}}, regex=True)
         else:
-            df = labeler.make_fall_label(df, settings)
+            df = labeler.make_fall_label(accumulated_screenings, settings)
         # Split cat columns by ATS resolution
         df = preprocessor.split_cat_columns(df, col_to_split='Ats', tag='Ats', resolution=ats_resolution)
-        if label_name == "Fall":
-            df = preprocessor.split_cat_columns(df, col_to_split='Ex', tag='Ex', resolution=ex_resolution)
         # One-hot-encode gender variable
         object_cols = ['Gender']
@@ -50,25 +51,13 @@ def main(ats_resolution: int = None):
         df['Gender_Male'] = df['Gender_Male'].astype(int)
         # Concat dataframe in proper order
-        if label_name in ["Complete", "Compliance", "Alarm"]:
-            ats_cols = df.filter(regex='Ats', axis=1)
-            df = pd.concat([df[features], ats_cols, df[[label_name]]], axis=1)
-        else:
-            ats_ex_cols = df.filter(regex='Ats|Ex', axis=1)
-            df = pd.concat([df[features], ats_ex_cols, df[[label_name]]], axis=1)
+        ats_cols = df.filter(regex='Ats', axis=1)
+        df = pd.concat([df[features], ats_cols, df[[label_name]]], axis=1)
         if settings['use_real_ats_names']:
-            if label_name in ["Complete", "Compliance", "Alarm"]:
-                ats = file_reader.read_csv(pt.REFERENCES_DIR, 'ats.csv',
-                                           converters={'ats_id': str})
-                df = preprocessor.replace_cat_values(df, ats)
-            else:
-                ats = file_reader.read_csv(pt.REFERENCES_DIR, 'ats.csv',
-                                           converters={'ats_id': str})
-                ex = file_reader.read_csv(pt.REFERENCES_DIR, 'ex.csv',
-                                          converters={'ex_id': str})
-                df = preprocessor.replace_cat_values(df, ats)
-                df = preprocessor.replace_cat_values(df, ex)
+            ats = file_reader.read_csv(pt.REFERENCES_DIR, 'ats.csv',
+                                       converters={'ats_id': str})
+            df = preprocessor.replace_cat_values(df, ats)
         file_writer.write_csv(df, pt.PROCESSED_DATA_DIR, f'{label_name.lower()}.csv')
......
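Note: two behavioural changes hide in this file. First, `accumulate_screenings` now runs once before the label loop instead of once per label (see the labeler diff at the bottom of this commit). Second, the Alarm branch no longer blanks every code containing the substring '222718' via `str.replace` (which would also mangle longer codes, e.g. '22271812' becoming '012'); it zeroes four specific device codes with a nested-dict `DataFrame.replace`. Presumably these are the alarm devices themselves, removed so the Alarm target is not leaked into the features, though the commit doesn't say. A tiny demo of the replace semantics:

```python
# With regex=True the inner-dict keys act as patterns *inside* the 'Ats'
# strings, so codes embedded in the comma-separated list are zeroed in
# place; a plain replace would only match whole cell values.
import pandas as pd

df = pd.DataFrame({'Ats': ['093307,22271812,120606', '22271816']})
df = df.replace({'Ats': {'22271812': '0', '22271816': '0'}}, regex=True)
print(df['Ats'].tolist())  # ['093307,0,120606', '0']
```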
@@ -15,29 +15,14 @@ def main():
                         f'{target_name.lower()}_emb.yaml'), 'r') as stream:
             settings = yaml.safe_load(stream)
-        if target_name in ["Complete", "Compliance", "Alarm"]:
-            ats = {str(i)+'Ats':str for i in range(1, settings['ats_resolution']+1)}
-            df = file_reader.read_csv(pt.PROCESSED_DATA_DIR, f'{target_name.lower()}.csv', converters=ats)
-        else:
-            ex = {str(i)+'Ex':str for i in range(1, settings['ex_resolution']+1)}
-            ats = {str(i)+'Ats':str for i in range(1, settings['ats_resolution']+1)}
-            converters = {**ex, **ats}
-            df = file_reader.read_csv(pt.PROCESSED_DATA_DIR, f'{target_name.lower()}.csv', converters=converters)
+        ats = {str(i)+'Ats':str for i in range(1, settings['ats_resolution']+1)}
+        df = file_reader.read_csv(pt.PROCESSED_DATA_DIR, f'{target_name.lower()}.csv', converters=ats)
         # One-hot encode targets
-        if target_name in ["Complete", "Compliance", "Alarm"]:
-            ats_cols = [str(i)+'Ats' for i in range(1, settings['ats_resolution']+1)]
-            df_enc = preprocessor.one_hot_encode(df, ats_cols)
-            df = pd.concat([df.drop(ats_cols + [target_name], axis=1),
-                            df_enc, df[[target_name]]], axis=1)
-        else:
-            ex_cols = [str(i)+'Ex' for i in range(1, settings['ex_resolution']+1)]
-            ats_cols = [str(i)+'Ats' for i in range(1, settings['ats_resolution']+1)]
-            total_cols = ex_cols + ats_cols
-            df_enc = preprocessor.one_hot_encode(df, total_cols)
-            df = pd.concat([df.drop(total_cols + [target_name], axis=1),
-                            df_enc, df[[target_name]]], axis=1)
+        ats_cols = [str(i)+'Ats' for i in range(1, settings['ats_resolution']+1)]
+        df_enc = preprocessor.one_hot_encode(df, ats_cols)
+        df = pd.concat([df.drop(ats_cols + [target_name], axis=1),
+                        df_enc, df[[target_name]]], axis=1)
         # Save dataframe
         file_writer.write_csv(df, pt.PROCESSED_DATA_DIR, f'{target_name.lower()}_ohe.csv')
......
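Note: the one-hot script collapses to the ATS-only path as well. If `preprocessor.one_hot_encode` is a thin wrapper over `pd.get_dummies` (an assumption; the helper lives in tools/preprocessor.py), the reordering concat keeps the target as the last column:

```python
# Sketch under the assumption that one_hot_encode wraps pd.get_dummies
# on the '1Ats'..'NAts' columns; the concat pattern matches the diff above.
import pandas as pd

def one_hot_encode(df: pd.DataFrame, cols: list) -> pd.DataFrame:
    return pd.get_dummies(df[cols])

df = pd.DataFrame({'1Ats': ['A', 'B'], '2Ats': ['C', 'A'], 'Complete': [1, 0]})
ats_cols = ['1Ats', '2Ats']
df_enc = one_hot_encode(df, ats_cols)
df = pd.concat([df.drop(ats_cols + ['Complete'], axis=1),
                df_enc, df[['Complete']]], axis=1)
print(df.columns.tolist())
# ['1Ats_A', '1Ats_B', '2Ats_A', '2Ats_C', 'Complete']
```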
@@ -129,13 +129,13 @@ def get_screenings_by_id(data, id, settings):
     single_screening['NumberAts'] = inputter.get_number_ats(citizen_data.ats, end_date)
     single_screening['LoanPeriod'] = inputter.get_avg_loan_period(citizen_data.ats, end_date)
-    single_screening['Needs'] = screening.NeedForHelpScore
-    single_screening['NeedsReason'] = screening.NeedForHelpReason
-    single_screening['Physics']= screening.PhysicalStrengthScore
-    single_screening['PhysicsReason']= screening.PhysicalStrengthReason
+    single_screening['Needs'] = inputter.get_needs(screening)
+    single_screening['NeedsReason'] = inputter.get_needs_reason(screening)
+    single_screening['Physics']= inputter.get_physics(screening)
+    single_screening['PhysicsReason']= inputter.get_physics_reason(screening)
-    single_screening['Ex'] = screening.ExerciseContent
-    single_screening['NumberEx'] = inputter.get_number_exercises(screening.ExerciseContent)
+    single_screening['Ex'] = inputter.get_exercise_content(screening)
+    single_screening['NumberEx'] = inputter.get_number_exercises(screening)
     single_screening['HasFallRisk'] = sum(map(screening.ExerciseContent.count,
                                               settings['fall_exercises'])) > settings['fall_exercise_threshold']
......
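Note: raw attribute access on `screening` moves behind `inputter` getters, keeping this builder free of column-name knowledge. The unchanged HasFallRisk context line is worth a gloss: it counts how many configured fall exercises occur in the ExerciseContent string and compares against `fall_exercise_threshold` (3 in the settings diff above). With hypothetical exercise codes:

```python
# Demo of the HasFallRisk expression from the hunk above; the exercise
# codes here are made up for illustration.
exercise_content = 'E1,E2,E3,E4,E9'
fall_exercises = ['E1', 'E2', 'E3', 'E4']
threshold = 3
has_fall_risk = sum(map(exercise_content.count, fall_exercises)) > threshold
print(has_fall_risk)  # True: 4 matches > 3
```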
@@ -12,7 +12,7 @@ import xgboost as xgb
 from pathlib import Path
 import yaml

-CASES = ["Complete", "Compliance", "Alarm", "Fall"]
+CASES = ["Complete", "Compliance", "Alarm"]
 DATASET_VERSION = 'emb'

 def main():
......
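Note: Fall drops out of the XGBoost CASES list, so only the three ATS-based labels are trained against the 'emb' dataset here; the diff alone doesn't say whether Fall is evaluated elsewhere (e.g. via the new fall_count dataset) or just temporarily excluded.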
@@ -51,8 +51,8 @@ def get_max_loan_period(ats, end_date):
         return days
     return 0

-def get_number_exercises(ex):
-    return len([str(elem) for elem in ex.split(',') if elem != '0'])
+def get_number_exercises(screening):
+    return len([str(elem) for elem in screening.ExerciseContent.split(',') if elem != '0'])

 def get_start_year(pre_screening):
     return pd.to_datetime(pre_screening.ScreeningDate).year
@@ -108,29 +108,38 @@ def get_interval_length(start_date, end_date):
 def get_rehab_indicator(needs_start, physics_start):
     return np.around(needs_start / (physics_start + 0.0001), decimals=2)

+def get_needs(screening):
+    return screening.NeedForHelpScore
+
+def get_physics(screening):
+    return screening.PhysicalStrengthScore
+
 def get_needs_reason(screening):
     needs_reason = screening.NeedForHelpReason
     if not needs_reason == 'nan':
         return needs_reason
     else:
-        return np.nan
+        return 'Ingen forklaring'

 def get_physics_reason(screening):
     physics_reason = screening.PhysicalStrengthReason
     if not physics_reason == 'nan':
         return physics_reason
     else:
-        return np.nan
+        return 'Ingen forklaring'

+def get_exercise_content(screening):
+    return screening.ExerciseContent
+
 def get_last_status(ssw):
     if ssw.empty:
-        return np.nan
+        return 'Ingen'
     else:
         last_status = ssw['Status'].iat[-1]
         if not pd.isnull(last_status):
             return last_status.replace(' ', '')
         else:
-            return np.nan
+            return 'Ingen'

 def get_last_status_date(ssw, end_date, date_format):
     if ssw.empty:
......
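Note: the inputter getters swap `np.nan` sentinels for explicit Danish category strings, 'Ingen' ("none") and 'Ingen forklaring' ("no explanation"), so missing statuses and reasons flow through string handling and encoding as ordinary categories instead of NaN.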
@@ -15,8 +15,6 @@ def make_fall_label(df: pd.DataFrame, settings: dict):
     return df

 def make_complete_label(df: pd.DataFrame, settings: dict) -> pd.DataFrame:
-    df = accumulate_screenings(df, settings)
     # Set first screening as baseline
     df.loc[df['NumberScreening'] == 0, 'Baseline'] = 1
@@ -34,8 +32,6 @@ def make_complete_label(df: pd.DataFrame, settings: dict) -> pd.DataFrame:
     return df

 def make_compliance_label(df: pd.DataFrame, settings: dict) -> pd.DataFrame:
-    df = accumulate_screenings(df, settings)
     # Set first screening as baseline
     df.loc[df['NumberScreening'] == 0, 'Baseline'] = 1
@@ -61,8 +57,6 @@ def make_compliance_label(df: pd.DataFrame, settings: dict) -> pd.DataFrame:
     return df

 def make_alarm_label(df, settings):
-    df = accumulate_screenings(df, settings)
     # Set first screening as baseline
     df.loc[df['NumberScreening'] == 0, 'Baseline'] = 1
@@ -79,6 +73,23 @@ def make_alarm_label(df, settings):
     return df

+def make_fall_label(df, settings):
+    # Set first screening as baseline
+    df.loc[df['NumberScreening'] == 0, 'Baseline'] = 1
+    # Calculate if citizens fall during their sessions