#!/usr/bin/env python
"""Build count-based feature datasets.

Reads the processed complete, compliance, fall and fall test datasets,
replaces the categorical Ats (and Ex) columns with per-category count
features, and writes complete_count.csv, compliance_count.csv,
fall_count.csv and fall_test_count.csv to the processed data directory.
"""
import paths as pt
from tools import file_reader, file_writer
from tools import preprocessor
from utility import embedder
import pandas as pd
import numpy as np
from pathlib import Path
import yaml
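
# The settings.yaml file in pt.CONFIGS_DIR is expected to provide at least the
# resolution keys read below. A minimal sketch of such a file (values are
# illustrative only, not the project's actual configuration):
#
#   ats_resolution: 10
#   ex_resolution: 9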

def main():
    make_complete_count()
    make_compliance_count()
    make_fall_count()
    make_fall_test()

def make_complete_count():
    """Make a count-based dataset for the Complete case."""
    with open(Path.joinpath(pt.CONFIGS_DIR, "settings.yaml"), 'r') as stream:
        settings = yaml.safe_load(stream)
    case = 'Complete'

    # Read the processed dataset, parsing the Ats columns as strings
    ats = {str(i)+'Ats': str for i in range(1, settings['ats_resolution']+1)}
    df = file_reader.read_csv(pt.PROCESSED_DATA_DIR,
                              'complete.csv',
                              converters=ats)

    # Collect the unique Ats values across all Ats columns
    cols_ats = [str(i)+'Ats' for i in range(1, settings['ats_resolution']+1)]
    unique_ats = [df[f'{i}Ats'].unique() for i in range(1, settings['ats_resolution']+1)]
    unique_ats = list(set(np.concatenate(unique_ats)))

    # Count occurrences per Ats category and drop the count column for the '0' category
    df_ats = preprocessor.extract_cat_count(df, unique_ats, cols_ats, 'Ats_')
    df_ats = df_ats.drop(['Ats_0'], axis=1)

    # Replace the original Ats columns with the counts, keeping the case column last
    df = df.drop(cols_ats, axis=1)
    df = pd.concat([df.drop(case, axis=1), df_ats, df[[case]]], axis=1)

    file_writer.write_csv(df, pt.PROCESSED_DATA_DIR, 'complete_count.csv')

def make_compliance_count():
    """Make a count-based dataset for the Compliance case."""
    with open(Path.joinpath(pt.CONFIGS_DIR, "settings.yaml"), 'r') as stream:
        settings = yaml.safe_load(stream)
    case = 'Compliance'

    # Read the processed dataset, parsing the Ats columns as strings
    ats = {str(i)+'Ats': str for i in range(1, settings['ats_resolution']+1)}
    df = file_reader.read_csv(pt.PROCESSED_DATA_DIR,
                              'compliance.csv',
                              converters=ats)

    # Collect the unique Ats values across all Ats columns
    cols_ats = [str(i)+'Ats' for i in range(1, settings['ats_resolution']+1)]
    unique_ats = [df[f'{i}Ats'].unique() for i in range(1, settings['ats_resolution']+1)]
    unique_ats = list(set(np.concatenate(unique_ats)))

    # Count occurrences per Ats category and drop the count column for the '0' category
    df_ats = preprocessor.extract_cat_count(df, unique_ats, cols_ats, 'Ats_')
    df_ats = df_ats.drop(['Ats_0'], axis=1)

    # Replace the original Ats columns with the counts, keeping the case column last
    df = df.drop(cols_ats, axis=1)
    df = pd.concat([df.drop(case, axis=1), df_ats, df[[case]]], axis=1)

    file_writer.write_csv(df, pt.PROCESSED_DATA_DIR, 'compliance_count.csv')

def make_fall_count():
    """Make a count-based dataset for the Fall case."""
    with open(Path.joinpath(pt.CONFIGS_DIR, "settings.yaml"), 'r') as stream:
        settings = yaml.safe_load(stream)
    case = 'Fall'

    # Read the processed dataset, parsing the Ats columns as strings
    ats = {str(i)+'Ats': str for i in range(1, settings['ats_resolution']+1)}
    df = file_reader.read_csv(pt.PROCESSED_DATA_DIR,
                              'fall.csv',
                              converters=ats)

    # Collect the unique Ats values across all Ats columns
    cols_ats = [str(i)+'Ats' for i in range(1, settings['ats_resolution']+1)]
    unique_ats = [df[f'{i}Ats'].unique() for i in range(1, settings['ats_resolution']+1)]
    unique_ats = list(set(np.concatenate(unique_ats)))

    # Count occurrences per Ats category and drop the count column for the '0' category
    df_ats = preprocessor.extract_cat_count(df, unique_ats, cols_ats, 'Ats_')
    df_ats = df_ats.drop(['Ats_0'], axis=1)

    # Replace the original Ats columns with the counts, keeping the case column last
    df = df.drop(cols_ats, axis=1)
    df = pd.concat([df.drop(case, axis=1), df_ats, df[[case]]], axis=1)

    file_writer.write_csv(df, pt.PROCESSED_DATA_DIR, 'fall_count.csv')

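# NOTE: make_complete_count, make_compliance_count and make_fall_count above are
# identical apart from the case name and the input/output file names. A shared
# helper is sketched below for illustration only; it is not called by main(),
# its name is hypothetical, and it assumes the same file_reader, preprocessor
# and file_writer signatures used in the functions above.
def _make_count_dataset(case, in_file, out_file):
    with open(Path.joinpath(pt.CONFIGS_DIR, "settings.yaml"), 'r') as stream:
        settings = yaml.safe_load(stream)

    # Read the processed dataset, parsing the Ats columns as strings
    ats = {str(i)+'Ats': str for i in range(1, settings['ats_resolution']+1)}
    df = file_reader.read_csv(pt.PROCESSED_DATA_DIR, in_file, converters=ats)

    # Count occurrences of every unique Ats value across the Ats columns
    cols_ats = [str(i)+'Ats' for i in range(1, settings['ats_resolution']+1)]
    unique_ats = [df[f'{i}Ats'].unique() for i in range(1, settings['ats_resolution']+1)]
    unique_ats = list(set(np.concatenate(unique_ats)))
    df_ats = preprocessor.extract_cat_count(df, unique_ats, cols_ats, 'Ats_')
    df_ats = df_ats.drop(['Ats_0'], axis=1)

    # Replace the original Ats columns with the counts, keeping the case column last
    df = df.drop(cols_ats, axis=1)
    df = pd.concat([df.drop(case, axis=1), df_ats, df[[case]]], axis=1)
    file_writer.write_csv(df, pt.PROCESSED_DATA_DIR, out_file)

# Example (hypothetical) usage: _make_count_dataset('Fall', 'fall.csv', 'fall_count.csv')
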
def make_fall_test():
    """Make a count-based dataset for the Fall test case."""
    with open(Path.joinpath(pt.CONFIGS_DIR, "settings.yaml"), 'r') as stream:
        settings = yaml.safe_load(stream)
    case = 'Fall'

    # Read the processed dataset, parsing the Ex and Ats columns as strings
    ex = {str(i)+'Ex': str for i in range(1, settings['ex_resolution']+1)}
    ats = {str(i)+'Ats': str for i in range(1, settings['ats_resolution']+1)}
    converters = {**ex, **ats}
    df = file_reader.read_csv(pt.PROCESSED_DATA_DIR,
                              'fall_test.csv',
                              converters=converters)

    num_cols = embedder.get_numerical_cols(df, case)

    # Extract exercise counts
    cols_ex = [str(i)+'Ex' for i in range(1, settings['ex_resolution']+1)]
    unique_ex = [df[f'{i}Ex'].unique() for i in range(1, settings['ex_resolution']+1)]
    unique_ex = list(set(np.concatenate(unique_ex)))
    df_ex = preprocessor.extract_cat_count(df, unique_ex, cols_ex, 'Ex_')

    # Extract Ats counts
    cols_ats = [str(i)+'Ats' for i in range(1, settings['ats_resolution']+1)]
    unique_ats = [df[f'{i}Ats'].unique() for i in range(1, settings['ats_resolution']+1)]
    unique_ats = list(set(np.concatenate(unique_ats)))
    df_ats = preprocessor.extract_cat_count(df, unique_ats, cols_ats, 'Ats_')

    # Merge the count features, keep the numerical columns and the case column,
    # and drop the count columns for the '0' categories
    df = pd.concat([df, df_ex, df_ats], axis=1)
    ex_columns = ['Ex_' + ex for ex in unique_ex]
    ats_columns = ['Ats_' + ats for ats in unique_ats]
    df = df[num_cols + ex_columns + ats_columns + [case]]
    df = df.drop(['Ex_0', 'Ats_0'], axis=1)
    file_writer.write_csv(df, pt.PROCESSED_DATA_DIR, 'fall_test_count.csv')

if __name__ == "__main__":
    main()