from configparser import ConfigParser
from csv import DictReader
from datetime import datetime, timedelta
from itertools import chain
from logging import getLogger
from logging.config import fileConfig
from os import listdir
from pathlib import Path
from sklearn import preprocessing

import numpy as np
import pandas as pd

fileConfig(Path.cwd() / 'config' / 'logging.cfg')
logger = getLogger(__name__)
    """
    Generate a pandas dataframe from a dictionary of features per datetime, which
    respects the starting and ending dates of the study, and its precision (the
    time step) as passed to the constructor. Missing feature values are filled in:

    - missing datetimes are added first, with np.nan feature values,
    - the dataframe is then constructed from the filled feature dictionary,
    - NaN values are finally replaced with the last known values.
    """
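    # Illustrative sketch of the inputs this class expects. The section and key
    # names below are the ones read in __init__; the concrete values, the date
    # format and the feature names are assumptions:
    #
    #   [DATETIME]
    #   start = 01/01/2010 00:00:00
    #   end = 12/31/2017 23:00:00
    #   hourStep = 3
    #
    #   dict_features = {datetime(2010, 1, 1, 0): {'temperature': 2.4, ...}, ...}
    #   dict_target   = {datetime(2010, 1, 1, 0): 3, ...}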
    def __init__(self, config_file=None,
                 dict_features=None, dict_target=None):
        """
        Constructor that defines all needed attributes and collects features.
        """
        self._config = config_file

        self._start = datetime.strptime(self._config['DATETIME']['start'],
        self._end = datetime.strptime(self._config['DATETIME']['end'],
        self._timestep = timedelta(
            hours=self._config['DATETIME'].getfloat('hourStep'))
        self._dict_features = dict_features
        self._dict_target = dict_target

        self._datetimes = []
        self._full_dict = None
        self._dataframe = None

        # All feature names that appear at least once in the dated dictionary
        self._features = set(chain.from_iterable(u.keys()
                                                 for u in dict_features.values()))
        # Feature descriptions are read from the files in config/features:
        # CSV files map a feature name prefix to a type code, while cfg files
        # set a 'numerical' flag used for type 3 features.
        feature_files = Path.cwd() / 'config' / 'features'
        self._features = {feat: {'numerical': False} for feat in self._features}
        for feature_file in listdir(feature_files):
            if feature_file.endswith('csv'):
                with open(feature_files / feature_file, "r") as f:
                    reader = DictReader(f, delimiter=',')
                    typed_names = {row['name']: row['type'] for row in reader}
                for feature in self._features:
                    if feature.split('_')[0] in typed_names:
                        self._features[feature]['type'] = \
                            int(typed_names[feature.split('_')[0]])
            elif feature_file.endswith('cfg'):
                config = ConfigParser()
                config.read(feature_files / feature_file)
                for section in config:
                    if config.has_option(section, 'numerical'):
                        self._features[section]['numerical'] = \
                            config[section].getboolean('numerical')

        # Type 1 features are numerical, type 2 categorical; type 3 features
        # are dispatched according to the 'numerical' flag of their cfg section.
        self._numerical_columns = \
            [k for k in self._features if self._features[k]['type'] == 1
             or (self._features[k]['type'] == 3 and self._features[k]['numerical'])]

        self._categorical_columns = \
            [k for k in self._features if self._features[k]['type'] == 2
             or (self._features[k]['type'] == 3 and not self._features[k]['numerical'])]
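        # Illustrative content of the feature description files parsed above.
        # File names, feature names and values are assumptions; only the
        # expected columns and keys come from the code:
        #
        #   config/features/meteo.csv
        #       name,type
        #       temperature,1
        #       weatherDescription,2
        #
        #   config/features/calendar.cfg
        #       [dayInWeek]
        #       numerical = False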
    @timestep.setter
    def timestep(self, x):
    def _fill_dict(self):
        """
        Add the datetime keys that are missing in the dated feature dictionary,
        with all their features set to np.nan, and add the missing features
        (also as np.nan) to the datetimes that already exist.
        """
        logger.info("Adding missing dates and filling missing features with NaN values")
        current = self._start
        while current <= self._end:
            self._datetimes.append(current)
            if current not in self._dict_features:
                # Unknown datetime: every feature is missing
                self._dict_features[current] = {feature: np.nan
                                                for feature in self._features}
            else:
                # Known datetime: only add the features that are missing
                null_dict = {feature: np.nan
                             for feature in self._features}
                null_dict.update(self._dict_features[current])
                self._dict_features[current] = null_dict
            current += self._timestep

        # Complete every remaining entry of the dictionary in the same way
        for k in self._dict_features:
            null_dict = {feature: np.nan
                         for feature in self._features}
            null_dict.update(self._dict_features[k])
            self._dict_features[k] = null_dict

        self._full_dict = {k: self._dict_features[k]
                           for k in sorted(self._dict_features.keys())}
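        # Sketch of the effect on a toy dictionary with an hourly time step and
        # two features (datetimes and values are made up for illustration):
        #
        #   before: {dt(2010, 1, 1, 0): {'temperature': 2.4}}
        #   after:  {dt(2010, 1, 1, 0): {'temperature': 2.4, 'holiday': nan},
        #            dt(2010, 1, 1, 1): {'temperature': nan, 'holiday': nan},
        #            ...}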
    @property
    def full_dict(self):
        """Return the fully filled dated feature dictionary, ordered by datetime."""
        if self._full_dict is None:
            self._fill_dict()
        return self._full_dict
    def _fill_nan(self):  # helper name assumed
        """
        Fill NaN values, either by propagation or by interpolation (linear or spline).
        """
        logger.info("Filling NaN numerical values in the feature dataframe")
        # Only the numerical columns are interpolated (linearly or with splines)
        if self._config['PREPROCESSING']['fill_method'] == 'propagate':
            self._dataframe[self._numerical_columns] = \
                self._dataframe[self._numerical_columns].ffill()
        elif self._config['PREPROCESSING']['fill_method'] == 'linear':
            self._dataframe[self._numerical_columns] = \
                self._dataframe[self._numerical_columns].interpolate()
        elif self._config['PREPROCESSING']['fill_method'] == 'spline':
            self._dataframe[self._numerical_columns] = \
                self._dataframe[self._numerical_columns].interpolate(
                    method='spline',
                    order=self._config['PREPROCESSING'].getint('order'))

        # For the categorical columns, NaN values are filled by duplicating
        # the last known value (forward fill)
        logger.info("Filling NaN categorical values in the feature dataframe")
        self._dataframe[self._categorical_columns] = \
            self._dataframe[self._categorical_columns].ffill()

        # Uncomment this line to fill NaN values at the beginning of the
        # dataframe. This may not be a good idea, especially for features
        # that are available only for recent years, e.g., air quality
        # self._dataframe = self._dataframe.bfill()

        # Dropping rows that are not related to our datetime window
        # (start, end and time step)
        self._dataframe = self._dataframe.drop([k.to_pydatetime()
                                                for k in self._dataframe.index
                                                if k not in self._datetimes])
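        # The branch taken above is selected in the configuration file, e.g.
        # (an assumed snippet; only the keys read above come from the code):
        #
        #   [PREPROCESSING]
        #   fill_method = spline
        #   order = 3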
    def _add_history(self):
        """
        Integrate the previous numbers of interventions as features.
        """
        logger.info("Integrating previous nb of interventions as features")
        nb_lines = self._config['HISTORY_KNOWLEDGE'].getint('nb_lines')
        for k in range(1, nb_lines + 1):
            name = 'history_' + str(nb_lines - k + 1)
            self._dataframe[name] = [np.nan] * k + list(self._dict_target.values())[:-k]
            self._numerical_columns.append(name)
        # The first nb_lines rows only have a partially known history: drop them
        self._dataframe = self._dataframe[nb_lines:]
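        # Example with nb_lines = 3: columns history_1, history_2 and history_3
        # are added, where history_3 holds the target observed one step back,
        # history_2 two steps back and history_1 three steps back; the first
        # three rows, whose history is incomplete, are dropped.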
    def _standardize(self):
        """
        Standardize the numerical features (zero mean, unit variance).
        """
        logger.info("Standardizing numerical values in the feature dataframe")
        # We only operate on the numerical columns
        self._dataframe[self._numerical_columns] = \
            preprocessing.scale(self._dataframe[self._numerical_columns])
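        # preprocessing.scale centers each numerical column to zero mean and
        # scales it to unit variance, e.g. a column [1., 2., 3.] becomes
        # approximately [-1.22, 0., 1.22].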
    def _one_hot_encoding(self):
        """
        Apply one-hot encoding to the categorical features.
        """
        logger.info("One hot encoding for categorical features")
        # Keep the numerical columns as they are
        df_out = pd.DataFrame()
        for col in self._numerical_columns:
            df_out[col] = self._dataframe[col]
        # One indicator column per observed value of each categorical column
        for col in self._categorical_columns:
            dummies = pd.get_dummies(self._dataframe[col], prefix=col)
            for dummy_col in dummies.columns:
                df_out[dummy_col] = dummies[dummy_col]
        self._dataframe = df_out
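        # Example: a categorical column 'weatherDescription' taking the values
        # 'rain' and 'sun' (feature name and values are assumptions) is replaced
        # by two 0/1 indicator columns, 'weatherDescription_rain' and
        # 'weatherDescription_sun', named after the prefix=col argument above.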
    @property
    def dataframe(self):
        """Return the feature dataframe, creating it first if needed."""
        if self._dataframe is None:
            logger.info("Creating feature dataframe from feature dictionary")
            self._dataframe = pd.DataFrame.from_dict(self.full_dict,
                                                     orient='index')
            # Dealing with NaN values
            self._fill_nan()
            # Adding previous (historical) numbers of interventions as features
            self._add_history()
            # Normalizing numerical values
            self._standardize()
            # Dealing with categorical features
            self._one_hot_encoding()
        return self._dataframe
    @dataframe.setter
    def dataframe(self, df):
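
# Typical use, as a sketch; the class and variable names here are assumptions,
# only the constructor arguments and the `dataframe` property come from the
# code above:
#
#   config = ConfigParser()
#   config.read('config/learn.cfg')
#   engine = Preprocessing(config_file=config,
#                          dict_features=features_by_datetime,
#                          dict_target=interventions_by_datetime)
#   X = engine.dataframe   # the pipeline runs lazily on first access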