+ '''
+ Returns the fully filled dated feature dictionary, ordered by datetimes
+ '''
+ if self._full_dict is None:
+ self._fill_dict()
+ return self._full_dict
+
+
def _fill_nan(self):
    '''
    Fill NaN values, either by propagation or by interpolation (linear or splines).

    The strategy is read from the PREPROCESSING section of the configuration
    ('propagate', 'linear' or 'spline'). Categorical columns are always
    forward-filled. Finally, rows whose (year, month, dayInMonth, hour)
    datetime is not in self._datetimes are dropped.
    '''
    logger.info("Filling NaN numerical values in the feature dataframe")
    # We interpolate (linearly or with splines) only numerical columns
    fill_method = self._config['PREPROCESSING']['fill_method']
    if fill_method == 'propagate':
        # .ffill() replaces the deprecated fillna(method='ffill')
        self._dataframe[self._numerical_columns] =\
            self._dataframe[self._numerical_columns].ffill()
    elif fill_method == 'linear':
        self._dataframe[self._numerical_columns] =\
            self._dataframe[self._numerical_columns].interpolate()
    elif fill_method == 'spline':
        self._dataframe[self._numerical_columns] =\
            self._dataframe[self._numerical_columns].interpolate(
                method='spline',
                order=self._config['PREPROCESSING'].getint('order'))

    # For the categorical columns, NaN values are filled by duplicating
    # the last known value (forward fill method)
    logger.info("Filling NaN categorical values in the feature dataframe")
    self._dataframe[self._categorical_columns] =\
        self._dataframe[self._categorical_columns].ffill()

    # Uncomment this line to fill NaN values at the beginning of the
    # dataframe. This may not be a good idea, especially for features
    # that are available only for recent years, e.g., air quality
    #self._dataframe = self._dataframe.fillna(method='bfill')

    # Dropping rows that are not related to our datetime window (start/
    # step / end)
    logger.info("Dropping rows that are not related to our datetime window")
    row_datetimes = self._dataframe.apply(
        lambda x: datetime(int(x.year), int(x.month),
                           int(x.dayInMonth), int(x.hour)),
        axis=1)
    # Membership against a set is O(1) per row (the original tested each
    # row against self._datetimes, O(n) when it is a list); isin avoids
    # creating and then dropping the temporary 'datetime'/'row_ok' columns
    allowed = set(self._datetimes)
    self._dataframe = self._dataframe[row_datetimes.isin(allowed)]
    logger.info("Rows dropped")
+
+
def _add_history(self):
    '''
    Integrating previous nb of interventions as features.

    For k = 1..nb_lines, appends a numerical column 'history_{nb_lines-k+1}'
    containing the target series shifted k steps into the past (padded with
    NaN), then drops the first nb_lines rows, which lack a full history.
    '''
    logger.info("Integrating previous nb of interventions as features")
    nb_lines = self._config['HISTORY_KNOWLEDGE'].getint('nb_lines')
    # Hoisted out of the loop: the target values do not change per iteration
    targets = list(self._dict_target.values())
    for k in range(1, nb_lines + 1):
        name = 'history_' + str(nb_lines - k + 1)
        # np.nan, not np.NaN: the NaN alias was removed in NumPy 2.0
        self._dataframe[name] = [np.nan] * k + targets[:-k]
        self._numerical_columns.append(name)
    # The leading rows have incomplete history columns: discard them
    self._dataframe = self._dataframe[nb_lines:]
+
+
+
def _standardize(self):
    '''
    Normalizing numerical features
    '''
    logger.info("Standardizing numerical values in the feature dataframe")
    # Scaling is applied to the numerical columns only; categorical
    # (one-hot) columns are left untouched
    numerical_part = self._dataframe[self._numerical_columns]
    self._dataframe[self._numerical_columns] = preprocessing.scale(numerical_part)
+
+
+
def _one_hot_encoding(self):
    '''
    Apply a one hot encoding for category features
    '''
    logger.info("One hot encoding for categorical feature")

    # Assemble the new dataframe: the numerical columns are kept as-is,
    # then each categorical column is expanded into its dummy columns
    pieces = [self._dataframe[self._numerical_columns].copy()]
    pieces.extend(
        pd.get_dummies(self._dataframe[column], prefix=column)
        for column in self._categorical_columns
    )
    self._dataframe = pd.concat(pieces, axis=1)