]> AND Private Git Repository - predictops.git/blobdiff - predictops/learn/preprocessing.py
Logo AND Algorithmique Numérique Distribuée

Private GIT Repository
Refactoring, and categorical / numerical / mixed NaN values are now handled separately.
[predictops.git] / predictops / learn / preprocessing.py
index b58ffac00588fc22d7f7f3d37edcf63b791d16f4..939a7fa30e79d45314adec6f8d526362137e3e9b 100644 (file)
@@ -1,6 +1,10 @@
+from configparser import ConfigParser
+from csv import DictReader
+from datetime import datetime, timedelta
 from itertools import chain
 from logging import getLogger
 from logging.config import fileConfig
+from os import listdir
 from pathlib import Path
 
 import numpy as np
@@ -10,50 +14,175 @@ fileConfig((Path.cwd() / 'config') / 'logging.cfg')
 logger = getLogger()
 
class Preprocessing:
    '''
    Generate a pandas dataframe from a dictionary of features per datetime, which
    respects the starting and ending dates of the study, and its precision (the
    time step) as read from the configuration file. Missing feature values are
    completed.

     - Missing datetimes are added first with np.nan feature values,
     - The dataframe is then constructed based on the filled feature dictionary,
     - NaN values of numerical features are then interpolated (or propagated),
       while NaN values of categorical features are forward-filled.
    '''

    def __init__(self, config_file = None, dict_features = None, features = None):
        '''
        Constructor that defines all needed attributes and collects features.

        Parameters:
            config_file: path of a configuration file with a DATETIME section
                (start, end, hourStep) and a PREPROCESSING section
                (fill_method, and order when fill_method is spline).
            dict_features: dictionary mapping datetimes to {feature: value}
                dictionaries.
            features: optional iterable of feature names; when omitted, every
                feature occurring in dict_features is collected.
        '''
        self._config = ConfigParser()
        self._config.read(config_file)

        self._start = datetime.strptime(self._config['DATETIME']['start'],
                                        '%m/%d/%Y %H:%M:%S')
        self._end = datetime.strptime(self._config['DATETIME']['end'],
                                      '%m/%d/%Y %H:%M:%S')
        self._timestep = timedelta(hours=
                                   self._config['DATETIME'].getfloat('hourStep'))
        self._dict_features = dict_features
        # Lazily computed caches, invalidated whenever the datetime window
        # (start / end / timestep) changes through the setters below.
        self._full_dict = None
        self._dataframe = None
        self._datetimes = []
        # If features are not provided to the constructor, then we collect
        # any existing feature in the dictionary
        if features is not None:
            self._features = features
        else:
            self._features = set(chain.from_iterable(
                tuple(u.keys()) for u in dict_features.values()))
        # Each feature is described by a 'type' (read from the csv files in
        # config/features; presumably 1: numerical, 2: categorical, 3: mixed —
        # TODO confirm) and, for mixed features, a 'numerical' boolean flag
        # (read from the cfg files). 'type' defaults to None so that features
        # described in no csv file do not raise a KeyError later on.
        self._features = {feat: {'numerical': False, 'type': None}
                          for feat in self._features}
        self._collect_feature_types()


    def _collect_feature_types(self):
        '''
        Scan the files in config/features: csv files give the type of each
        feature (matched on the feature name prefix before the first '_'),
        cfg files give the 'numerical' flag of features.
        '''
        feature_files = Path.cwd() / 'config' / 'features'
        for feature_file in listdir(feature_files):
            if feature_file.endswith('csv'):
                with open(feature_files / feature_file, "r") as f:
                    reader = DictReader(f, delimiter=',')
                    typed_names = {row['name']: row['type'] for row in reader}
                for feature in self._features:
                    prefix = feature.split('_')[0]
                    if prefix in typed_names:
                        self._features[feature]['type'] = int(typed_names[prefix])
            elif feature_file.endswith('cfg'):
                config = ConfigParser()
                config.read(feature_files / feature_file)
                for section in config:
                    # A cfg section may describe a feature that is not part of
                    # this study: guard against a KeyError.
                    if section in self._features \
                       and config.has_option(section, 'numerical'):
                        self._features[section]['numerical'] = \
                            config[section].getboolean('numerical')


    def _invalidate(self):
        '''
        Drop every cached result, so that the filled dictionary and the
        dataframe are recomputed with the new datetime window.
        '''
        self._full_dict = None
        self._dataframe = None
        self._datetimes = []


    @property
    def start(self):
        return self._start

    @start.setter
    def start(self, x):
        self._start = x
        self._invalidate()


    @property
    def end(self):
        return self._end

    @end.setter
    def end(self, x):
        self._end = x
        self._invalidate()


    @property
    def timestep(self):
        return self._timestep

    @timestep.setter
    def timestep(self, x):
        self._timestep = x
        self._invalidate()


    def _fill_dict(self):
        '''
        Add datetime keys in the dated feature dictionary that are missing. The
        features are then set to np.nan. Add missing features in existing
        datetimes too.
        '''
        logger.info("Adding missing dates and filling missing features with NaN values")
        current = self._start
        while current <= self._end:
            self._datetimes.append(current)
            if current not in self._dict_features:
                self._dict_features[current] = {feature: np.nan
                                                for feature in self._features}
            else:
                null_dict = {feature: np.nan
                             for feature in self._features}
                null_dict.update(self._dict_features[current])
                self._dict_features[current] = null_dict
            current += self._timestep
        # Datetimes that lie outside of the [start, end] window may also miss
        # some features: complete them too.
        for k in self._dict_features:
            null_dict = {feature: np.nan
                         for feature in self._features}
            null_dict.update(self._dict_features[k])
            self._dict_features[k] = null_dict

        self._full_dict = {k: self._dict_features[k]
                           for k in sorted(self._dict_features.keys())}


    @property
    def full_dict(self):
        '''
        Returns the fully filled dated feature dictionary, ordered by datetimes
        '''
        if self._full_dict is None:
            self._fill_dict()
        return self._full_dict


    @property
    def dataframe(self):
        '''
        Returns the feature dataframe, after creating it if needed.

        Numerical columns (type 1, or type 3 flagged as numerical) are filled
        according to the fill_method option of the PREPROCESSING section
        (propagate, linear, or spline); categorical columns (type 2, or type 3
        not flagged as numerical) are forward-filled. Rows that do not belong
        to the start / timestep / end window are finally dropped.
        '''
        if self._dataframe is None:
            logger.info("Creating feature dataframe from feature dictionary")
            self._dataframe = pd.DataFrame.from_dict(self.full_dict,
                                                     orient='index')
            logger.info("Filling NaN numerical values in the feature dataframe")
            # We interpolate (linearly or with splines) only numerical columns
            numerical_columns = [k for k, v in self._features.items()
                                 if v['type'] == 1
                                 or (v['type'] == 3 and v['numerical'])]
            fill_method = self._config['PREPROCESSING']['fill_method']
            if fill_method == 'propagate':
                self._dataframe[numerical_columns] = \
                    self._dataframe[numerical_columns].ffill()
            elif fill_method == 'linear':
                self._dataframe[numerical_columns] = \
                    self._dataframe[numerical_columns].interpolate()
            elif fill_method == 'spline':
                self._dataframe[numerical_columns] = \
                    self._dataframe[numerical_columns].interpolate(
                        method='spline',
                        order=self._config['PREPROCESSING'].getint('order'))

            # For the categorical columns, NaN values are filled by duplicating
            # the last known value (forward fill method)
            logger.info("Filling NaN categorical values in the feature dataframe")
            categorical_columns = [k for k, v in self._features.items()
                                   if v['type'] == 2
                                   or (v['type'] == 3 and not v['numerical'])]
            self._dataframe[categorical_columns] = \
                self._dataframe[categorical_columns].ffill()

            # NaN values at the very beginning of the dataframe are
            # deliberately left in place: back-filling them may not be a good
            # idea, especially for features that are available only for recent
            # years, e.g., air quality.

            # Dropping rows that are not related to our datetime window
            # (start / timestep / end)
            self._dataframe = \
                self._dataframe.loc[self._dataframe.index.isin(self._datetimes)]
        return self._dataframe


    @dataframe.setter
    def dataframe(self, df):
        self._dataframe = df
 
 
-    def fill_na(self):
-        self.dataframe = self.dataframe.fillna(method='ffill')
\ No newline at end of file