Private Git Repository - predictops.git/commitdiff
Logo — Algorithmique Numérique Distribuée

Private GIT Repository
Integrating historical features
author: Christophe Guyeux <christophe.guyeux@univ-fcomte.fr>
Tue, 18 Feb 2020 10:26:52 +0000 (11:26 +0100)
committer: Christophe Guyeux <christophe.guyeux@univ-fcomte.fr>
Tue, 18 Feb 2020 10:26:52 +0000 (11:26 +0100)
config/learn.cfg
main.py
predictops/engine.py
predictops/learn/preprocessing.py

index f6b2e430f60debb523bc70bdccac3ab7a8fff8ae..1a9566eee07f32774bf5d8655cab5c3eb4b591c2 100644 (file)
@@ -19,5 +19,9 @@ fill_method = spline
 order       = 3
 
 
 order       = 3
 
 
+[HISTORY_KNOWLEDGE]
+nb_lines = 5
+
+
 [TARGET]
 config = (Path.cwd() / 'config') / 'targets' / 'sdis25.cfg'
 [TARGET]
 config = (Path.cwd() / 'config') / 'targets' / 'sdis25.cfg'
diff --git a/main.py b/main.py
index d1d7f9c24721819d8e9d0a39eb50a1eb52546e47..cf8fe81d13940c996d9abb1344c9a296d6f199f1 100644 (file)
--- a/main.py
+++ b/main.py
@@ -1,5 +1,4 @@
 from predictops.engine import Engine
 from predictops.engine import Engine
-from predictops.learn.preprocessing import Preprocessing
 
 from logging import getLogger
 from logging.config import fileConfig
 
 from logging import getLogger
 from logging.config import fileConfig
@@ -17,10 +16,8 @@ if __name__ == '__main__':
     engine.add_features()
     engine.add_target()
 
     engine.add_features()
     engine.add_target()
 
-    process = Preprocessing(config_file = config, dict_features = engine.X)
+    engine.add_preprocessing()
 
 
-    print(process.dataframe.head(n=20))
-    print(process.dataframe.tail(n=20))
 
     '''target = toarea(stream_file = Path.cwd() / 'data' / 'targets' / 'sdis25' / 'interventions.csv')
 
 
     '''target = toarea(stream_file = Path.cwd() / 'data' / 'targets' / 'sdis25' / 'interventions.csv')
 
index dedd2652cdd9efc97eaf24d26c1d3bd66a1398ec..44ab9c4c08bc089fd53038f5cd84fe0b4d557ef1 100644 (file)
@@ -7,6 +7,7 @@ from shutil import rmtree
 
 from .source.ephemeris import Ephemeris
 from .source.meteofrance import MeteoFrance
 
 from .source.ephemeris import Ephemeris
 from .source.meteofrance import MeteoFrance
+from .learn.preprocessing import Preprocessing
 from .target.target import Target
 
 fileConfig((Path.cwd() / 'config') / 'logging.cfg')
 from .target.target import Target
 
 fileConfig((Path.cwd() / 'config') / 'logging.cfg')
@@ -71,6 +72,14 @@ class Engine:
                               timestep = self._timestep)
 
 
                               timestep = self._timestep)
 
 
+    def add_preprocessing(self):
+        process = Preprocessing(config_file = self._config,
+                                dict_features = self.X,
+                                dict_target = self.y)
+        print(process.dataframe.head(n=2))
+
+
+
     @property
     def X(self):
         return self._X
     @property
     def X(self):
         return self._X
index 4197b8fed13cb4137e33655753976532e42987a2..187a5b73b664da58031d45d55839548a10ec1be1 100644 (file)
@@ -25,12 +25,12 @@ class Preprocessing:
      - NaN values are then filled with last known values.
     '''
 
      - NaN values are then filled with last known values.
     '''
 
-    def __init__(self, config_file = None, dict_features = None, features = None):
+    def __init__(self, config_file = None,
+                 dict_features = None, dict_target = None):
         '''
         Constructor that defines all needed attributes and collects features.
         '''
         '''
         Constructor that defines all needed attributes and collects features.
         '''
-        self._config = ConfigParser()
-        self._config.read(config_file)
+        self._config = config_file
 
         self._start = datetime.strptime(self._config['DATETIME']['start'],
                                         '%m/%d/%Y %H:%M:%S')
 
         self._start = datetime.strptime(self._config['DATETIME']['start'],
                                         '%m/%d/%Y %H:%M:%S')
@@ -39,16 +39,15 @@ class Preprocessing:
         self._timestep = timedelta(hours =
                                    self._config['DATETIME'].getfloat('hourStep'))
         self._dict_features = dict_features
         self._timestep = timedelta(hours =
                                    self._config['DATETIME'].getfloat('hourStep'))
         self._dict_features = dict_features
+        self._dict_target = dict_target
+
         self._full_dict = None
         self._dataframe = None
         self._datetimes = []
         self._full_dict = None
         self._dataframe = None
         self._datetimes = []
-        # If features are not provided to the constructor, then we collect
-        # any existing feature in the dictionary
-        if features != None:
-            self._features = features
-        else:
-            self._features = set(chain.from_iterable([tuple(u.keys())
+
+        self._features = set(chain.from_iterable([tuple(u.keys())
                                                       for u in [*dict_features.values()]]))
                                                       for u in [*dict_features.values()]]))
+
         feature_files = Path.cwd() / 'config' / 'features'
         self._features = {feat : {'numerical': False} for feat in self._features}
         for feature_file in listdir(feature_files):
         feature_files = Path.cwd() / 'config' / 'features'
         self._features = {feat : {'numerical': False} for feat in self._features}
         for feature_file in listdir(feature_files):
@@ -66,6 +65,12 @@ class Preprocessing:
                     if config.has_option(section, 'numerical'):
                         self._features[section]['numerical'] = config[section].getboolean('numerical')
 
                     if config.has_option(section, 'numerical'):
                         self._features[section]['numerical'] = config[section].getboolean('numerical')
 
+        self._numerical_columns = [k for k in self._features if self._features[k]['type'] == 1
+                   or (self._features[k]['type'] == 3 and self._features[k]['numerical'])]
+
+        self._categorical_columns = [k for k in self._features if self._features[k]['type'] == 2
+                   or (self._features[k]['type'] == 3 and not self._features[k]['numerical'])]
+
 
 
     @property
 
 
     @property
@@ -141,27 +146,23 @@ class Preprocessing:
         '''
         logger.info("Filling NaN numerical values in the feature dataframe")
         # We interpolate (linearly or with splines) only numerical columns
         '''
         logger.info("Filling NaN numerical values in the feature dataframe")
         # We interpolate (linearly or with splines) only numerical columns
-        numerical_columns = [k for k in self._features if self._features[k]['type'] == 1
-                   or (self._features[k]['type'] == 3 and self._features[k]['numerical'])]
         # The interpolation
         if self._config['PREPROCESSING']['fill_method'] == 'propagate':
         # The interpolation
         if self._config['PREPROCESSING']['fill_method'] == 'propagate':
-            self._dataframe[numerical_columns] =\
-                self._dataframe[numerical_columns].fillna(method='ffill')
+            self._dataframe[self._numerical_columns] =\
+                self._dataframe[self._numerical_columns].fillna(method='ffill')
         elif self._config['PREPROCESSING']['fill_method'] == 'linear':
         elif self._config['PREPROCESSING']['fill_method'] == 'linear':
-            self._dataframe[numerical_columns] =\
-                self._dataframe[numerical_columns].interpolate()
+            self._dataframe[self._numerical_columns] =\
+                self._dataframe[self._numerical_columns].interpolate()
         elif self._config['PREPROCESSING']['fill_method'] == 'spline':
         elif self._config['PREPROCESSING']['fill_method'] == 'spline':
-            self._dataframe[numerical_columns] =\
-                self._dataframe[numerical_columns].interpolate(method='spline',
+            self._dataframe[self._numerical_columns] =\
+                self._dataframe[self._numerical_columns].interpolate(method='spline',
                      order=self._config['PREPROCESSING'].getint('order'))
 
         # For the categorical columns, NaN values are filled by duplicating
         # the last known value (forward fill method)
         logger.info("Filling NaN categorical values in the feature dataframe")
                      order=self._config['PREPROCESSING'].getint('order'))
 
         # For the categorical columns, NaN values are filled by duplicating
         # the last known value (forward fill method)
         logger.info("Filling NaN categorical values in the feature dataframe")
-        categorical_columns = [k for k in self._features if self._features[k]['type'] == 2
-                   or (self._features[k]['type'] == 3 and not self._features[k]['numerical'])]
-        self._dataframe[categorical_columns] =\
-            self._dataframe[categorical_columns].fillna(method='ffill')
+        self._dataframe[self._categorical_columns] =\
+            self._dataframe[self._categorical_columns].fillna(method='ffill')
 
         # Uncomment this line to fill NaN values at the beginning of the
         # dataframe. This may not be a good idea, especially for features
 
         # Uncomment this line to fill NaN values at the beginning of the
         # dataframe. This may not be a good idea, especially for features
@@ -175,15 +176,33 @@ class Preprocessing:
                                                if k not in self._datetimes])
 
 
                                                if k not in self._datetimes])
 
 
+    def _add_history(self):
+        '''
+        Integrating previous nb of interventions as features
+        '''
+        logger.info("Integrating previous nb of interventions as features")
+        nb_lines = self._config['HISTORY_KNOWLEDGE'].getint('nb_lines')
+        print(len(self._dataframe))
+        print(self._dataframe.head(4))
+        for k in range(1,nb_lines+1):
+            name = 'history_'+str(nb_lines-k+1)
+            self._dataframe[name] = [np.NaN]*k + list(self._dict_target.values())[:-k]
+            self._numerical_columns.append(name)
+        self._dataframe = self._dataframe[nb_lines:]
+        print(self._dataframe.head(4))
+        print(len(self._dataframe))
+
+
+
     def _standardize(self):
         '''
         Normalizing numerical features
         '''
         logger.info("Standardizing numerical values in the feature dataframe")
         # We operate only on numerical columns
     def _standardize(self):
         '''
         Normalizing numerical features
         '''
         logger.info("Standardizing numerical values in the feature dataframe")
         # We operate only on numerical columns
-        numerical_columns = [k for k in self._features if self._features[k]['type'] == 1
-                   or (self._features[k]['type'] == 3 and self._features[k]['numerical'])]
-        self._dataframe[numerical_columns] = preprocessing.scale(self._dataframe[numerical_columns])
+        self._dataframe[self._numerical_columns] =\
+            preprocessing.scale(self._dataframe[self._numerical_columns])
+
 
 
     def _one_hot_encoding(self):
 
 
     def _one_hot_encoding(self):
@@ -191,18 +210,17 @@ class Preprocessing:
         Apply a one hot encoding for category features
         '''
         logger.info("One hot encoding for categorical feature")
         Apply a one hot encoding for category features
         '''
         logger.info("One hot encoding for categorical feature")
-        categorical_columns = [k for k in self._features if self._features[k]['type'] == 2
-                   or (self._features[k]['type'] == 3 and not self._features[k]['numerical'])]
 
 
-        # On fait un codage disjonctif complet des variables qualitatives
+        # We store numerical columns
         df_out = pd.DataFrame()
         df_out = pd.DataFrame()
-        for col in categorical_columns:
+        for col in  self._numerical_columns:
+            df_out[col] = self._dataframe[col]
+        # The one hot encoding
+        for col in self._categorical_columns:
             pd1 = pd.get_dummies(self._dataframe[col],prefix=col)
             for col1 in pd1.columns:
                 df_out[col1] = pd1[col1]
         self._dataframe = df_out
             pd1 = pd.get_dummies(self._dataframe[col],prefix=col)
             for col1 in pd1.columns:
                 df_out[col1] = pd1[col1]
         self._dataframe = df_out
-        print(self._dataframe.head())
-
 
 
     @property
 
 
     @property
@@ -216,6 +234,8 @@ class Preprocessing:
                                                      orient='index')
             # Dealing with NaN values
             self._fill_nan()
                                                      orient='index')
             # Dealing with NaN values
             self._fill_nan()
+            # Adding previous (historical) nb_interventions as features
+            self._add_history()
             # Normalizing numerical values
             self._standardize()
             # Dealing with categorical features
             # Normalizing numerical values
             self._standardize()
             # Dealing with categorical features