predictops.git / commitdiff - Private Git Repository (Algorithmique Numérique Distribuée)

Refactoring, and categorical / numerical / mixed NaN values are now filled accordingly.

author     Christophe Guyeux <christophe.guyeux@univ-fcomte.fr>
           Mon, 17 Feb 2020 14:03:29 +0000 (15:03 +0100)
committer  Christophe Guyeux <christophe.guyeux@univ-fcomte.fr>
           Mon, 17 Feb 2020 14:03:29 +0000 (15:03 +0100)

README.md
config/features/ephemeris_features.csv
config/features/feature_ephemeris.cfg [moved from config/feature_ephemeris.cfg with 50% similarity]
config/features/feature_meteo.cfg [moved from config/feature_meteo.cfg with 100% similarity]
config/learn.cfg
predictops/engine.py
predictops/learn/preprocessing.py
predictops/source/source.py

index e224f20d0feeedb063fff3789954a69939d828dc..e2030e747e903730cb958e06dedbfd2d55c02afc 100644 (file)
--- a/README.md
+++ b/README.md
@@ -10,7 +10,7 @@ Initialisation de l'environnement de travail
 
 `python -m venv ~/.venvs/predictops`
 
-activer l'environnement :
+activer l'environnement :
 
 `source ~/.venvs/predictops/bin/activate`
 
@@ -42,7 +42,10 @@ Tout se passe dans le répertoire features
 
 2. Détailler le traitement de chaque famille de feature dans le cfg associé
 (feature_ephemeris.cfg, feature_meteo.cfg, etc.), en accord avec les fichiers
-csv associés dans le répertoire features.
+csv associés dans le répertoire features. Dans ces derniers, le type spécifie
+si la variable est numérique (1), qualitative (2), ou si elle peut être consi-
+dérée comme de l'un ou l'autre type (3), comme le jour dans l'année.
+
 
 
 Exécution
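(In English, the added README paragraph says: in these csv files, the type column specifies whether the variable is numerical (1), qualitative (2), or can be treated as either (3), like the day of the year.) A minimal reading sketch with the standard library, assuming the config/features layout introduced by this commit:

```python
from csv import DictReader
from pathlib import Path

TYPE_LABELS = {1: 'numerical', 2: 'qualitative', 3: 'either (decided in the cfg file)'}

csv_path = Path('config') / 'features' / 'ephemeris_features.csv'
with open(csv_path, 'r') as f:
    for row in DictReader(f):
        print(f"{row['name']}: {TYPE_LABELS[int(row['type'])]}")
# e.g. hour: either (decided in the cfg file)
#      dayInMonth: qualitative
```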
index 72060ef63d14e8efd6ceec86a2c79286bcb9742d..4b75f8a588b29b38e926c45f0bedbfa2d3c591b2 100644 (file)
--- a/config/features/ephemeris_features.csv
+++ b/config/features/ephemeris_features.csv
@@ -1,7 +1,7 @@
 name,type
 hour,3
 dayInWeek,3
-dayInMonth,3
+dayInMonth,2
 dayInYear,3
 weekInYear,3
 month,3
similarity index 50%
rename from config/feature_ephemeris.cfg
rename to config/features/feature_ephemeris.cfg
index 6b37dcf1b3c4a0863b11f894483c08d0f573d5a8..ddd9f8b506775c9f7ebd631085e35cf8ffe7865d 100644 (file)
--- a/config/feature_ephemeris.cfg
+++ b/config/features/feature_ephemeris.cfg
@@ -7,8 +7,20 @@ weekInYear = True
 month      = True
 year       = True
 
-[HOUR]
+[hour]
+numerical  = False
+
+[dayInWeek]
+numerical  = False
+
+[dayInYear]
+numerical  = False
+
+[weekInYear]
+numerical  = False
+
+[month]
 numerical  = True
 
-[YEAR]
+[year]
 numerical  = True
\ No newline at end of file
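Note the section renames ([HOUR] to [hour], [YEAR] to [year]): configparser section names are case-sensitive (only option names are lowercased by default), and the preprocessing code matches sections literally against feature names such as hour and year. A minimal sketch of reading these flags, assuming the file path introduced by the rename:

```python
from configparser import ConfigParser
from pathlib import Path

config = ConfigParser()
config.read(Path('config') / 'features' / 'feature_ephemeris.cfg')

for section in config.sections():
    if config.has_option(section, 'numerical'):
        # getboolean() turns "True"/"False" (and yes/no, on/off, 1/0) into a bool
        print(section, '->', config[section].getboolean('numerical'))
# e.g. hour -> False, month -> True
```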
index bbd35575f92234503d6d9370bd4fbd6602cfe90d..9475c783e083916f0e045820a0e6f528d2d79807 100644 (file)
--- a/config/learn.cfg
+++ b/config/learn.cfg
@@ -10,8 +10,8 @@ ephemeris   = True
 
 
 [FEATURE_CONFIG]
-meteofrance = (Path.cwd() / 'config') / 'feature_meteo.cfg'
-ephemeris   = (Path.cwd() / 'config') / 'feature_ephemeris.cfg'
+meteofrance = (Path.cwd() / 'config') / 'features' / 'feature_meteo.cfg'
+ephemeris   = (Path.cwd() / 'config') / 'features' / 'feature_ephemeris.cfg'
 
 
 [PREPROCESSING]
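The feature configuration files now live in config/features, so learn.cfg points there. A quick standalone pathlib check (not code from the repository) that the two relocated files resolve from the working directory:

```python
from pathlib import Path

# Same path construction as the new [FEATURE_CONFIG] values
for name in ('feature_meteo.cfg', 'feature_ephemeris.cfg'):
    candidate = (Path.cwd() / 'config') / 'features' / name
    print(candidate, '->', 'found' if candidate.exists() else 'missing')
```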
index 2ec62df816001a0877883ffae6470df8c67ba936..8ba5043ebfdec4ef386aef2cf223b7cb52407d0a 100644 (file)
--- a/predictops/engine.py
+++ b/predictops/engine.py
@@ -5,8 +5,8 @@ from logging.config import fileConfig
 from pathlib import Path
 from shutil import rmtree
 
-from predictops.source.ephemeris import Ephemeris
-from predictops.source.meteofrance import MeteoFrance
+from .source.ephemeris import Ephemeris
+from .source.meteofrance import MeteoFrance
 
 fileConfig((Path.cwd() / 'config') / 'logging.cfg')
 logger = getLogger()
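engine.py now uses package-relative imports, so it has to be imported (or launched with -m) as part of the predictops package rather than run as a loose script. A short reminder of the difference, assuming the package layout visible in the changed-file list:

```python
# From the repository root, with predictops/ importable as a package:
#
#   python -m predictops.engine      # relative imports (.source.ephemeris) resolve
#   python predictops/engine.py      # fails: attempted relative import with no
#                                    # known parent package
#
# Outside the package, the absolute form still works:
from predictops.source.ephemeris import Ephemeris
from predictops.source.meteofrance import MeteoFrance
```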
index 49d7ef89bc2b2644f34f2022c3ee53d9827db98f..939a7fa30e79d45314adec6f8d526362137e3e9b 100644 (file)
--- a/predictops/learn/preprocessing.py
+++ b/predictops/learn/preprocessing.py
@@ -48,16 +48,23 @@ class Preprocessing:
         else:
             self._features = set(chain.from_iterable([tuple(u.keys())
                                                       for u in [*dict_features.values()]]))
-        csv_files = Path.cwd() / 'config' / 'features'
-        self._features = {feat : None for feat in self._features}
-        for csv_file in listdir(csv_files):
-            with open(csv_files / csv_file, "r") as f:
-                reader = DictReader(f, delimiter=',')
-                for row in reader:
-                    if row['name'] in self._features:
-                        self._features[row['name']] = row['type']
-        print(self._features)
-        exit()
+        feature_files = Path.cwd() / 'config' / 'features'
+        self._features = {feat : {'numerical': False} for feat in self._features}
+        for feature_file in listdir(feature_files):
+            if feature_file.endswith('csv'):
+                with open(feature_files / feature_file , "r") as f:
+                    reader = DictReader(f, delimiter=',')
+                    typed_names = {row['name']: row['type'] for row in reader}
+                for feature in self._features:
+                    if feature.split('_')[0] in typed_names:
+                        self._features[feature]['type'] = int(typed_names[feature.split('_')[0]])
+            elif feature_file.endswith('cfg'):
+                config = ConfigParser()
+                config.read(feature_files / feature_file)
+                for section in config:
+                    if config.has_option(section, 'numerical'):
+                        self._features[section]['numerical'] = config[section].getboolean('numerical')
+
 
 
     @property
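The hunk above replaces the old debugging print/exit with a real lookup: csv files provide the type code for each feature (matched on the part of the name before the first underscore, so a column such as a hypothetical pressure_0 falls back to the csv entry pressure), and cfg files provide the numerical flag for mixed-type features. A minimal standalone sketch of the same idea, with assumed feature names:

```python
from configparser import ConfigParser
from csv import DictReader
from pathlib import Path

feature_files = Path.cwd() / 'config' / 'features'
# Features requested by the user; 'pressure_0' is a hypothetical example.
features = {feat: {'numerical': False} for feat in ('hour', 'dayInMonth', 'pressure_0')}

for feature_file in feature_files.iterdir():
    if feature_file.suffix == '.csv':
        # name -> type code (1 numerical, 2 qualitative, 3 either)
        with open(feature_file, 'r') as f:
            typed_names = {row['name']: row['type'] for row in DictReader(f)}
        for feature in features:
            base_name = feature.split('_')[0]   # 'pressure_0' -> 'pressure'
            if base_name in typed_names:
                features[feature]['type'] = int(typed_names[base_name])
    elif feature_file.suffix == '.cfg':
        # per-feature sections carry the 'numerical' flag for type-3 features
        config = ConfigParser()
        config.read(feature_file)
        for section in config:
            if section in features and config.has_option(section, 'numerical'):
                features[section]['numerical'] = config[section].getboolean('numerical')

# Expected shape after this commit, e.g.:
#   {'hour': {'numerical': False, 'type': 3},
#    'dayInMonth': {'numerical': False, 'type': 2}, ...}
print(features)
```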
@@ -137,21 +144,37 @@ class Preprocessing:
             logger.info("Creating feature dataframe from feature dictionary")
             self._dataframe = pd.DataFrame.from_dict(self.full_dict,
                                                      orient='index')
-            logger.info("Filling NaN values in the feature dataframe")
-
+            logger.info("Filling NaN numerical values in the feature dataframe")
+            # We interpolate (linearly or with splines) only numerical columns
+            numerical_columns = [k for k in self._features if self._features[k]['type'] == 1
+                       or (self._features[k]['type'] == 3 and self._features[k]['numerical'])]
+            # The interpolation
             if self._config['PREPROCESSING']['fill_method'] == 'propagate':
-                self._dataframe = self._dataframe.fillna(method='ffill')
+                self._dataframe[numerical_columns] =\
+                    self._dataframe[numerical_columns].fillna(method='ffill')
             elif self._config['PREPROCESSING']['fill_method'] == 'linear':
-                self._dataframe = self._dataframe.interpolate()
+                self._dataframe[numerical_columns] =\
+                    self._dataframe[numerical_columns].interpolate()
             elif self._config['PREPROCESSING']['fill_method'] == 'spline':
-                self._dataframe = self._dataframe.interpolate(method='spline',
-                                                              order=self._config['PREPROCESSING'].getint('order'))
+                self._dataframe[numerical_columns] =\
+                    self._dataframe[numerical_columns].interpolate(method='spline',
+                         order=self._config['PREPROCESSING'].getint('order'))
+
+            # For the categorical columns, NaN values are filled by duplicating
+            # the last known value (forward fill method)
+            logger.info("Filling NaN categorical values in the feature dataframe")
+            categorical_columns = [k for k in self._features if self._features[k]['type'] == 2
+                       or (self._features[k]['type'] == 3 and not self._features[k]['numerical'])]
+            self._dataframe[categorical_columns] =\
+                self._dataframe[categorical_columns].fillna(method='ffill')
 
             # Uncomment this line to fill NaN values at the beginning of the
             # dataframe. This may not be a good idea, especially for features
             # that are available only for recent years, e.g., air quality
             #self._dataframe = self._dataframe.fillna(method='bfill')
 
+            # Dropping rows that are not related to our datetime window (start/
+            # step / end)
             self._dataframe = self._dataframe.drop([k.to_pydatetime()
                                                    for k in self._dataframe.T
                                                    if k not in self._datetimes])
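The second hunk is the point of the commit message: NaN values are now filled per column family. Columns of type 1, plus type-3 columns flagged numerical, are interpolated (propagate / linear / spline according to the [PREPROCESSING] fill_method), while columns of type 2, plus type-3 columns not flagged numerical, are forward-filled. A small self-contained pandas sketch of that split (data and column names are invented for illustration):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame({
    'temperature': [10.0, np.nan, 14.0, np.nan, 18.0],   # numerical -> interpolated
    'dayInMonth':  [1,    np.nan, 3,    np.nan, 5],      # categorical -> forward-filled
})

numerical_columns   = ['temperature']
categorical_columns = ['dayInMonth']

# Linear interpolation for numerical columns only
df[numerical_columns] = df[numerical_columns].interpolate()

# Forward fill (duplicate the last known value) for categorical columns
df[categorical_columns] = df[categorical_columns].fillna(method='ffill')

print(df)
```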
index 8e68716e3b85f74767c6067f6b9ea866ecd0cfc9..70f24da645e932495787804418af825fb86f7b9a 100644 (file)
--- a/predictops/source/source.py
+++ b/predictops/source/source.py
@@ -1,3 +1,4 @@
+from configparser import ConfigParser
 from csv import DictReader
 from logging import getLogger
 from logging.config import fileConfig
@@ -11,14 +12,40 @@ logger = getLogger()
 class Source:
     def __init__(self):
         '''
-        Check if the same feature name is used in two different feature sources
+        Check if the same feature name is used in two different feature sources,
+        and if the sources of type 3 (being both categorical and numerical) have
+        a specified type in the feature_...cfg file
         '''
         logger.info('Check for redondant feature names')
-        csv_files = Path.cwd() / 'config' / 'features'
+        feature_files = Path.cwd() / 'config' / 'features'
         list_of_names = []
-        for csv_file in listdir(csv_files):
-            with open(csv_files / csv_file, "r") as f:
-                reader = DictReader(f, delimiter=',')
-                list_of_names.extend([row['name'] for row in reader])
+        for file_name in listdir(feature_files ):
+            if file_name.endswith('csv'):
+                with open(feature_files  / file_name, "r") as f:
+                    reader = DictReader(f, delimiter=',')
+                    list_of_names.extend([row['name'] for row in reader])
+
         if len(list_of_names) != len(set(list_of_names)):
-            raise ValueError("At least two features have the same name")
\ No newline at end of file
+            raise ValueError("At least two features have the same name")
+
+        logger.info('Check for specified feature types')
+        names_of_mixed_types = []
+        for file_name in listdir(feature_files):
+            if file_name.endswith('csv'):
+                with open(feature_files  / file_name, "r") as f:
+                    reader = DictReader(f, delimiter=',')
+                    names_of_mixed_types.extend([row['name'] for row in reader
+                                                 if row['type'] == '3'])
+
+        cfg_names_of_mixed_types = []
+        for file_name in listdir(feature_files):
+            if file_name.endswith('cfg'):
+                config = ConfigParser()
+                config.read(feature_files / file_name)
+                for section in config:
+                    if config.has_option(section, 'numerical'):
+                        cfg_names_of_mixed_types.append(section)
+
+        if sorted(names_of_mixed_types) != sorted(cfg_names_of_mixed_types):
+            raise ValueError(f"Problem with features of mixed types: "
+                             f"{set(names_of_mixed_types).symmetric_difference(cfg_names_of_mixed_types)}")
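Source.__init__ now enforces two invariants over config/features: feature names must be unique across all csv files, and every type-3 (mixed) feature must have a matching section with a numerical option in one of the cfg files; otherwise a ValueError reports the symmetric difference. A tiny illustration of that second check with in-memory data (the example names are made up):

```python
names_of_mixed_types = ['hour', 'dayInWeek', 'month']      # type == '3' in the csv files
cfg_names_of_mixed_types = ['hour', 'month', 'year']       # sections with a 'numerical' option

if sorted(names_of_mixed_types) != sorted(cfg_names_of_mixed_types):
    mismatch = set(names_of_mixed_types).symmetric_difference(cfg_names_of_mixed_types)
    raise ValueError(f"Problem with features of mixed types: {mismatch}")
# -> ValueError: Problem with features of mixed types: {'dayInWeek', 'year'}
#    (set ordering may vary)
```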