diff --git a/.gitignore b/.gitignore
index c76ddb39a2a58768e8b27d16f7e4ca0b08cce0fe..4cd6fc3664652fa76da71e428d288b7416a06c4a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,3 @@
-dataset.zip
-dataset/*
+files/dataset.zip
+files/dataset/*
 
diff --git a/docker-files/hadoop/.env b/docker-files/hadoop/.env
new file mode 100644
index 0000000000000000000000000000000000000000..91f29f7023d94f67ccbfde8657e749bcbaccf585
--- /dev/null
+++ b/docker-files/hadoop/.env
@@ -0,0 +1,5 @@
+CLUSTER_NAME=project
+ADMIN_NAME=centos
+ADMIN_PASSWORD=ensiie
+INSTALL_PYTHON=true # install Python on the nodes (needed to run Hadoop Streaming jobs)
+INSTALL_SQOOP=false
diff --git a/docker-files/hadoop/docker-compose-que-hadoop.yml b/docker-files/hadoop/docker-compose-que-hadoop.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4f3fcc6eaf00fa6b22abc7a27546248e28c2f275
--- /dev/null
+++ b/docker-files/hadoop/docker-compose-que-hadoop.yml
@@ -0,0 +1,59 @@
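+# Hadoop-only stack: HDFS namenode plus two datanodes, without YARN, Hive or Spark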
+services:
+  namenode:
+    image: bde2020/hadoop-namenode:2.0.0-hadoop3.2.1-java8
+    networks:
+      - hadoop
+    container_name: namenode
+    volumes:
+      - hadoop_namenode:/hadoop/dfs/name
+      - ../../files:/data/hdfs/files
+    environment:
+      - CLUSTER_NAME=test
+    env_file:
+      - ./hadoop.env
+    deploy:
+      mode: replicated
+      replicas: 1
+      placement:
+        constraints:
+          - node.hostname == akswnc4.aksw.uni-leipzig.de
+    ports:
+      - 9870:9870
+      - 9000:9000
+
+  datanode1:
+    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
+    container_name: datanode1
+    networks:
+      - hadoop
+    volumes:
+      - hadoop_datanode_1:/hadoop/dfs/data
+    env_file:
+      - ./hadoop.env
+    environment:
+      SERVICE_PRECONDITION: "namenode:9870"
+    deploy:
+      mode: global
+  datanode2:
+    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
+    container_name: datanode2
+    networks:
+      - hadoop
+    volumes:
+      - hadoop_datanode_2:/hadoop/dfs/data
+    env_file:
+      - ./hadoop.env
+    environment:
+      SERVICE_PRECONDITION: "namenode:9870"
+    deploy:
+      mode: global
+
+volumes:
+  hadoop_datanode_1:
+  hadoop_datanode_2:
+  hadoop_namenode:
+
+networks:
+  hadoop:
+
+
diff --git a/docker-files/hadoop/docker-compose.yml b/docker-files/hadoop/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4e2ba1b92ec99ee57b95f36383b9e36fcb079f07
--- /dev/null
+++ b/docker-files/hadoop/docker-compose.yml
@@ -0,0 +1,195 @@
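+# Full stack: HDFS (namenode + 2 datanodes), YARN (resourcemanager, 2 nodemanagers, historyserver), Hive (hive-server, hive-metastore, hive-metastore-postgresql) and Spark (master + 2 workers)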
+services:
+  namenode:
+    image: bde2020/hadoop-namenode:2.0.0-hadoop3.2.1-java8
+    networks:
+      - hadoop
+    container_name: namenode
+    volumes:
+      - hadoop_namenode:/hadoop/dfs/name
+      - ../../files:/data/hdfs/files
+    environment:
+      - CLUSTER_NAME=project
+    env_file:
+      - ./hadoop.env
+    deploy:
+      mode: replicated
+      replicas: 1
+      placement:
+        constraints:
+          - node.hostname == akswnc4.aksw.uni-leipzig.de
+    ports:
+      - 9870:9870
+      - 9000:9000
+
+  datanode1:
+    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
+    container_name: datanode1
+    networks:
+      - hadoop
+    volumes:
+      - hadoop_datanode_1:/hadoop/dfs/data
+    env_file:
+      - ./hadoop.env
+    environment:
+      SERVICE_PRECONDITION: "namenode:9870"
+    deploy:
+      mode: global
+  datanode2:
+    image: bde2020/hadoop-datanode:2.0.0-hadoop3.2.1-java8
+    container_name: datanode2
+    networks:
+      - hadoop
+    volumes:
+      - hadoop_datanode_2:/hadoop/dfs/data
+    env_file:
+      - ./hadoop.env
+    environment:
+      SERVICE_PRECONDITION: "namenode:9870"
+    deploy:
+      mode: global
+
+
+  resourcemanager:
+    image: bde2020/hadoop-resourcemanager:2.0.0-hadoop3.2.1-java8
+    container_name: resourcemanager
+    networks:
+      - hadoop
+    environment:
+      SERVICE_PRECONDITION: "namenode:9870 datanode1:9864 datanode2:9864"
+    env_file:
+      - ./hadoop.env
+    volumes:
+      - ../../files:/data/yarn/files
+    deploy:
+      mode: replicated
+      replicas: 1
+      placement:
+        constraints:
+          - node.hostname == akswnc4.aksw.uni-leipzig.de
+    healthcheck:
+      disable: true
+  nodemanager1:
+    image: bde2020/hadoop-nodemanager:2.0.0-hadoop3.2.1-java8
+    container_name: nodemanager1
+    networks:
+      - hadoop
+    environment:
+      SERVICE_PRECONDITION: "namenode:9870 datanode1:9864 resourcemanager:8088"
+    env_file:
+      - ./hadoop.env
+    deploy:
+      mode: global
+  nodemanager2:
+    image: bde2020/hadoop-nodemanager:2.0.0-hadoop3.2.1-java8
+    container_name: nodemanager2
+    networks:
+      - hadoop
+    environment:
+      SERVICE_PRECONDITION: "namenode:9870 datanode2:9864 resourcemanager:8088"
+    env_file:
+      - ./hadoop.env
+    deploy:
+      mode: global
+  historyserver:
+    image: bde2020/hadoop-historyserver:2.0.0-hadoop3.2.1-java8
+    container_name: historyserver
+    networks:
+      - hadoop
+    volumes:
+      - hadoop_historyserver:/hadoop/yarn/timeline
+    environment:
+      SERVICE_PRECONDITION: "namenode:9870 datanode1:9864 datanode2:9864 resourcemanager:8088"
+    env_file:
+      - ./hadoop.env
+    deploy:
+      mode: replicated
+      replicas: 1
+      placement:
+        constraints:
+          - node.hostname == akswnc4.aksw.uni-leipzig.de
+    ports:
+      - 28188:8188
+
+  hive-server:
+    image: bde2020/hive:2.3.2-postgresql-metastore
+    container_name: hive-server
+    env_file:
+      - ./hadoop-hive.env
+    volumes:
+      - ../../files:/data/hive/files
+    environment:
+      HIVE_CORE_CONF_javax_jdo_option_ConnectionURL: "jdbc:postgresql://hive-metastore/metastore"
+      SERVICE_PRECONDITION: "hive-metastore:9083"
+    ports:
+      - "10000:10000"
+    networks:
+      - hadoop
+
+  hive-metastore:
+    image: bde2020/hive:2.3.2-postgresql-metastore
+    container_name: hive-metastore
+    env_file:
+      - ./hadoop-hive.env
+    command: /opt/hive/bin/hive --service metastore
+    environment:
+      SERVICE_PRECONDITION: "namenode:9000 namenode:9870 datanode1:9864 datanode2:9864 hive-metastore-postgresql:5432"
+    ports:
+      - "9083:9083"
+    networks:
+      - hadoop
+
+  hive-metastore-postgresql:
+    image: bde2020/hive-metastore-postgresql:2.3.0
+    container_name: hive-metastore-postgresql
+    env_file:
+      - ./hadoop-hive.env
+    networks:
+      - hadoop
+    ports:
+      - "5432:5432"
+
+  spark-master:
+    image: bde2020/spark-master:3.1.1-hadoop3.2
+    container_name: spark-master
+    volumes:
+      - ../../files:/data/spark/files
+      - ../../python:/data/spark/python
+    ports:
+      - "28083:8080"
+      - "7077:7077"
+    environment:
+      - INIT_DAEMON_STEP=setup_spark
+    networks:
+      - hadoop
+  spark-worker-1:
+    image: bde2020/spark-worker:3.1.1-hadoop3.2
+    container_name: spark-worker-1
+    depends_on:
+      - spark-master
+    ports:
+      - "28081:8081"
+    environment:
+      - "SPARK_MASTER=spark://spark-master:7077"
+    networks:
+      - hadoop
+  spark-worker-2:
+    image: bde2020/spark-worker:3.1.1-hadoop3.2
+    container_name: spark-worker-2
+    depends_on:
+      - spark-master
+    ports:
+      - "28082:8081"
+    environment:
+      - "SPARK_MASTER=spark://spark-master:7077"
+    networks:
+      - hadoop
+
+volumes:
+  hadoop_datanode_1:
+  hadoop_datanode_2:
+  hadoop_namenode:
+  hadoop_historyserver:
+
+networks:
+  hadoop:
+
diff --git a/docker-files/hadoop/hadoop-16goRAM.env b/docker-files/hadoop/hadoop-16goRAM.env
new file mode 100644
index 0000000000000000000000000000000000000000..2aba2e7a3a8d83d766f412be5aaedda1e746cc1b
--- /dev/null
+++ b/docker-files/hadoop/hadoop-16goRAM.env
@@ -0,0 +1,43 @@
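+# Memory profile for hosts with roughly 16 GB of RAM; start-hadoop.sh copies it over hadoop.env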
+CORE_CONF_fs_defaultFS=hdfs://namenode:9000
+CORE_CONF_hadoop_http_staticuser_user=root
+CORE_CONF_hadoop_proxyuser_hue_hosts=*
+CORE_CONF_hadoop_proxyuser_hue_groups=*
+CORE_CONF_io_compression_codecs=org.apache.hadoop.io.compress.SnappyCodec
+
+HDFS_CONF_dfs_webhdfs_enabled=true
+HDFS_CONF_dfs_permissions_enabled=false
+HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false
+
+YARN_CONF_yarn_log___aggregation___enable=true
+YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/
+YARN_CONF_yarn_resourcemanager_recovery_enabled=true
+YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
+YARN_CONF_yarn_resourcemanager_scheduler_class=org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___mb=4096
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___vcores=2
+YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
+YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
+YARN_CONF_yarn_resourcemanager_hostname=resourcemanager
+YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032
+YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030
+YARN_CONF_yarn_resourcemanager_resource__tracker_address=resourcemanager:8031
+YARN_CONF_yarn_timeline___service_enabled=true
+YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
+YARN_CONF_yarn_timeline___service_hostname=historyserver
+YARN_CONF_mapreduce_map_output_compress=true
+YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
+YARN_CONF_yarn_nodemanager_resource_memory___mb=2048
+YARN_CONF_yarn_nodemanager_resource_cpu___vcores=2
+YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
+YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
+YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
+
+MAPRED_CONF_mapreduce_framework_name=yarn
+MAPRED_CONF_mapred_child_java_opts=-Xmx2048m
+MAPRED_CONF_mapreduce_map_memory_mb=2048
+MAPRED_CONF_mapreduce_reduce_memory_mb=1536
+MAPRED_CONF_mapreduce_map_java_opts=-Xmx1536m
+MAPRED_CONF_mapreduce_reduce_java_opts=-Xmx1024m
+MAPRED_CONF_yarn_app_mapreduce_am_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
+MAPRED_CONF_mapreduce_map_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
+MAPRED_CONF_mapreduce_reduce_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
diff --git a/docker-files/hadoop/hadoop-8goRAM.env b/docker-files/hadoop/hadoop-8goRAM.env
new file mode 100644
index 0000000000000000000000000000000000000000..d1fcc2164340f21b22448aa58a05bdaa9dd5c2af
--- /dev/null
+++ b/docker-files/hadoop/hadoop-8goRAM.env
@@ -0,0 +1,43 @@
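+# Memory profile for hosts with roughly 8 GB of RAM; start-hadoop.sh copies it over hadoop.env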
+CORE_CONF_fs_defaultFS=hdfs://namenode:9000
+CORE_CONF_hadoop_http_staticuser_user=root
+CORE_CONF_hadoop_proxyuser_hue_hosts=*
+CORE_CONF_hadoop_proxyuser_hue_groups=*
+CORE_CONF_io_compression_codecs=org.apache.hadoop.io.compress.SnappyCodec
+
+HDFS_CONF_dfs_webhdfs_enabled=true
+HDFS_CONF_dfs_permissions_enabled=false
+HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false
+
+YARN_CONF_yarn_log___aggregation___enable=true
+YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/
+YARN_CONF_yarn_resourcemanager_recovery_enabled=true
+YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
+YARN_CONF_yarn_resourcemanager_scheduler_class=org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___mb=4096
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___vcores=2
+YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
+YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
+YARN_CONF_yarn_resourcemanager_hostname=resourcemanager
+YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032
+YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030
+YARN_CONF_yarn_resourcemanager_resource__tracker_address=resourcemanager:8031
+YARN_CONF_yarn_timeline___service_enabled=true
+YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
+YARN_CONF_yarn_timeline___service_hostname=historyserver
+YARN_CONF_mapreduce_map_output_compress=true
+YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
+YARN_CONF_yarn_nodemanager_resource_memory___mb=1024
+YARN_CONF_yarn_nodemanager_resource_cpu___vcores=2
+YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
+YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
+YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
+
+MAPRED_CONF_mapreduce_framework_name=yarn
+MAPRED_CONF_mapred_child_java_opts=-Xmx1024m
+MAPRED_CONF_mapreduce_map_memory_mb=1024
+MAPRED_CONF_mapreduce_reduce_memory_mb=718
+MAPRED_CONF_mapreduce_map_java_opts=-Xmx718m
+MAPRED_CONF_mapreduce_reduce_java_opts=-Xmx512m
+MAPRED_CONF_yarn_app_mapreduce_am_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
+MAPRED_CONF_mapreduce_map_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
+MAPRED_CONF_mapreduce_reduce_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
diff --git a/docker-files/hadoop/hadoop-hive.env b/docker-files/hadoop/hadoop-hive.env
new file mode 100644
index 0000000000000000000000000000000000000000..489727e464a1ad035a6a2750e26ed392d69513eb
--- /dev/null
+++ b/docker-files/hadoop/hadoop-hive.env
@@ -0,0 +1,30 @@
+HIVE_SITE_CONF_javax_jdo_option_ConnectionURL=jdbc:postgresql://hive-metastore-postgresql/metastore
+HIVE_SITE_CONF_javax_jdo_option_ConnectionDriverName=org.postgresql.Driver
+HIVE_SITE_CONF_javax_jdo_option_ConnectionUserName=hive
+HIVE_SITE_CONF_javax_jdo_option_ConnectionPassword=hive
+HIVE_SITE_CONF_datanucleus_autoCreateSchema=false
+HIVE_SITE_CONF_hive_metastore_uris=thrift://hive-metastore:9083
+HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false
+
+CORE_CONF_fs_defaultFS=hdfs://namenode:9000
+CORE_CONF_hadoop_http_staticuser_user=root
+CORE_CONF_hadoop_proxyuser_hue_hosts=*
+CORE_CONF_hadoop_proxyuser_hue_groups=*
+
+HDFS_CONF_dfs_webhdfs_enabled=true
+HDFS_CONF_dfs_permissions_enabled=false
+
+YARN_CONF_yarn_log___aggregation___enable=true
+YARN_CONF_yarn_resourcemanager_recovery_enabled=true
+YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
+YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
+YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
+YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/
+YARN_CONF_yarn_timeline___service_enabled=true
+YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
+YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
+YARN_CONF_yarn_resourcemanager_hostname=resourcemanager
+YARN_CONF_yarn_timeline___service_hostname=historyserver
+YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032
+YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030
+YARN_CONF_yarn_resourcemanager_resource__tracker_address=resourcemanager:8031
diff --git a/docker-files/hadoop/hadoop.env b/docker-files/hadoop/hadoop.env
new file mode 100644
index 0000000000000000000000000000000000000000..2aba2e7a3a8d83d766f412be5aaedda1e746cc1b
--- /dev/null
+++ b/docker-files/hadoop/hadoop.env
@@ -0,0 +1,43 @@
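+# Default profile referenced by the compose files (same values as hadoop-16goRAM.env); start-hadoop.sh may replace it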
+CORE_CONF_fs_defaultFS=hdfs://namenode:9000
+CORE_CONF_hadoop_http_staticuser_user=root
+CORE_CONF_hadoop_proxyuser_hue_hosts=*
+CORE_CONF_hadoop_proxyuser_hue_groups=*
+CORE_CONF_io_compression_codecs=org.apache.hadoop.io.compress.SnappyCodec
+
+HDFS_CONF_dfs_webhdfs_enabled=true
+HDFS_CONF_dfs_permissions_enabled=false
+HDFS_CONF_dfs_namenode_datanode_registration_ip___hostname___check=false
+
+YARN_CONF_yarn_log___aggregation___enable=true
+YARN_CONF_yarn_log_server_url=http://historyserver:8188/applicationhistory/logs/
+YARN_CONF_yarn_resourcemanager_recovery_enabled=true
+YARN_CONF_yarn_resourcemanager_store_class=org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore
+YARN_CONF_yarn_resourcemanager_scheduler_class=org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___mb=4096
+YARN_CONF_yarn_scheduler_capacity_root_default_maximum___allocation___vcores=2
+YARN_CONF_yarn_resourcemanager_fs_state___store_uri=/rmstate
+YARN_CONF_yarn_resourcemanager_system___metrics___publisher_enabled=true
+YARN_CONF_yarn_resourcemanager_hostname=resourcemanager
+YARN_CONF_yarn_resourcemanager_address=resourcemanager:8032
+YARN_CONF_yarn_resourcemanager_scheduler_address=resourcemanager:8030
+YARN_CONF_yarn_resourcemanager_resource__tracker_address=resourcemanager:8031
+YARN_CONF_yarn_timeline___service_enabled=true
+YARN_CONF_yarn_timeline___service_generic___application___history_enabled=true
+YARN_CONF_yarn_timeline___service_hostname=historyserver
+YARN_CONF_mapreduce_map_output_compress=true
+YARN_CONF_mapred_map_output_compress_codec=org.apache.hadoop.io.compress.SnappyCodec
+YARN_CONF_yarn_nodemanager_resource_memory___mb=2048
+YARN_CONF_yarn_nodemanager_resource_cpu___vcores=2
+YARN_CONF_yarn_nodemanager_disk___health___checker_max___disk___utilization___per___disk___percentage=98.5
+YARN_CONF_yarn_nodemanager_remote___app___log___dir=/app-logs
+YARN_CONF_yarn_nodemanager_aux___services=mapreduce_shuffle
+
+MAPRED_CONF_mapreduce_framework_name=yarn
+MAPRED_CONF_mapred_child_java_opts=-Xmx2048m
+MAPRED_CONF_mapreduce_map_memory_mb=2048
+MAPRED_CONF_mapreduce_reduce_memory_mb=1536
+MAPRED_CONF_mapreduce_map_java_opts=-Xmx1536m
+MAPRED_CONF_mapreduce_reduce_java_opts=-Xmx1024m
+MAPRED_CONF_yarn_app_mapreduce_am_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
+MAPRED_CONF_mapreduce_map_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
+MAPRED_CONF_mapreduce_reduce_env=HADOOP_MAPRED_HOME=/opt/hadoop-3.2.1/
diff --git a/docker-files/hadoop/init.sql b/docker-files/hadoop/init.sql
new file mode 100644
index 0000000000000000000000000000000000000000..100d792491f25a8e55f395470c46d79768c7fa49
--- /dev/null
+++ b/docker-files/hadoop/init.sql
@@ -0,0 +1 @@
+CREATE DATABASE IF NOT EXISTS hue;
diff --git a/docker-files/hadoop/overrides/namenode/entrypoint.sh b/docker-files/hadoop/overrides/namenode/entrypoint.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0c93764ffc4074cf4b7bd4d9bf3201fa80d82e7a
--- /dev/null
+++ b/docker-files/hadoop/overrides/namenode/entrypoint.sh
@@ -0,0 +1,171 @@
+#!/bin/bash
+
+# Set some sensible defaults
+export CORE_CONF_fs_defaultFS=${CORE_CONF_fs_defaultFS:-hdfs://`hostname -f`:8020}
+
+function addProperty() {
+  local path=$1
+  local name=$2
+  local value=$3
+
+  local entry="<property><name>$name</name><value>${value}</value></property>"
+  local escapedEntry=$(echo $entry | sed 's/\//\\\//g')
+  sed -i "/<\/configuration>/ s/.*/${escapedEntry}\n&/" $path
+}
+
+function configure() {
+    local path=$1
+    local module=$2
+    local envPrefix=$3
+
+    local var
+    local value
+    
+    echo "Configuring $module"
+    for c in `printenv | perl -sne 'print "$1 " if m/^${envPrefix}_(.+?)=.*/' -- -envPrefix=$envPrefix`; do 
+        name=`echo ${c} | perl -pe 's/___/-/g; s/__/@/g; s/_/./g; s/@/_/g;'`
+        var="${envPrefix}_${c}"
+        value=${!var}
+        echo " - Setting $name=$value"
+        addProperty /etc/hadoop/$module-site.xml $name "$value"
+    done
+}
+
+configure /etc/hadoop/core-site.xml core CORE_CONF
+configure /etc/hadoop/hdfs-site.xml hdfs HDFS_CONF
+configure /etc/hadoop/yarn-site.xml yarn YARN_CONF
+configure /etc/hadoop/httpfs-site.xml httpfs HTTPFS_CONF
+configure /etc/hadoop/kms-site.xml kms KMS_CONF
+configure /etc/hadoop/mapred-site.xml mapred MAPRED_CONF
+
+if [ "$MULTIHOMED_NETWORK" = "1" ]; then
+    echo "Configuring for multihomed network"
+
+    # HDFS
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.rpc-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.servicerpc-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.http-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.https-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.client.use.datanode.hostname true
+    addProperty /etc/hadoop/hdfs-site.xml dfs.datanode.use.datanode.hostname true
+
+    # YARN
+    addProperty /etc/hadoop/yarn-site.xml yarn.resourcemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/yarn-site.xml yarn.timeline-service.bind-host 0.0.0.0
+
+    # MAPRED
+    addProperty /etc/hadoop/mapred-site.xml yarn.nodemanager.bind-host 0.0.0.0
+fi
+
+if [ -n "$GANGLIA_HOST" ]; then
+    mv /etc/hadoop/hadoop-metrics.properties /etc/hadoop/hadoop-metrics.properties.orig
+    mv /etc/hadoop/hadoop-metrics2.properties /etc/hadoop/hadoop-metrics2.properties.orig
+
+    for module in mapred jvm rpc ugi; do
+        echo "$module.class=org.apache.hadoop.metrics.ganglia.GangliaContext31"
+        echo "$module.period=10"
+        echo "$module.servers=$GANGLIA_HOST:8649"
+    done > /etc/hadoop/hadoop-metrics.properties
+    
+    for module in namenode datanode resourcemanager nodemanager mrappmaster jobhistoryserver; do
+        echo "$module.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31"
+        echo "$module.sink.ganglia.period=10"
+        echo "$module.sink.ganglia.supportsparse=true"
+        echo "$module.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both"
+        echo "$module.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40"
+        echo "$module.sink.ganglia.servers=$GANGLIA_HOST:8649"
+    done > /etc/hadoop/hadoop-metrics2.properties
+fi
+
+function wait_for_it()
+{
+    local serviceport=$1
+    local service=${serviceport%%:*}
+    local port=${serviceport#*:}
+    local retry_seconds=5
+    local max_try=100
+    let i=1
+
+    nc -z $service $port
+    result=$?
+
+    until [ $result -eq 0 ]; do
+      echo "[$i/$max_try] check for ${service}:${port}..."
+      echo "[$i/$max_try] ${service}:${port} is not available yet"
+      if (( $i == $max_try )); then
+        echo "[$i/$max_try] ${service}:${port} is still not available; giving up after ${max_try} tries. :/"
+        exit 1
+      fi
+      
+      echo "[$i/$max_try] try in ${retry_seconds}s once again ..."
+      let "i++"
+      sleep $retry_seconds
+
+      nc -z $service $port
+      result=$?
+    done
+    echo "[$i/$max_try] $service:${port} is available."
+}
+
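+# wait until every host:port listed in SERVICE_PRECONDITION answers (gives up after max_try attempts)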
+for i in ${SERVICE_PRECONDITION[@]}
+do
+    wait_for_it ${i}
+done
+
+# remove problematic package source
+sed -i '$ d' /etc/apt/sources.list
+
+# create the admin user defined in the env file
+useradd -m -s /bin/bash -p "$(openssl passwd "$ADMIN_PASSWORD")" "$ADMIN_NAME"
+chown -R "$ADMIN_NAME" "/home/$ADMIN_NAME/"
+
+# install python
+if [[ $INSTALL_PYTHON == "true" ]]; then
+  apt-get update
+  apt-get install -y nano python
+fi
+
+# install sqoop
+if [[ $INSTALL_SQOOP == "true" ]]; then
+     
+  echo "export HADOOP_MAPRED_HOME=/opt/hadoop-3.1.1" >> /root/.bashrc
+  echo "export HADOOP_COMMON_HOME=/opt/hadoop-3.1.1" >> /root/.bashrc
+  echo "export HADOOP_HDFS_HOME=/opt/hadoop-3.1.1" >> /root/.bashrc
+  echo "export YARN_HOME=/opt/hadoop-3.1.1" >> /root/.bashrc
+  echo "export HADOOP_COMMON_LIB_NATIVE_DIR=/opt/hadoop-3.1.1/lib/native" >> /root/.bashrc
+  echo "export SQOOP_HOME=/usr/lib/sqoop" >> /root/.bashrc
+
+  echo "export HADOOP_MAPRED_HOME=/opt/hadoop-3.1.1" >> /home/$ADMIN_NAME/.bashrc
+  echo "export HADOOP_COMMON_HOME=/opt/hadoop-3.1.1" >> /home/$ADMIN_NAME/.bashrc
+  echo "export HADOOP_HDFS_HOME=/opt/hadoop-3.1.1" >> /home/$ADMIN_NAME/.bashrc
+  echo "export YARN_HOME=/opt/hadoop-3.1.1" >> /home/$ADMIN_NAME/.bashrc
+  echo "export HADOOP_COMMON_LIB_NATIVE_DIR=/opt/hadoop-3.1.1/lib/native" >> /home/$ADMIN_NAME/.bashrc
+  echo "export SQOOP_HOME=/usr/lib/sqoop" >> /home/$ADMIN_NAME/.bashrc
+
+  cd /tmp
+
+  curl http://us.mirrors.quenda.co/apache/sqoop/1.4.7/sqoop-1.4.7.bin__hadoop-2.6.0.tar.gz --output sqoop-1.4.7.bin__hadoop-2.6.0.tar.gz
+  tar -xvf sqoop-1.4.7.bin__hadoop-2.6.0.tar.gz
+  mv sqoop-1.4.7.bin__hadoop-2.6.0/ /usr/lib/sqoop
+  echo "export PATH=$PATH:/usr/lib/sqoop/bin" >> /root/.bashrc
+  echo "export PATH=$PATH:/usr/lib/sqoop/bin" >> /home/$ADMIN_NAME/.bashrc
+
+  curl https://downloads.mysql.com/archives/get/file/mysql-connector-java-8.0.16.tar.gz --output mysql-connector-java-8.0.16.tar.gz
+  tar -xvf mysql-connector-java-8.0.16.tar.gz
+  mv mysql-connector-java-8.0.16/mysql-connector-java-8.0.16.jar /usr/lib/sqoop/lib
+
+  curl https://jdbc.postgresql.org/download/postgresql-42.2.6.jar --output postgresql-42.2.6.jar
+  mv postgresql-42.2.6.jar /usr/lib/sqoop/lib
+
+  mv /usr/lib/sqoop/conf/sqoop-env-template.sh /usr/lib/sqoop/conf/sqoop-env.sh
+  echo "export HADOOP_COMMON_HOME=/opt/hadoop-3.1.1" >> /usr/lib/sqoop/conf/sqoop-env.sh
+  echo "export HADOOP_MAPRED_HOME=/opt/hadoop-3.1.1" >> /usr/lib/sqoop/conf/sqoop-env.sh
+
+  rm sqoop-1.4.7.bin__hadoop-2.6.0.tar.gz
+  rm mysql-connector-java-8.0.16.tar.gz
+
+fi
+
+exec "$@"
diff --git a/docker-files/hadoop/overrides/nodemanagers/entrypoint.sh b/docker-files/hadoop/overrides/nodemanagers/entrypoint.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b8bce7f244bc79052e2ee91d5305700b3a6854f3
--- /dev/null
+++ b/docker-files/hadoop/overrides/nodemanagers/entrypoint.sh
@@ -0,0 +1,125 @@
+#!/bin/bash
+
+# Set some sensible defaults
+export CORE_CONF_fs_defaultFS=${CORE_CONF_fs_defaultFS:-hdfs://`hostname -f`:8020}
+
+function addProperty() {
+  local path=$1
+  local name=$2
+  local value=$3
+
+  local entry="<property><name>$name</name><value>${value}</value></property>"
+  local escapedEntry=$(echo $entry | sed 's/\//\\\//g')
+  sed -i "/<\/configuration>/ s/.*/${escapedEntry}\n&/" $path
+}
+
+function configure() {
+    local path=$1
+    local module=$2
+    local envPrefix=$3
+
+    local var
+    local value
+    
+    echo "Configuring $module"
+    for c in `printenv | perl -sne 'print "$1 " if m/^${envPrefix}_(.+?)=.*/' -- -envPrefix=$envPrefix`; do 
+        name=`echo ${c} | perl -pe 's/___/-/g; s/__/@/g; s/_/./g; s/@/_/g;'`
+        var="${envPrefix}_${c}"
+        value=${!var}
+        echo " - Setting $name=$value"
+        addProperty /etc/hadoop/$module-site.xml $name "$value"
+    done
+}
+
+configure /etc/hadoop/core-site.xml core CORE_CONF
+configure /etc/hadoop/hdfs-site.xml hdfs HDFS_CONF
+configure /etc/hadoop/yarn-site.xml yarn YARN_CONF
+configure /etc/hadoop/httpfs-site.xml httpfs HTTPFS_CONF
+configure /etc/hadoop/kms-site.xml kms KMS_CONF
+configure /etc/hadoop/mapred-site.xml mapred MAPRED_CONF
+
+if [ "$MULTIHOMED_NETWORK" = "1" ]; then
+    echo "Configuring for multihomed network"
+
+    # HDFS
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.rpc-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.servicerpc-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.http-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.namenode.https-bind-host 0.0.0.0
+    addProperty /etc/hadoop/hdfs-site.xml dfs.client.use.datanode.hostname true
+    addProperty /etc/hadoop/hdfs-site.xml dfs.datanode.use.datanode.hostname true
+
+    # YARN
+    addProperty /etc/hadoop/yarn-site.xml yarn.resourcemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/yarn-site.xml yarn.nodemanager.bind-host 0.0.0.0
+    addProperty /etc/hadoop/yarn-site.xml yarn.timeline-service.bind-host 0.0.0.0
+
+    # MAPRED
+    addProperty /etc/hadoop/mapred-site.xml yarn.nodemanager.bind-host 0.0.0.0
+fi
+
+if [ -n "$GANGLIA_HOST" ]; then
+    mv /etc/hadoop/hadoop-metrics.properties /etc/hadoop/hadoop-metrics.properties.orig
+    mv /etc/hadoop/hadoop-metrics2.properties /etc/hadoop/hadoop-metrics2.properties.orig
+
+    for module in mapred jvm rpc ugi; do
+        echo "$module.class=org.apache.hadoop.metrics.ganglia.GangliaContext31"
+        echo "$module.period=10"
+        echo "$module.servers=$GANGLIA_HOST:8649"
+    done > /etc/hadoop/hadoop-metrics.properties
+    
+    for module in namenode datanode resourcemanager nodemanager mrappmaster jobhistoryserver; do
+        echo "$module.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31"
+        echo "$module.sink.ganglia.period=10"
+        echo "$module.sink.ganglia.supportsparse=true"
+        echo "$module.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both"
+        echo "$module.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40"
+        echo "$module.sink.ganglia.servers=$GANGLIA_HOST:8649"
+    done > /etc/hadoop/hadoop-metrics2.properties
+fi
+
+function wait_for_it()
+{
+    local serviceport=$1
+    local service=${serviceport%%:*}
+    local port=${serviceport#*:}
+    local retry_seconds=5
+    local max_try=100
+    let i=1
+
+    nc -z $service $port
+    result=$?
+
+    until [ $result -eq 0 ]; do
+      echo "[$i/$max_try] check for ${service}:${port}..."
+      echo "[$i/$max_try] ${service}:${port} is not available yet"
+      if (( $i == $max_try )); then
+        echo "[$i/$max_try] ${service}:${port} is still not available; giving up after ${max_try} tries. :/"
+        exit 1
+      fi
+      
+      echo "[$i/$max_try] try in ${retry_seconds}s once again ..."
+      let "i++"
+      sleep $retry_seconds
+
+      nc -z $service $port
+      result=$?
+    done
+    echo "[$i/$max_try] $service:${port} is available."
+}
+
+for i in ${SERVICE_PRECONDITION[@]}
+do
+    wait_for_it ${i}
+done
+
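+# remove problematic package source (same workaround as in the namenode entrypoint)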
+sed -i '$ d' /etc/apt/sources.list 
+
+# INSTALL PYTHON ON NODES
+if [[ $INSTALL_PYTHON == "true" ]]; then
+  apt-get update
+  apt-get install -y nano python
+fi
+
+exec "$@"
\ No newline at end of file
diff --git a/docker-files/hadoop/purge-hadoop.sh b/docker-files/hadoop/purge-hadoop.sh
new file mode 100755
index 0000000000000000000000000000000000000000..d6a9d541268bda65204f983c371fbfda234619a6
--- /dev/null
+++ b/docker-files/hadoop/purge-hadoop.sh
@@ -0,0 +1,8 @@
+#!/bin/bash
+
+SCRIPT_PATH=$(dirname $(realpath $0))
+DOCKER_COMPOSE_FILE="docker-compose.yml"
+DOCKER_COMPOSE_PATH=$SCRIPT_PATH/$DOCKER_COMPOSE_FILE
+
+docker compose -f $DOCKER_COMPOSE_PATH down -v
+docker volume list | grep hadoop | awk '{ print $2 }' | xargs docker volume rm --force
diff --git a/docker-files/hadoop/start-hadoop.sh b/docker-files/hadoop/start-hadoop.sh
new file mode 100755
index 0000000000000000000000000000000000000000..f83ee127acb4dd173ea7ba539334b4a865bb7d12
--- /dev/null
+++ b/docker-files/hadoop/start-hadoop.sh
@@ -0,0 +1,27 @@
+#!/bin/bash
+# 2024 change: check the available RAM and pick the matching hadoop.env file
+# 2024-05-20 Karnas: added the path variables
+
+SCRIPT_PATH=$(dirname $(realpath $0))
+DOCKER_COMPOSE_FILE="docker-compose.yml"
+DOCKER_COMPOSE_PATH=$SCRIPT_PATH/$DOCKER_COMPOSE_FILE
+
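+# profile selection (assumed thresholds): <= 8 GB -> 8go profile, < 16 GB -> 16go profile, otherwise keep the default hadoop.env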
+if [ "$(free --giga | awk '/^Mem/ { print $2 }')" -le 8 ]
+then
+  cp "$SCRIPT_PATH/hadoop-8goRAM.env" "$SCRIPT_PATH/hadoop.env"
+elif [ "$(free --giga | awk '/^Mem/ { print $2 }')" -lt 16 ]
+then
+  cp "$SCRIPT_PATH/hadoop-16goRAM.env" "$SCRIPT_PATH/hadoop.env"
+fi
+
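+# create the "hbase" network up front (not referenced by these compose files; presumably kept for a separate HBase stack)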
+docker network create hbase 2>/dev/null
+docker compose -f $DOCKER_COMPOSE_PATH up -d namenode hive-metastore-postgresql
+docker compose -f $DOCKER_COMPOSE_PATH up -d datanode1 datanode2
+docker compose -f $DOCKER_COMPOSE_PATH up -d resourcemanager nodemanager1 nodemanager2 historyserver
+docker compose -f $DOCKER_COMPOSE_PATH up -d hive-server hive-metastore
+docker compose -f $DOCKER_COMPOSE_PATH up -d spark-master spark-worker-1 spark-worker-2
+
+my_ip=`ip route get 1 | awk '{ for (i=1;i<=NF;i++) { if ( $i == "src" ) { print $(i+1) ; exit } } }'`
+echo "Namenode: (HDFS Filebrowser) http://${my_ip}:9870"
+echo "Spark-master: http://${my_ip}:28083"
+echo "History Server: http://${my_ip}:28188"
diff --git a/docker-files/hadoop/stop-hadoop.sh b/docker-files/hadoop/stop-hadoop.sh
new file mode 100755
index 0000000000000000000000000000000000000000..b523256a50e56e9e204fb59730281d0fa79878d2
--- /dev/null
+++ b/docker-files/hadoop/stop-hadoop.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+SCRIPT_PATH=$(dirname $(realpath $0))
+DOCKER_COMPOSE_FILE="docker-compose.yml"
+DOCKER_COMPOSE_PATH=$SCRIPT_PATH/$DOCKER_COMPOSE_FILE
+
+docker compose -f $DOCKER_COMPOSE_PATH stop
diff --git a/download_dataset.sh b/download_dataset.sh
index 2c1c5d1f149092831986227fc41927e9fa8042cd..ac74abdd9d945f5ec15fcad684534c68ed52c180 100755
--- a/download_dataset.sh
+++ b/download_dataset.sh
@@ -1,3 +1,3 @@
-wget "https://storage.googleapis.com/kaggle-data-sets/3384322/6207733/bundle/archive.zip?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=gcp-kaggle-com%40kaggle-161607.iam.gserviceaccount.com%2F20240519%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20240519T145233Z&X-Goog-Expires=259200&X-Goog-SignedHeaders=host&X-Goog-Signature=1ac8f9216a239f62f3aa19666ce2b09c188d1d34d5199cf254a3677292e1b893eb10d0e2280baf0cbfb1f21d38a2b99f55e3e080beaa4a376d07326750503e15f35e123e2efd21c2c300a82c5bc06c787528bbe5e0d6b7be5a31bc0e6fb458b9a59456233fb852c658827d1dd547ca683890de508dd88940526568357bdd28611409ed5db0e479abf7b6f98855cd942d0cebfae55d463f288640c594bce7e11cd9f460e941cec80a7713e7faa54e69e3e9c4e9e3cd87b11bc35aa74439f96f80c2d592c6a97519353ca099d62e7276bec190a99e9327aee45ab9531d86f8f6be65fb3931148dbd4342712849494a71adcfe0b4eb54051582393fe8a98ebf68bc" -c -O 'dataset.zip'
-mkdir dataset
-unzip dataset.zip -d dataset
+mkdir -p files/dataset
+wget "https://storage.googleapis.com/kaggle-data-sets/3384322/6207733/bundle/archive.zip?X-Goog-Algorithm=GOOG4-RSA-SHA256&X-Goog-Credential=gcp-kaggle-com%40kaggle-161607.iam.gserviceaccount.com%2F20240519%2Fauto%2Fstorage%2Fgoog4_request&X-Goog-Date=20240519T145233Z&X-Goog-Expires=259200&X-Goog-SignedHeaders=host&X-Goog-Signature=1ac8f9216a239f62f3aa19666ce2b09c188d1d34d5199cf254a3677292e1b893eb10d0e2280baf0cbfb1f21d38a2b99f55e3e080beaa4a376d07326750503e15f35e123e2efd21c2c300a82c5bc06c787528bbe5e0d6b7be5a31bc0e6fb458b9a59456233fb852c658827d1dd547ca683890de508dd88940526568357bdd28611409ed5db0e479abf7b6f98855cd942d0cebfae55d463f288640c594bce7e11cd9f460e941cec80a7713e7faa54e69e3e9c4e9e3cd87b11bc35aa74439f96f80c2d592c6a97519353ca099d62e7276bec190a99e9327aee45ab9531d86f8f6be65fb3931148dbd4342712849494a71adcfe0b4eb54051582393fe8a98ebf68bc" -c -O 'files/dataset.zip'
+unzip files/dataset.zip -d files/dataset
diff --git a/files/dataset/.gitkeep b/files/dataset/.gitkeep
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391