Deploying Hadoop and Spark with Docker

version: '3'
services:
  spark:
    image: s1mplecc/spark-hadoop:3
    hostname: master
    environment:
      - SPARK_MODE=master
      - SPARK_RPC_AUTHENTICATION_ENABLED=no
      - SPARK_RPC_ENCRYPTION_ENABLED=no
      - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
      - SPARK_SSL_ENABLED=no
    volumes:
      - /data/docker-data/hadoop-spark/spark/share:/opt/share
    ports:
      - '8880:8080'
      - '4440:4040'
      - '8888:8088'
      - '8042:8042'
      - '9870:9870'
      - '19888:19888'
      - '7077:7077'
    extra_hosts:
      - "worker1:127.0.0.1"
      - "worker2:127.0.0.1"
      - "master:127.0.0.1"
  spark-worker-1:
    image: s1mplecc/spark-hadoop:3
    hostname: worker1
    environment:
      - SPARK_MODE=worker
      - SPARK_MASTER_URL=spark://master:7077
      - SPARK_WORKER_MEMORY=1G
      - SPARK_WORKER_CORES=1
      - SPARK_RPC_AUTHENTICATION_ENABLED=no
      - SPARK_RPC_ENCRYPTION_ENABLED=no
      - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
      - SPARK_SSL_ENABLED=no
    volumes:
      - /data/docker-data/hadoop-spark/spark/share:/opt/share
    ports:
      - '8881:8081'
    extra_hosts:
      - "worker1:127.0.0.1"
      - "worker2:127.0.0.1"
      - "master:127.0.0.1"
  spark-worker-2:
    image: s1mplecc/spark-hadoop:3
    hostname: worker2
    environment:
      - SPARK_MODE=worker
      - SPARK_MASTER_URL=spark://master:7077
      - SPARK_WORKER_MEMORY=1G
      - SPARK_WORKER_CORES=1
      - SPARK_RPC_AUTHENTICATION_ENABLED=no
      - SPARK_RPC_ENCRYPTION_ENABLED=no
      - SPARK_LOCAL_STORAGE_ENCRYPTION_ENABLED=no
      - SPARK_SSL_ENABLED=no
    volumes:
      - /data/docker-data/hadoop-spark/spark/share:/opt/share
    ports:
      - '8882:8081'
    extra_hosts:
      - "worker1:127.0.0.1"
      - "worker2:127.0.0.1"
      - "master:127.0.0.1"

# Bring the cluster up / tear it down:
# docker-compose -f docker_compose_spark_hadoop.yml up -d
# docker-compose -f docker_compose_spark_hadoop.yml down

# Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-spark
# Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-spark/issues

# Processes launched inside the containers:
# worker Spark Command: /opt/bitnami/java/bin/java -cp /opt/bitnami/spark/conf/:/opt/bitnami/spark/jars/*:/opt/hadoop/etc/hadoop/ -Xmx1g org.apache.spark.deploy.worker.Worker --webui-port 8081 spark://master:7077
# master Spark Command: /opt/bitnami/java/bin/java -cp /opt/bitnami/spark/conf/:/opt/bitnami/spark/jars/*:/opt/hadoop/etc/hadoop/ -Xmx1g org.apache.spark.deploy.master.Master --host master --port 7077 --webui-port 8080

# References:
# https://zhuanlan.zhihu.com/p/421375012?utm_medium=social&utm_oi=840347507367215104
# https://github.com/s1mplecc/spark-hadoop-docker

# Spark depends on Hadoop's client libraries, so each Spark distribution is built
# against a specific Hadoop version, e.g. spark-3.1.2-bin-hadoop3.2.tgz. The
# bitnami/spark image ships only the Hadoop client, not the server side, so if
# you need HDFS and YARN you must also deploy a Hadoop cluster.
# Co-locating Hadoop on the Spark cluster avoids unnecessary network traffic,
# and disk-oriented HDFS naturally complements memory-oriented Spark. Hence the
# idea of building a new image with Hadoop installed on top of bitnami/spark.

# Check which Hadoop version the running cluster bundles (from PySpark):
# sc._gateway.jvm.org.apache.hadoop.util.VersionInfo.getVersion()  # '3.3.4'

# Fixing "ValueError: Cannot run multiple SparkContexts at once; existing
# SparkContext(app=PySparkShell, master=local[*])" -- the PySpark shell has
# already created a context, so stop it first:
# sc.stop()
# then create your own:
# from pyspark import SparkConf, SparkContext
# conf = SparkConf().setAppName('My App')
# sc = SparkContext(conf=conf)
#
# count = sc.range(1, 1000 * 1000 * 100).filter(lambda x: x > 100).count()
# print('count: ', count)

# ./start-hadoop.sh
# Starting OpenBSD Secure Shell server: sshd.
# Starting namenodes on [master]
# Starting secondary namenodes [master]
# Starting resourcemanager
# Starting nodemanagers

# https://zhuanlan.zhihu.com/p/401967378
# Faster download mirrors for Hadoop:
# https://mirrors.tuna.tsinghua.edu.cn
# http://archive.apache.org/dist/hadoop/core/
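
To sanity-check the standalone cluster defined above, a minimal PySpark smoke test can be run from inside the master container. This is a sketch: the docker exec invocation and app name are illustrative; only the spark://master:7077 URL comes from the compose file.

# Run inside the master container, e.g. `docker exec -it <master-container> pyspark`
# (<master-container> is a placeholder for whatever name compose assigned).
from pyspark.sql import SparkSession

spark = (SparkSession.builder
         .master('spark://master:7077')   # master URL from the compose file
         .appName('smoke-test')
         .getOrCreate())

# Distribute a trivial computation across the two workers.
rdd = spark.sparkContext.parallelize(range(1000), numSlices=4)
print(rdd.map(lambda x: x * x).sum())     # expected: 332833500

spark.stop()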
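The port mappings above also expose the Web UIs on the host, which gives a quick way to confirm the services came up. The host ports below are taken from the ports: section; the script itself is just an illustrative check.

# Reachability check for the exposed Web UIs, run on the Docker host.
import urllib.request

for name, url in [
    ('Spark master UI', 'http://localhost:8880'),          # 8880:8080
    ('YARN ResourceManager UI', 'http://localhost:8888'),  # 8888:8088
    ('HDFS NameNode UI', 'http://localhost:9870'),         # 9870:9870
]:
    try:
        with urllib.request.urlopen(url, timeout=5) as resp:
            print(f'{name}: HTTP {resp.status}')
    except OSError as exc:
        print(f'{name}: unreachable ({exc})')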
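Once start-hadoop.sh has brought HDFS up, Spark can read and write it directly, which is the point of baking Hadoop into the image. A sketch, assuming fs.defaultFS is hdfs://master:9000 (verify against core-site.xml in the image; the /tmp/demo path is hypothetical):

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName('hdfs-demo').getOrCreate()

# Write a tiny DataFrame to HDFS and read it back.
df = spark.createDataFrame([(1, 'a'), (2, 'b')], ['id', 'tag'])
df.write.mode('overwrite').parquet('hdfs://master:9000/tmp/demo')
spark.read.parquet('hdfs://master:9000/tmp/demo').show()

spark.stop()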
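As an alternative to stopping the shell's context by hand, SparkContext.getOrCreate sidesteps the "multiple SparkContexts" error by reusing the live context instead of constructing a second one:

from pyspark import SparkConf, SparkContext

conf = SparkConf().setAppName('My App')
# Reuses the shell's existing SparkContext if one is running (in that case
# conf is ignored); otherwise creates a new context with this conf.
sc = SparkContext.getOrCreate(conf)

count = sc.range(1, 1000 * 1000 * 100).filter(lambda x: x > 100).count()
print('count:', count)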