# Passwordless SSH login setup (免密登录)
ifconfig                          # confirm this host's IP address
vi /etc/hosts                     # map master/slave1/slave2 hostnames to IPs
scp /etc/hosts slave1:/etc/       # push the hosts file to both slaves
scp /etc/hosts slave2:/etc/
ssh-keygen -t rsa                 # generate the RSA key pair (accept defaults)
ssh-copy-id master                # install the public key on every node, self included
ssh-copy-id slave1
ssh-copy-id slave2
# JDK unpack and install on master (jdk解压安装)
cd /opt/software
mkdir /opt/module
# fix: original used en-dashes for the tar options; "tab键" meant "press Tab to complete"
tar -zxvf jdk-*.tar.gz -C /opt/module     # TODO: confirm the actual JDK tarball name
cd /opt/module
mv jdk*/ jdk                              # normalize the unpacked directory name to 'jdk'
ls                                        # verify the result
vi /etc/profile
# Append to /etc/profile:
export JAVA_HOME=/opt/module/jdk
export PATH=$JAVA_HOME/bin:$PATH          # fix: original was missing the '$' before PATH
source /etc/profile
java -version                             # fix: original used an en-dash in '-version'
# Distribute the JDK to slave1/slave2 (slave1/slave2创建)
# fix: the two mkdirs must run on the slaves, not twice on master
ssh slave1 "mkdir -p /opt/module"
ssh slave2 "mkdir -p /opt/module"
scp -r /opt/module/jdk slave1:/opt/module/jdk
scp -r /opt/module/jdk slave2:/opt/module/jdk    # fix: original had '/opr/module' typo
# On slave1:
vi /etc/profile
export JAVA_HOME=/opt/module/jdk
export PATH=$JAVA_HOME/bin:$PATH                 # fix: PATH export was missing, so 'java -version' would fail
source /etc/profile
java -version
# On slave2:
vi /etc/profile
export JAVA_HOME=/opt/module/jdk
export PATH=$JAVA_HOME/bin:$PATH
source /etc/profile
java -version
# Hadoop installation and configuration (Hadoop配置)
cd /opt/software
# fix: original '-zxcf' would CREATE an archive; '-zxvf' extracts.
# Also use lowercase 'hadoop' -- Linux paths are case-sensitive and the
# scp commands below use the lowercase name.
tar -zxvf /opt/software/hadoop-*.tar.gz -C /opt/module
cd /opt/module
mv hadoop-*/ hadoop
vi /etc/profile
export HADOOP_HOME=/opt/module/hadoop
# fix: original expanded '$HADOOP' (undefined); also add sbin so start-all.sh resolves
export PATH=$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
source /etc/profile
hadoop version
# Edit the site configuration files under $HADOOP_HOME/etc/hadoop:
vim core-site.xml
vim hdfs-site.xml
vim yarn-site.xml
# NOTE(review): mapred-site.xml.template only exists in Hadoop 2.x; the 'workers'
# file below implies Hadoop 3.x, where mapred-site.xml already exists -- confirm version
cp mapred-site.xml.template mapred-site.xml
vi mapred-site.xml
vim workers
# workers file content (one host per line):
#   master
#   slave1
#   slave2
vi hadoop-env.sh
export JAVA_HOME=/opt/module/jdk
vi mapred-env.sh
export JAVA_HOME=/opt/module/jdk
vi yarn-env.sh
export JAVA_HOME=/opt/module/jdk
scp -r /opt/module/hadoop/ slave1:/opt/module/hadoop/
scp -r /opt/module/hadoop/ slave2:/opt/module/hadoop/
hdfs namenode -format          # one-time format of the NameNode metadata
start-all.sh                   # start HDFS and YARN daemons on all nodes
# Flume installation and configuration (Flume配置)
cd /opt/software
mkdir -p /opt/module
cd /opt/module
# fix: original used en-dashes for the tar options
tar -zxvf /opt/software/apache-flume-*.tar.gz -C /opt/module
mv /opt/module/apache-flume-*/ flume
vim /etc/profile
export FLUME_HOME=/opt/module/flume
export PATH=$FLUME_HOME/bin:$PATH      # fix: original had '$FMULE_HOME' typo
source /etc/profile
# Remove Flume's bundled guava jar -- presumably it conflicts with the
# version Hadoop 3 ships; confirm against the cluster's Hadoop version
rm -rf /opt/module/flume/lib/guava-*.jar
vim /opt/module/flume/conf/log4j.properties    # fix: filename was truncated to 'log4j.'
# Set in log4j.properties:
#   flume.log.dir=/opt/module/flume/logs
cd /opt/module/hadoop/sbin     # fix: original path '/opt/hadoop/sbin' missing 'module'
./start-all.sh
cd /opt/module/flume/bin       # fix: 'Cd' capitalization and stale apache-flume-... path
# fix: 'Flume-ng' capitalization, mixed en-dash/double-dash options, and the
# --conf-file pointed at 'log4j/properties'; an agent config file is expected here.
# NOTE(review): confirm the actual agent config filename.
flume-ng agent --conf conf/ --name a1 --conf-file /opt/module/flume/conf/flume-conf.properties -Dflume.root.logger=INFO,console
# Spark installation and configuration (Spark 配置)
cd /data/bigdata
tar -xvf /data/software/spark-2.3.2-bin-hadoop2.7.tgz
# fix: original mv source 'spark-bin-hadoop2.7' was missing the version prefix
mv spark-2.3.2-bin-hadoop2.7 spark-2.3.2
cd
vim /etc/profile
export SPARK_HOME=/data/bigdata/spark-2.3.2
export PATH=$SPARK_HOME/bin:$PATH       # fix: 'BIN' -> 'bin' (paths are case-sensitive)
source /etc/profile
cd /data/bigdata/spark-2.3.2/conf
cp spark-env.sh.template spark-env.sh
vim spark-env.sh
# Append to spark-env.sh:
# NOTE(review): JAVA_HOME=/opt/jdk differs from /opt/module/jdk used earlier -- confirm
export JAVA_HOME=/opt/jdk
export HADOOP_CONF_DIR=/opt/hadoop-2.7.3/etc/hadoop
export SPARK_DIST_CLASSPATH=$(/opt/hadoop-2.7.3/bin/hadoop classpath)
cd /data/bigdata
scp -r spark-2.3.2 root@slave1:/data/bigdata
scp -r spark-2.3.2 root@slave2:/data/bigdata
cd /data/bigdata/spark-2.3.2/conf       # fix: 'sprk-2.3.2' typo
cp slaves.template slaves
vim slaves
# Remove 'localhost'; list one worker host per line:
#   master
#   slave1
#   slave2
cd /data/bigdata/spark-2.3.2
./sbin/start-all.sh
jps                                     # fix: 'Jps' -> 'jps'
# Expect a Master (and Worker) process on master, and Worker on each slave
# Flink installation and configuration (FLINK配置)
mkdir -p /opt/module
# fix: en-dash in '-xvf' and broken spacing in '-C/opt/ module'
tar -xvf /opt/software/flink-1.14.6-bin-scala_2.12.tgz -C /opt/module
mv /opt/module/flink-1.14.6/ /opt/module/flink
vi /etc/profile
export FLINK_HOME=/opt/module/flink
export PATH="$FLINK_HOME/bin:$PATH"     # fix: curly quotes -> ASCII double quotes
source /etc/profile                     # fix: 'soure' typo
vim /opt/module/flink/conf/flink-conf.yaml
# flink-conf.yaml content:
#   jobmanager.rpc.address: master
vim /opt/module/flink/conf/workers      # fix: 'Vim' capitalization
# workers file content (one host per line, lowercase):
#   master
#   slave1
#   slave2
scp -r /opt/module/flink slave1:/opt/module    # fix: 'Scp' and en-dash in '-r'
scp -r /opt/module/flink slave2:/opt/module
cd /opt/module/flink                    # fix: needed so ./bin/start-cluster.sh resolves
./bin/start-cluster.sh
cd /opt/module/flink/bin                # fix: 'Cd' capitalization
./flink run -m master:8081 /opt/module/flink/examples/batch/WordCount.jar
/**
 * Reads the `order_info` table from MySQL, adds a constant `etl_date`
 * partition column, and overwrites the Hive table `ods.order_info_par`.
 *
 * @param args unused command-line arguments
 */
def main(args: Array[String]): Unit = {
  // Perform HDFS operations as the 'root' cluster user.
  System.setProperty("HADOOP_USER_NAME", "root")
  val sparkSession: SparkSession = SparkSession.builder()
    .master("local[*]")
    // Resolve datanodes by hostname rather than their (possibly internal) IPs.
    .config("dfs.client.use.datanode.hostname", "true")
    .config("hive.exec.dynamic.partition.mode", "nonstrict")
    // fix: this URL was split across two lines in the original source ("wa" / "rehouse/")
    .config("spark.sql.warehouse.dir", "hdfs://192.168.23.94:9820/user/hive/warehouse/")
    .appName("spark read mysql")
    .enableHiveSupport()
    .getOrCreate()
  val MYSQLDBURL: String = "jdbc:mysql://192.168.23.94:3306/ds_pub?useUnicode=true&characterEncoding=utf-8"
  val properties: Properties = new Properties()
  properties.put("user", "root")
  properties.put("password", "123456")
  // NOTE(review): com.mysql.jdbc.Driver is the legacy class name; MySQL
  // Connector/J 8+ uses com.mysql.cj.jdbc.Driver -- confirm the connector version.
  properties.put("driver", "com.mysql.jdbc.Driver")
  val readMySQLDF: DataFrame = sparkSession.read.jdbc(MYSQLDBURL, "order_info", properties)
  // Tag every row with the static partition value for this ETL run.
  val readMySQLDF2: DataFrame = readMySQLDF.withColumn("etl_date", lit("20230401"))
  readMySQLDF2.write
    .mode(SaveMode.Overwrite)
    .format("hive")
    .partitionBy("etl_date")
    .saveAsTable("ods.order_info_par")
  sparkSession.close()
}
# hbase-env.sh settings (fix: list-numbering prefixes '1.'/'2.'/'3.' removed --
# they made the lines invalid shell)
# NOTE(review): JAVA_HOME=/opt/jdk differs from /opt/module/jdk used in the JDK
# section above -- confirm which path this cluster actually uses
export JAVA_HOME=/opt/jdk
export HBASE_CLASSPATH=/opt/module/hadoop-2.7.3/etc/hadoop   # point HBase at the Hadoop config dir
export HBASE_MANAGES_ZK=false                                # HBase will not manage ZooKeeper itself
1.
2.
3.
4.
5.
6.
7.
8.
9.
10.
11.
12.
13.
14.
# ClickHouse installation -- the original notes were heavily garbled
# ("clickhuose -dient-2…", "Deckhouse"); commands reconstructed below.
# NOTE(review): confirm the actual ClickHouse package name and layout.
tar -zxvf /opt/software/clickhouse-*.tar.gz -C /opt/module
# Enable remote access: edit config.xml and uncomment the <listen_host> entry
# (设置远程访问 / 移除监听注释)
vim /opt/module/clickhouse-server-*/config.xml
# Start the server ('Deckhouse' in the notes is a typo for clickhouse)
cd /opt/module/clickhouse
./clickhouse start
# Check status
./clickhouse status