Components to deploy
● mysql
● hive-metastore
● hive-server2
Configuration file
yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: hive
data:
  hive-site.xml: |-
    <?xml version="1.0"?>
    <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
    <configuration>
      <property>
        <name>javax.jdo.option.ConnectionUserName</name>
        <value>root</value>
      </property>
      <property>
        <name>javax.jdo.option.ConnectionPassword</name>
        <value>xxxxx</value>
      </property>
      <property>
        <name>javax.jdo.option.ConnectionURL</name>
        <value>jdbc:mysql://192.168.199.58:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
        <!-- change the IP address to the node that runs MySQL -->
      </property>
      <property>
        <name>javax.jdo.option.ConnectionDriverName</name>
        <value>com.mysql.jdbc.Driver</value>
      </property>
      <property>
        <name>hive.cli.print.current.db</name>
        <value>true</value>
      </property>
      <property>
        <name>hive.cli.print.header</name>
        <value>true</value>
      </property>
      <property>
        <name>hive.exec.post.hooks</name>
        <value>org.apache.atlas.hive.hook.HiveHook</value>
      </property>
      <property>
        <name>hive.metastore.uris</name>
        <value>thrift://192.168.199.57:9083</value>
        <!-- change the IP address to the node that runs the metastore -->
      </property>
      <property>
        <name>hive.exec.scratchdir</name>
        <value>/user/hive/tmp</value>
      </property>
      <property>
        <name>hive.metastore.warehouse.dir</name>
        <value>/user/hive/warehouse</value>
      </property>
      <property>
        <name>hive.querylog.location</name>
        <value>/tmp/hive/querylog</value>
      </property>
      <property>
        <name>hive.server2.webui.host</name>
        <value>0.0.0.0</value>
      </property>
      <property>
        <name>hive.server2.webui.port</name>
        <value>10002</value>
      </property>
      <property>
        <name>hive.server2.thrift.port</name>
        <value>10000</value>
      </property>
    </configuration>
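Note that the `&` inside the JDBC URL has to be written as `&amp;`, otherwise the embedded XML is invalid. A quick sanity check, assuming the manifest above is saved as hive-config.yaml: render it client-side first with a dry run, then inspect the stored data once it has actually been created (see the create commands further below).
shell
> kubectl.exe create -f .\hive-config.yaml -n bigdata --dry-run=client
> kubectl.exe get configmap hive -n bigdata -o yaml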
mysql deployment manifest
yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hive-mysql
  labels:
    app: hive-mysql
spec:
  selector:
    matchLabels:
      app: hive-mysql
  replicas: 1
  template:
    metadata:
      labels:
        app: hive-mysql
    spec:
      containers:
      - name: hive-mysql
        image: mysql:5.7.32
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 200m
            memory: 500Mi
        env:
        - name: MYSQL_ROOT_PASSWORD
          value: xxxxxx
        args:
        - --character-set-server=utf8mb4
        - --collation-server=utf8mb4_unicode_ci
        volumeMounts:
        - name: localtime
          mountPath: /etc/localtime
        - name: data
          mountPath: /var/lib/mysql
      volumes:
      - name: localtime
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
      - name: data
        hostPath:
          path: /var/lib/mysql
      restartPolicy: Always
      hostNetwork: true
      hostAliases:
      - ip: "192.168.199.56"
        hostnames:
        - "bigdata199056"
      - ip: "192.168.199.57"
        hostnames:
        - "bigdata199057"
      - ip: "192.168.199.58"
        hostnames:
        - "bigdata199058"
      nodeSelector:
        hive-mysql: "true"
      tolerations:
      - key: "bigdata"
        value: "true"
        operator: "Equal"
        effect: "NoSchedule"
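The nodeSelector and toleration above assume the target node already carries a hive-mysql=true label and the bigdata=true:NoSchedule taint. A minimal sketch, using bigdata199058 (one of the hosts from hostAliases) as a placeholder node name; skip the taint command if the node is already tainted:
shell
> kubectl.exe label node bigdata199058 hive-mysql=true
> kubectl.exe taint node bigdata199058 bigdata=true:NoSchedule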
hive-metastore deployment manifest
yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hive-metastore
  labels:
    app: hive-metastore
spec:
  selector:
    matchLabels:
      app: hive-metastore
  replicas: 1
  template:
    metadata:
      labels:
        app: hive-metastore
    spec:
      initContainers:
      - name: mysql-init
        image: hive:2.3.8
        imagePullPolicy: IfNotPresent
        command: # use schematool to validate the schema; initialize it only if validation fails (i.e. the schema has not been created yet)
        - "sh"
        - "-c"
        - "if ! schematool -dbType mysql -validate; then schematool -dbType mysql -initSchema; fi"
        volumeMounts:
        - name: localtime
          mountPath: /etc/localtime
        - name: hive-config
          mountPath: /opt/hive/conf/hive-site.xml
          subPath: hive-site.xml
      containers:
      - name: hive-metastore
        image: harbor.gistack.cn/library/hive:2.3.8
        imagePullPolicy: IfNotPresent
        resources:
          limits: # adjust according to your capacity plan
            cpu: 2000m
            memory: 4Gi
        command:
        - "sh"
        - "-c"
        - "hive --service metastore -v"
        volumeMounts:
        - name: localtime
          mountPath: /etc/localtime
        - name: hadoop-config
          mountPath: /opt/hadoop/etc/hadoop/core-site.xml
          subPath: core-site.xml
        - name: hadoop-config
          mountPath: /opt/hadoop/etc/hadoop/hdfs-site.xml
          subPath: hdfs-site.xml
        - name: hadoop-config
          mountPath: /opt/hadoop/etc/hadoop/yarn-site.xml
          subPath: yarn-site.xml
        - name: hadoop-config
          mountPath: /opt/hadoop/etc/hadoop/mapred-site.xml
          subPath: mapred-site.xml
        - name: hive-config
          mountPath: /opt/hive/conf/hive-site.xml
          subPath: hive-site.xml
      volumes:
      - name: localtime
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
      - name: hadoop-config
        configMap:
          name: hadoop
      - name: hive-config
        configMap:
          name: hive
      restartPolicy: Always
      hostNetwork: true
      hostAliases:
      - ip: "192.168.199.56"
        hostnames:
        - "bigdata199056"
      - ip: "192.168.199.57"
        hostnames:
        - "bigdata199057"
      - ip: "192.168.199.58"
        hostnames:
        - "bigdata199058"
      nodeSelector:
        hive-metastore: "true"
      tolerations:
      - key: "bigdata"
        value: "true"
        operator: "Equal"
        effect: "NoSchedule"
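The metastore Pod likewise only schedules onto a node labelled hive-metastore=true. Since hive.metastore.uris in the ConfigMap points at 192.168.199.57, the label presumably belongs on that host (bigdata199057 in this environment); adjust to your own layout:
shell
> kubectl.exe label node bigdata199057 hive-metastore=true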
Deploying mysql and hive-metastore
shell
> kubectl.exe create -f .\hive-config.yaml -n bigdata
configmap/hive created
> kubectl.exe create -f .\hive-mysql.yaml -n bigdata
deployment.apps/hive-mysql created
> kubectl.exe create -f .\hive-metastore.yaml -n bigdata
deployment.apps/hive-metastore created
>
hive-metastore database initialization and running status:
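To verify the schema initialization and the running state from the command line (a sketch, assuming the manifests above were applied unchanged and schematool is on the PATH of the hive image):
shell
> kubectl.exe logs -n bigdata deploy/hive-metastore -c mysql-init
> kubectl.exe exec -n bigdata deploy/hive-metastore -- schematool -dbType mysql -info
> kubectl.exe get pods -n bigdata -l app=hive-metastore -o wide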
Deploying hive-server2
yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: hiveserver2
  labels:
    app: hiveserver2
spec:
  selector:
    matchLabels:
      app: hiveserver2
  replicas: 1
  template:
    metadata:
      labels:
        app: hiveserver2
    spec:
      containers:
      - name: hiveserver2
        image: hive:2.3.8
        imagePullPolicy: IfNotPresent
        resources:
          limits:
            cpu: 2000m
            memory: 4Gi
        command:
        - "sh"
        - "-c"
        - "hive --service hiveserver2"
        volumeMounts:
        - name: localtime
          mountPath: /etc/localtime
        - name: hadoop-config
          mountPath: /opt/hadoop/etc/hadoop/core-site.xml
          subPath: core-site.xml
        - name: hadoop-config
          mountPath: /opt/hadoop/etc/hadoop/hdfs-site.xml
          subPath: hdfs-site.xml
        - name: hadoop-config
          mountPath: /opt/hadoop/etc/hadoop/yarn-site.xml
          subPath: yarn-site.xml
        - name: hadoop-config
          mountPath: /opt/hadoop/etc/hadoop/mapred-site.xml
          subPath: mapred-site.xml
        - name: hive-config
          mountPath: /opt/hive/conf/hive-site.xml
          subPath: hive-site.xml
      volumes:
      - name: localtime
        hostPath:
          path: /usr/share/zoneinfo/Asia/Shanghai
      - name: hadoop-config
        configMap:
          name: hadoop
      - name: hive-config
        configMap:
          name: hive
      restartPolicy: Always
      hostNetwork: true
      hostAliases:
      - ip: "192.168.199.56"
        hostnames:
        - "bigdata199056"
      - ip: "192.168.199.57"
        hostnames:
        - "bigdata199057"
      - ip: "192.168.199.58"
        hostnames:
        - "bigdata199058"
      nodeSelector:
        hiveserver2: "true"
      tolerations:
      - key: "bigdata"
        value: "true"
        operator: "Equal"
        effect: "NoSchedule"
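As before, label the node that should run HiveServer2 so the nodeSelector matches; bigdata199056 is used here purely as an example node name:
shell
> kubectl.exe label node bigdata199056 hiveserver2=true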
shell
> kubectl.exe create -f .\hiveserver2.yaml -n bigdata
deployment.apps/hiveserver2 created
>
Accessing the Hive web UI:
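With hive.server2.webui.port set to 10002 and hostNetwork enabled, the web UI is served directly on the node where hiveserver2 is scheduled; a quick check (replace the hostname with the node actually selected):
shell
> curl http://bigdata199056:10002/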
Connecting with a SQL client:
Creating a test table:
Inserting data and checking the job:
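The same connection, table creation, and insert can be driven from the command line with beeline inside the hiveserver2 Pod. A sketch, where test_tbl is only an example table name and root is just an example user; the INSERT is submitted as a YARN job, which is what appears in the running-task view:
shell
> kubectl.exe exec -it -n bigdata deploy/hiveserver2 -- beeline -u jdbc:hive2://localhost:10000 -n root
# statements entered at the beeline prompt:
CREATE TABLE IF NOT EXISTS test_tbl (id INT, name STRING);
INSERT INTO test_tbl VALUES (1, 'hello');
SELECT * FROM test_tbl;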