# values.yaml for adler/hadoop-on-kubernetes
# Source: https://gitee.com/adler1109/hadoop-on-kubernetes.git
# Committed by liugp on 2023-07-08 09:25 (init)
image:
  repository: registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/hadoop_hive
  tag: v1
  pullPolicy: IfNotPresent
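# Note: this image bundles Hadoop and Hive. To use a different build, override
# repository/tag at install time through the usual Helm mechanisms, e.g. a
# custom values file or "--set image.tag=<your-tag>" (tag name here is only an
# example).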
# The version of the hadoop libraries being used in the image.
hadoopVersion: 3.3.5
logLevel: INFO
# Select antiAffinity as either hard or soft, default is soft
antiAffinity: "soft"
hdfs:
  nameNode:
    replicas: 1
    pdbMinAvailable: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "2048Mi"
        cpu: "1000m"
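  # Note: pdbMinAvailable (here and in the sections below) presumably feeds a
  # PodDisruptionBudget so that at least that many pods stay up during
  # voluntary disruptions such as node drains; check the chart templates to
  # confirm.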
  dataNode:
    # Will be used as dfs.datanode.hostname
    # You still need to set up services + ingress for every DN
    # Datanodes will expect to
    externalHostname: example.com
    externalDataPortRangeStart: 9866
    externalHTTPPortRangeStart: 9864
    replicas: 3
    pdbMinAvailable: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "2048Mi"
        cpu: "1000m"
  
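  # Note: externalHostname and the two port-range starts describe how DataNodes
  # advertise themselves to clients outside the cluster (9866 is the default
  # HDFS data transfer port, 9864 the DataNode HTTP port); as the comment above
  # says, external access still needs a Service/Ingress per DataNode.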
  webhdfs:
    enabled: true
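  # Note: with WebHDFS enabled, the REST API should be reachable through the
  # NameNode/DataNode HTTP ports (9870/9864, exposed below as NodePorts
  # 30870/30864), e.g. http://<node-ip>:30870/webhdfs/v1/?op=LISTSTATUS
  # (URL shown only as an illustration of the standard WebHDFS path).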
  jounralNode:
    replicas: 3
    pdbMinAvailable: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "2048Mi"
        cpu: "1000m"
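  # Note: JournalNodes hold the shared edit log used for NameNode HA; three
  # replicas form the usual quorum. Whether this chart actually wires them into
  # an HA NameNode setup depends on the templates/configmap, so verify there.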
mrHistoryserver:
  pdbMinAvailable: 1
  replicas: 1
  resources:
    requests:
      memory: "1024Mi"
      cpu: "1000m"
    limits:
      memory: "1024Mi"
      cpu: "1000m"
yarn:
  resourceManager:
    pdbMinAvailable: 1
    replicas: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "1024Mi"
        cpu: "1000m"
  nodeManager:
    pdbMinAvailable: 1
    # The number of YARN NodeManager instances.
    replicas: 3
    # Create statefulsets in parallel (K8S 1.7+)
    parallelCreate: false
    # CPU and memory resources allocated to each node manager pod.
    # This should be tuned to fit your workload.
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "2048Mi"
        cpu: "1000m"
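  # Note: the NodeManager pod limits cap everything YARN can hand out as
  # containers on that pod, so yarn.nodemanager.resource.memory-mb and
  # yarn.nodemanager.resource.cpu-vcores in the Hadoop configuration should
  # stay at or below these values (where those are set depends on the chart's
  # configmap).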
  proxyServer:
    pdbMinAvailable: 1
    replicas: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "1024Mi"
        cpu: "1000m"
hive:
  metastore:
    replicas: 1
    pdbMinAvailable: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "2048Mi"
        cpu: "1000m"
  hiveserver2:
    replicas: 1
    pdbMinAvailable: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "1024Mi"
        cpu: "1000m"
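# Note: with the NodePorts defined under "service.hive" below, clients outside
# the cluster can presumably reach the Metastore thrift endpoint on
# <node-ip>:31183 and HiveServer2 on <node-ip>:30000, e.g. with beeline:
#   beeline -u "jdbc:hive2://<node-ip>:30000"
# (standard HiveServer2 JDBC URL; adjust auth settings to match the image).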
persistence:
  nameNode:
    enabled: true
    enabledStorageClass: false
    storageClass: "hadoop-nn-local-storage"
    accessMode: ReadWriteOnce
    size: 1Gi
    local:
    #- name: hadoop-nn-0
    #  host: "local-168-182-110"
    #  path: "/opt/bigdata/servers/hadoop/nn/data/data1"
    volumes:
    - name: nn1
      mountPath: /opt/apache/hadoop/data/hdfs/namenode
      hostPath: /opt/bigdata/servers/hadoop/nn/data/data1
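  # Note: with enabledStorageClass set to false, the entries under "volumes"
  # appear to be mounted as hostPath volumes on whichever node the pod lands
  # on, so the host directories must already exist (and the pods should be
  # pinned to those nodes); set enabledStorageClass to true to go through the
  # named StorageClass/PVCs instead. Check the statefulset templates to
  # confirm.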
  dataNode:
    enabled: true
    enabledStorageClass: false
    storageClass: "hadoop-dn-local-storage"
    accessMode: ReadWriteOnce
    size: 1Gi
    #local:
    #- name: hadoop-dn-0
    #  host: "local-168-182-110"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data1"
    #- name: hadoop-dn-1
    #  host: "local-168-182-110"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data2"
    #- name: hadoop-dn-2
    #  host: "local-168-182-110"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data3"
    #- name: hadoop-dn-3
    #  host: "local-168-182-111"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data1"
    #- name: hadoop-dn-4
    #  host: "local-168-182-111"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data2"
    #- name: hadoop-dn-5
    #  host: "local-168-182-111"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data3"
    #- name: hadoop-dn-6
    #  host: "local-168-182-112"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data1"
    #- name: hadoop-dn-7
    #  host: "local-168-182-112"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data2"
    #- name: hadoop-dn-8
    #  host: "local-168-182-112"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data3"
    volumes:
    - name: dfs1
      mountPath: /opt/apache/hdfs/datanode1
      hostPath: /opt/bigdata/servers/hadoop/dn/data/data1
    - name: dfs2
      mountPath: /opt/apache/hdfs/datanode2
      hostPath: /opt/bigdata/servers/hadoop/dn/data/data2
    - name: dfs3
      mountPath: /opt/apache/hdfs/datanode3
      hostPath: /opt/bigdata/servers/hadoop/dn/data/data3
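  # Note: the StorageClass names above ("hadoop-nn-local-storage",
  # "hadoop-dn-local-storage") are referenced but not defined in this file.
  # If the chart does not create them, a matching StorageClass (plus local
  # PersistentVolumes) must exist before enabling enabledStorageClass.
  # A minimal sketch of such a StorageClass, as a separate Kubernetes manifest:
  #
  #   apiVersion: storage.k8s.io/v1
  #   kind: StorageClass
  #   metadata:
  #     name: hadoop-dn-local-storage
  #   provisioner: kubernetes.io/no-provisioner
  #   volumeBindingMode: WaitForFirstConsumer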
service:
  nameNode:
    type: NodePort
    ports:
      dfs: 9000
      webhdfs: 9870
    nodePorts:
      dfs: 30900
      webhdfs: 30870
  dataNode:
    type: NodePort
    ports:
      webhdfs: 9864
    nodePorts:
      webhdfs: 30864
  mrHistoryserver:
    type: NodePort
    ports:
      web: 19888
    nodePorts:
      web: 30888
  resourceManager:
    type: NodePort
    ports:
      web: 8088
    nodePorts:
      web: 30088
  nodeManager:
    type: NodePort
    ports:
      web: 8042
    nodePorts:
      web: 30042
  proxyServer:
    type: NodePort
    ports:
      web: 9111
    nodePorts:
      web: 30911
  hive:
    metastore:
      type: NodePort
      port: 9083
      nodePort: 31183
    hiveserver2:
      type: NodePort
      port: 10000
      nodePort: 30000
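# Note: with these NodePort values, the main web UIs should be reachable on any
# cluster node at, for example, <node-ip>:30870 (NameNode), <node-ip>:30088
# (YARN ResourceManager) and <node-ip>:30888 (MapReduce JobHistory).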
securityContext:
  runAsUser: 10000
  privileged: true
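# Usage sketch (assuming values.yaml sits at the chart root and the release and
# namespace names below are only examples):
#   helm install hadoop . -n hadoop --create-namespace -f values.yaml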