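# Container image shared by the HDFS, YARN, and Hive components below; a single
# hadoop_hive image is configured, so every pod in the chart presumably runs from it.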
image:
  repository: registry.cn-hangzhou.aliyuncs.com/bigdata_cloudnative/hadoop_hive
  tag: v1
  pullPolicy: IfNotPresent

# The version of the hadoop libraries being used in the image.
hadoopVersion: 3.3.5
logLevel: INFO

# Select antiAffinity as either hard or soft, default is soft
antiAffinity: "soft"
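
# HDFS daemons. Each subsection sets the replica count, a pdbMinAvailable value
# (presumably the minAvailable of a PodDisruptionBudget created by the chart),
# and pod resource requests/limits for one component.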
hdfs:
  nameNode:
    replicas: 1
    pdbMinAvailable: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "2048Mi"
        cpu: "1000m"

  dataNode:
    # Will be used as dfs.datanode.hostname.
    # You still need to set up services + ingress for every DN; DataNodes will
    # expect to be reachable at this hostname, presumably on per-node ports
    # starting at the data/HTTP port-range-start values below.
    externalHostname: example.com
    externalDataPortRangeStart: 9866
    externalHTTPPortRangeStart: 9864

    replicas: 3
    pdbMinAvailable: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "2048Mi"
        cpu: "1000m"
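
  # WebHDFS REST API toggle, assumed to map to dfs.webhdfs.enabled; WebHDFS is
  # served on the NameNode/DataNode HTTP ports (9870/9864 by default in Hadoop 3).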
  webhdfs:
    enabled: true
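
  # JournalNodes store the shared edit log for NameNode HA via the Quorum Journal
  # Manager; three replicas is the usual minimum quorum. This only has an effect if
  # the image's hdfs-site.xml actually configures HA (assumption).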
  journalNode:
    replicas: 3
    pdbMinAvailable: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "2048Mi"
        cpu: "1000m"
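
# MapReduce JobHistory Server; its web UI normally listens on port 19888, matching
# the service definition further down.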
mrHistoryserver:
  pdbMinAvailable: 1
  replicas: 1
  resources:
    requests:
      memory: "1024Mi"
      cpu: "1000m"
    limits:
      memory: "1024Mi"
      cpu: "1000m"
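
# YARN daemons: ResourceManager, NodeManagers, and the web application proxy.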
yarn:
  resourceManager:
    pdbMinAvailable: 1
    replicas: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "1024Mi"
        cpu: "1000m"
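
  # Sizing note (assumption about the image's yarn-site.xml): keep
  # yarn.nodemanager.resource.memory-mb and yarn.nodemanager.resource.cpu-vcores
  # within the limits below, otherwise YARN advertises more capacity than each
  # NodeManager pod can actually use.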
  nodeManager:
    pdbMinAvailable: 1
    # The number of YARN NodeManager instances.
    replicas: 3
    # Create statefulsets in parallel (K8S 1.7+)
    parallelCreate: false
    # CPU and memory resources allocated to each node manager pod.
    # This should be tuned to fit your workload.
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "2048Mi"
        cpu: "1000m"
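
  # Presumably the YARN Web Application Proxy run as a standalone service
  # (when not run standalone it is embedded in the ResourceManager).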
  proxyServer:
    pdbMinAvailable: 1
    replicas: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "1024Mi"
        cpu: "1000m"
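
# Hive services: a standalone Metastore and HiveServer2.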
hive:
  metastore:
    replicas: 1
    pdbMinAvailable: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "2048Mi"
        cpu: "1000m"

  hiveserver2:
    replicas: 1
    pdbMinAvailable: 1
    resources:
      requests:
        memory: "1024Mi"
        cpu: "1000m"
      limits:
        memory: "1024Mi"
        cpu: "1000m"
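
# Persistent storage. With enabledStorageClass: false the chart presumably falls back
# to the hostPath volumes listed under "volumes" instead of dynamically provisioned
# PVCs; the commented-out "local" entries look like examples for local PersistentVolumes.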
persistence:
  nameNode:
    enabled: true
    enabledStorageClass: false
    storageClass: "hadoop-nn-local-storage"
    accessMode: ReadWriteOnce
    size: 1Gi
    local:
    #- name: hadoop-nn-0
    #  host: "local-168-182-110"
    #  path: "/opt/bigdata/servers/hadoop/nn/data/data1"
    volumes:
      - name: nn1
        mountPath: /opt/apache/hadoop/data/hdfs/namenode
        hostPath: /opt/bigdata/servers/hadoop/nn/data/data1
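
  # DataNode storage: one hostPath directory per data dir. Presumably
  # dfs.datanode.data.dir in the image lists the three mountPath values below.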
  dataNode:
    enabled: true
    enabledStorageClass: false
    storageClass: "hadoop-dn-local-storage"
    accessMode: ReadWriteOnce
    size: 1Gi
    #local:
    #- name: hadoop-dn-0
    #  host: "local-168-182-110"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data1"
    #- name: hadoop-dn-1
    #  host: "local-168-182-110"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data2"
    #- name: hadoop-dn-2
    #  host: "local-168-182-110"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data3"
    #- name: hadoop-dn-3
    #  host: "local-168-182-111"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data1"
    #- name: hadoop-dn-4
    #  host: "local-168-182-111"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data2"
    #- name: hadoop-dn-5
    #  host: "local-168-182-111"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data3"
    #- name: hadoop-dn-6
    #  host: "local-168-182-112"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data1"
    #- name: hadoop-dn-7
    #  host: "local-168-182-112"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data2"
    #- name: hadoop-dn-8
    #  host: "local-168-182-112"
    #  path: "/opt/bigdata/servers/hadoop/dn/data/data3"
    volumes:
      - name: dfs1
        mountPath: /opt/apache/hdfs/datanode1
        hostPath: /opt/bigdata/servers/hadoop/dn/data/data1
      - name: dfs2
        mountPath: /opt/apache/hdfs/datanode2
        hostPath: /opt/bigdata/servers/hadoop/dn/data/data2
      - name: dfs3
        mountPath: /opt/apache/hdfs/datanode3
        hostPath: /opt/bigdata/servers/hadoop/dn/data/data3
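
# Service exposure. Every endpoint is published as a NodePort service, so the nodePort
# values must fall inside the cluster's NodePort range (30000-32767 by default).
# Port 9000 is presumably the NameNode RPC port (fs.defaultFS); 9870 is the Hadoop 3
# default NameNode web UI port.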
service:
  nameNode:
    type: NodePort
    ports:
      dfs: 9000
      webhdfs: 9870
    nodePorts:
      dfs: 30900
      webhdfs: 30870
  dataNode:
    type: NodePort
    ports:
      webhdfs: 9864
    nodePorts:
      webhdfs: 30864
  mrHistoryserver:
    type: NodePort
    ports:
      web: 19888
    nodePorts:
      web: 30888
  resourceManager:
    type: NodePort
    ports:
      web: 8088
    nodePorts:
      web: 30088
  nodeManager:
    type: NodePort
    ports:
      web: 8042
    nodePorts:
      web: 30042
  proxyServer:
    type: NodePort
    ports:
      web: 9111
    nodePorts:
      web: 30911
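
  # Hive endpoints: 9083 is the default Metastore Thrift port and 10000 the default
  # HiveServer2 Thrift port.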
  hive:
    metastore:
      type: NodePort
      port: 9083
      nodePort: 31183
    hiveserver2:
      type: NodePort
      port: 10000
      nodePort: 30000
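
# Pod security settings. runAsUser: 10000 runs the containers as a non-root UID;
# privileged: true gives them full host privileges, presumably so they can write to
# the hostPath data directories above (assumption).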
securityContext:
  runAsUser: 10000
  privileged: true
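
# Example install (hypothetical release name, namespace, and chart path):
#   helm install hadoop ./hadoop-on-kubernetes -n hadoop --create-namespace -f values.yaml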