#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -e # exit on error
cd "$(dirname "$0")" # connect to root
docker build -t hadoop-build dev-support/docker
if [ "$(uname -s)" = "Linux" ]; then
USER_NAME=${SUDO_USER:=$USER}
USER_ID=$(id -u "${USER_NAME}")
GROUP_ID=$(id -g "${USER_NAME}")
# man docker-run
# When using SELinux, mounted directories may not be accessible
# to the container. To work around this, with Docker prior to 1.7
# one needs to run the "chcon -Rt svirt_sandbox_file_t" command on
# the directories. With Docker 1.7 and later the z mount option
# does this automatically.
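  # For illustration only: with V_OPTS set to ":z", the bind mounts in the
  # docker run below become e.g. -v "${PWD}:/home/${USER_NAME}/hadoop:z",
  # which tells Docker to relabel the host directory so the container can access it.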
  if command -v selinuxenabled >/dev/null && selinuxenabled; then
    DCKR_VER=$(docker -v|
      awk '$1 == "Docker" && $2 == "version" {split($3,ver,".");print ver[1]"."ver[2]}')
    DCKR_MAJ=${DCKR_VER%.*}
    DCKR_MIN=${DCKR_VER#*.}
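    # Illustrative example of the parsing above: output such as
    # "Docker version 20.10.7, build abcdef0" yields DCKR_VER=20.10,
    # DCKR_MAJ=20 and DCKR_MIN=10.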
if [ "${DCKR_MAJ}" -eq 1 ] && [ "${DCKR_MIN}" -ge 7 ] ||
[ "${DCKR_MAJ}" -gt 1 ]; then
V_OPTS=:z
else
for d in "${PWD}" "${HOME}/.m2"; do
ctx=$(stat --printf='%C' "$d"|cut -d':' -f3)
if [ "$ctx" != svirt_sandbox_file_t ] && [ "$ctx" != container_file_t ]; then
printf 'INFO: SELinux is enabled.\n'
printf '\tMounted %s may not be accessible to the container.\n' "$d"
printf 'INFO: If so, on the host, run the following command:\n'
printf '\t# chcon -Rt svirt_sandbox_file_t %s\n' "$d"
fi
done
fi
fi
else # boot2docker uid and gid
USER_NAME=$USER
USER_ID=1000
GROUP_ID=50
fi
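# Build a small per-user image on top of hadoop-build so that the UID/GID inside
# the container match the invoking user; files written into the mounted source
# tree and ~/.m2 then remain owned by that user on the host.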
docker build -t "hadoop-build-${USER_ID}" - <<UserSpecificDocker
FROM hadoop-build
RUN groupadd --non-unique -g ${GROUP_ID} ${USER_NAME}
RUN useradd -g ${GROUP_ID} -u ${USER_ID} -k /root -m ${USER_NAME}
RUN echo "${USER_NAME} ALL=NOPASSWD: ALL" > "/etc/sudoers.d/hadoop-build-${USER_ID}"
ENV HOME /home/${USER_NAME}
UserSpecificDocker
# If this env variable is empty, docker will be started
# in non-interactive mode
DOCKER_INTERACTIVE_RUN=${DOCKER_INTERACTIVE_RUN-"-i -t"}
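# Usage note: setting this to an empty string (e.g. DOCKER_INTERACTIVE_RUN=""
# in the environment before running this script) starts the container
# non-interactively, which is useful on CI hosts without a TTY.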
# By mapping the .m2 directory you can do an mvn install from
# within the container and use the result on your normal
# system. It also speeds up subsequent builds significantly
# because the dependencies are downloaded only once.
docker run --rm=true $DOCKER_INTERACTIVE_RUN \
  -v "${PWD}:/home/${USER_NAME}/hadoop${V_OPTS:-}" \
  -w "/home/${USER_NAME}/hadoop" \
  -v "${HOME}/.m2:/home/${USER_NAME}/.m2${V_OPTS:-}" \
  -u "${USER_NAME}" \
  "hadoop-build-${USER_ID}" "$@"