Hadoop-Yarn-Startup

I. Downloading the Source Code

Below is the official Hadoop source download location. I downloaded hadoop-3.2.4, so let's walk through it together.

Index of /dist/hadoop/core

II. The Shell Scripts

1. start-yarn.sh

If we want to start YARN on its own, we use $HADOOP_HOME/sbin/start-yarn.sh. Let's look at what start-yarn.sh contains.

bash
#!/usr/bin/env bash


## @description  usage info
## @audience     private
## @stability    evolving
## @replaceable  no
function hadoop_usage
{
  hadoop_generate_usage "${MYNAME}" false
}
# Get the name of the current script file
MYNAME="${BASH_SOURCE-$0}"

bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)

# Locate libexec
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
# shellcheck disable=SC2034
# Every issue that shellcheck reports has an ID of the form SC plus four digits
# SC2034 means: variable appears unused after assignment
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/yarn-config.sh" ]]; then
  . "${HADOOP_LIBEXEC_DIR}/yarn-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/yarn-config.sh." 2>&1
  exit 1
fi

HADOOP_JUMBO_RETCOUNTER=0

# start resourceManager
# Check whether YARN HA is enabled in the cluster; here we first look at the non-HA branch
HARM=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.enabled 2>&-)
if [[ ${HARM} = "false" ]]; then
  echo "Starting resourcemanager"
  # At this point you may wonder what hadoop_uservar_su actually is.
  # hadoop_uservar_su is a function defined in hadoop-functions.sh in the Hadoop common module.
  # Many similar functions below are defined there as well, so let's look at hadoop-functions.sh next.
  hadoop_uservar_su yarn resourcemanager "${HADOOP_YARN_HOME}/bin/yarn" \
      --config "${HADOOP_CONF_DIR}" \
      --daemon start \
      resourcemanager
  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
else
  logicals=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.rm-ids 2>&-)
  logicals=${logicals//,/ }
  for id in ${logicals}
  do
      rmhost=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey "yarn.resourcemanager.hostname.${id}" 2>&-)
      RMHOSTS="${RMHOSTS} ${rmhost}"
  done
  echo "Starting resourcemanagers on [${RMHOSTS}]"
  hadoop_uservar_su yarn resourcemanager "${HADOOP_YARN_HOME}/bin/yarn" \
      --config "${HADOOP_CONF_DIR}" \
      --daemon start \
      --workers \
      --hostnames "${RMHOSTS}" \
      resourcemanager
  (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
fi

# start nodemanager
echo "Starting nodemanagers"
hadoop_uservar_su yarn nodemanager "${HADOOP_YARN_HOME}/bin/yarn" \
    --config "${HADOOP_CONF_DIR}" \
    --workers \
    --daemon start \
    nodemanager
(( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))


# start proxyserver
PROXYSERVER=$("${HADOOP_HDFS_HOME}/bin/hdfs" getconf -confKey  yarn.web-proxy.address 2>&- | cut -f1 -d:)
if [[ -n ${PROXYSERVER} ]]; then
 hadoop_uservar_su yarn proxyserver "${HADOOP_YARN_HOME}/bin/yarn" \
      --config "${HADOOP_CONF_DIR}" \
      --workers \
      --hostnames "${PROXYSERVER}" \
      --daemon start \
      proxyserver
 (( HADOOP_JUMBO_RETCOUNTER=HADOOP_JUMBO_RETCOUNTER + $? ))
fi

exit ${HADOOP_JUMBO_RETCOUNTER}

Looking at start-yarn.sh as a whole, it starts three roles in total: resourcemanager, nodemanager, and proxyserver. Let's look at the resourcemanager first.

Before starting the resourcemanager, the script checks whether YARN HA is enabled; here we only analyze the case where HA is not enabled. (A quick way to reproduce that check by hand is sketched below.)
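It can help to run the same configuration query that start-yarn.sh runs before deciding which branch to take. This is a minimal sketch, assuming HADOOP_HOME is set and the cluster configuration is readable on this node; the script itself uses ${HADOOP_HDFS_HOME}/bin/hdfs and suppresses stderr with 2>&-.

bash
#!/usr/bin/env bash
# Minimal sketch: reproduce the HA check performed by start-yarn.sh.
# "false" (the default) means the non-HA branch is taken.
"${HADOOP_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.enabled

# When HA is enabled, the script additionally resolves the RM ids and their hostnames:
"${HADOOP_HOME}/bin/hdfs" getconf -confKey yarn.resourcemanager.ha.rm-ids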

2. hadoop-functions.sh

hadoop-functions.sh is more than 2,700 lines long; only the functions involved in YARN startup are listed here. Interested readers can study the rest on their own.

bash
#!/usr/bin/env bash

## @description  When running as root, execute the command via su, with extra support for commands
## @description  that may legitimately be started as root (e.g., the datanode). (This is used by the start-*/stop-* scripts.)
## @audience     private
## @stability    evolving
## @replaceable  no
## @param        user
## @param        commandstring
## @return       exitstatus
function hadoop_uservar_su
{

  declare program=$1
  declare command=$2
  shift 2

  declare uprogram
  declare ucommand
  declare uvar
  declare svar

  if hadoop_privilege_check; then
    uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)

    svar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" SECURE_USER)

    if [[ -n "${!uvar}" ]]; then
      hadoop_su "${!uvar}" "$@"
    elif [[ -n "${!svar}" ]]; then
      ## If we get here, SECURE_USER is defined but USER is not, and we are already privileged,
      ## so just run the command and hope for the best
      "$@"
    else
      hadoop_error "ERROR: Attempting to operate on ${program} ${command} as root"
      hadoop_error "ERROR: but there is no ${uvar} defined. Aborting operation."
      return 1
    fi
  else
    "$@"
  fi
}

## @description  Build a custom subcommand var
## @audience     public
## @stability    stable
## @replaceable  yes
## @param        command
## @param        subcommand
## @param        customid
## @return       string
function hadoop_build_custom_subcmd_var
{
  declare program=$1
  declare command=$2
  declare custom=$3
  declare uprogram
  declare ucommand

  if [[ -z "${BASH_VERSINFO[0]}" ]] \
     || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
    uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
    ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
  else
    uprogram=${program^^}
    ucommand=${command^^}
  fi

  echo "${uprogram}_${ucommand}_${custom}"
}


## @description  If running as root, execute the command via su as the given user, if that user can be found;
## @description  if not, fail and exit.
## @description  Otherwise just run it. (This is used by the start-*/stop-* scripts.)
## @audience     private
## @stability    evolving
## @replaceable  yes
## @param        user
## @param        commandstring
## @return       exitstatus
function hadoop_su
{
  declare user=$1
  shift

  if hadoop_privilege_check; then
    if hadoop_verify_user_resolves user; then
       su -l "${user}" -- "$@"
    else
      hadoop_error "ERROR: Refusing to run as root: ${user} account is not found. Aborting."
      return 1
    fi
  else
    "$@"
  fi
}

## @description  Given a filename or directory, return its absolute version
## @description  This is a replacement for readlink, which is not portable
## @audience     public
## @stability    stable
## @param        fsobj
## @replaceable  no
## @return       0 success
## @return       1 failure
## @return       stdout abspath
function hadoop_abs
{
  declare obj=$1
  declare dir
  declare fn
  declare dirret

  if [[ ! -e ${obj} ]]; then
    return 1
  elif [[ -d ${obj} ]]; then
    dir=${obj}
  else
    dir=$(dirname -- "${obj}")
    fn=$(basename -- "${obj}")
    fn="/${fn}"
  fi

  dir=$(cd -P -- "${dir}" >/dev/null 2>/dev/null && pwd -P)
  dirret=$?
  if [[ ${dirret} = 0 ]]; then
    echo "${dir}${fn}"
    return 0
  fi
  return 1
}


## @description  Determine whether the given subcommand needs to be re-executed (re-exec'd) as a different user.
## @audience     public
## @stability    stable
## @replaceable  yes
## @param        subcommand
## @return       1 on no re-exec needed
## @return       0 on need to re-exec
function hadoop_need_reexec
{
  declare program=$1
  declare command=$2
  declare uvar


  # Defaults to false
  if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then
    return 1
  fi

  # if we have privilege, and the _USER is defined, and _USER is
  # set to someone who isn't us, then yes, we should re-exec.
  # otherwise no, don't re-exec and let the system deal with it.

  if hadoop_privilege_check; then
    uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)
    if [[ -n ${!uvar} ]]; then
      if [[ ${!uvar} !=  "${USER}" ]]; then
        return 0
      fi
    fi
  fi
  return 1
}


## @description  Verify that ${USER} is allowed to execute the given subcommand
## @audience     public
## @stability    stable
## @replaceable  yes
## @param        command
## @param        subcommand
## @return       return 0 on success
## @return       exit 1 on failure
function hadoop_verify_user_perm
{
  declare program=$1
  declare command=$2
  declare uvar

  uvar=$(hadoop_build_custom_subcmd_var "${program}" "${command}" USER)

  if [[ -n ${!uvar} ]]; then
    if [[ ${!uvar} !=  "${USER}" ]]; then
      hadoop_error "ERROR: ${command} can only be executed by ${!uvar}."
      exit 1
    fi
  fi
  return 0
}


## @description  If HADOOP_SUBCMD_SUPPORTDAEMONIZATION is false,
## @description  append the HADOOP_CLIENT_OPTS variable to HADOOP_OPTS
## @audience     public
## @stability    stable
## @replaceable  yes
function hadoop_add_client_opts
{
  if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = false
     || -z "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" ]]; then
    hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
    HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
  fi
}

## @description  Utility routine to handle --workers mode
## @audience     private
## @stability    evolving
## @replaceable  yes
## @param        commandarray
function hadoop_common_worker_mode_execute
{
  #
  # The input should be the user-supplied command line as an array
  #
  local argv=("$@")

  # If --workers is still on the command line, remove it to prevent a loop.
  # Also remove --hostnames and --hosts along with their argument values
  local argsSize=${#argv[@]};
  for (( i = 0; i < argsSize; i++ ))
  do
    if [[ "${argv[$i]}" =~ ^--workers$ ]]; then
      unset argv[$i]
    elif [[ "${argv[$i]}" =~ ^--hostnames$ ]] ||
      [[ "${argv[$i]}" =~ ^--hosts$ ]]; then
      unset argv[$i];
      let i++;
      unset argv[$i];
    fi
  done
  if [[ ${QATESTMODE} = true ]]; then
    echo "${argv[@]}"
    return
  fi
  hadoop_connect_to_hosts -- "${argv[@]}"
}


## @description  Connect to ${HADOOP_WORKERS} or ${HADOOP_WORKER_NAMES} and execute the command.
## @audience     private
## @stability    evolving
## @replaceable  yes
## @param        command
## @param        [...]
function hadoop_connect_to_hosts
{
  # shellcheck disable=SC2124
  local params="$@"
  local worker_file
  local tmpslvnames

  #
  # ssh (or whatever) to a host
  #
  # The user can specify hostnames or a file containing them (but not both)
  if [[ -n "${HADOOP_WORKERS}" && -n "${HADOOP_WORKER_NAMES}" ]] ; then
    hadoop_error "ERROR: Both HADOOP_WORKERS and HADOOP_WORKER_NAMES were defined. Aborting."
    exit 1
  elif [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
    if [[ -n "${HADOOP_WORKERS}" ]]; then
      worker_file=${HADOOP_WORKERS}
    elif [[ -f "${HADOOP_CONF_DIR}/workers" ]]; then
      worker_file=${HADOOP_CONF_DIR}/workers
    elif [[ -f "${HADOOP_CONF_DIR}/slaves" ]]; then
      hadoop_error "WARNING: 'slaves' file has been deprecated. Please use 'workers' file instead."
      worker_file=${HADOOP_CONF_DIR}/slaves
    fi
  fi

  # If pdsh is available, let's use it. Otherwise default to an ssh loop. (ugh)
  if [[ -e '/usr/bin/pdsh' ]]; then
    if [[ -z "${HADOOP_WORKER_NAMES}" ]] ; then
      # If we were given a file, just let pdsh deal with it
      # shellcheck disable=SC2086
      PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
      -f "${HADOOP_SSH_PARALLEL}" -w ^"${worker_file}" $"${@// /\\ }" 2>&1
    else
      # No spaces are allowed in the pdsh arg host list
      # shellcheck disable=SC2086
      tmpslvnames=$(echo ${HADOOP_WORKER_NAMES} | tr -s ' ' ,)
      PDSH_SSH_ARGS_APPEND="${HADOOP_SSH_OPTS}" pdsh \
        -f "${HADOOP_SSH_PARALLEL}" \
        -w "${tmpslvnames}" $"${@// /\\ }" 2>&1
    fi
  else
    if [[ -z "${HADOOP_WORKER_NAMES}" ]]; then
      HADOOP_WORKER_NAMES=$(sed 's/#.*$//;/^$/d' "${worker_file}")
    fi
    hadoop_connect_to_hosts_without_pdsh "${params}"
  fi
}


## @description  Connect to ${HADOOP_WORKER_NAMES} and execute the command in environments without pdsh
## @audience     private
## @stability    evolving
## @replaceable  yes
## @param        command
## @param        [...]
function hadoop_connect_to_hosts_without_pdsh
{
  # shellcheck disable=SC2124
  local params="$@"
  local workers=(${HADOOP_WORKER_NAMES})
  for (( i = 0; i < ${#workers[@]}; i++ ))
  do
    if (( i != 0 && i % HADOOP_SSH_PARALLEL == 0 )); then
      wait
    fi
    # shellcheck disable=SC2086
    hadoop_actual_ssh "${workers[$i]}" ${params} &
  done
  wait
}

## @description  Via ssh, log in to 'hostname' and run 'command'
## @audience     private
## @stability    evolving
## @replaceable  yes
## @param        hostname
## @param        command
## @param        [...]
function hadoop_actual_ssh
{
  # We are passing this function to xargs; it should get the hostname followed by the rest of the command line
  local worker=$1
  shift

  # shellcheck disable=SC2086
  ssh ${HADOOP_SSH_OPTS} ${worker} $"${@// /\\ }" 2>&1 | sed "s/^/$worker: /"
}


## @description  Generic shell script option parser. Sets HADOOP_PARSE_COUNTER to the number of arguments the caller should shift
## @audience     private
## @stability    evolving
## @replaceable  yes
## @param        [parameters, typically "$@"]
function hadoop_parse_args
{
  HADOOP_DAEMON_MODE="default"
  HADOOP_PARSE_COUNTER=0

  # Not all of the options handled here are supported by all commands; they are:
  hadoop_add_option "--config dir" "Hadoop config directory"
  hadoop_add_option "--debug" "turn on shell script debug mode"
  hadoop_add_option "--help" "usage information"

  while true; do
    hadoop_debug "hadoop_parse_args: processing $1"
    case $1 in
      --buildpaths)
        HADOOP_ENABLE_BUILD_PATHS=true
        shift
        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
      ;;
      --config)
        shift
        confdir=$1
        shift
        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
        if [[ -d "${confdir}" ]]; then
          HADOOP_CONF_DIR="${confdir}"
        elif [[ -z "${confdir}" ]]; then
          hadoop_error "ERROR: No parameter provided for --config "
          hadoop_exit_with_usage 1
        else
          hadoop_error "ERROR: Cannot find configuration directory \"${confdir}\""
          hadoop_exit_with_usage 1
        fi
      ;;
      --daemon)
        shift
        HADOOP_DAEMON_MODE=$1
        shift
        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
        if [[ -z "${HADOOP_DAEMON_MODE}" || \
          ! "${HADOOP_DAEMON_MODE}" =~ ^st(art|op|atus)$ ]]; then
          hadoop_error "ERROR: --daemon must be followed by either \"start\", \"stop\", or \"status\"."
          hadoop_exit_with_usage 1
        fi
      ;;
      --debug)
        shift
        HADOOP_SHELL_SCRIPT_DEBUG=true
        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
      ;;
      --help|-help|-h|help|--h|--\?|-\?|\?)
        hadoop_exit_with_usage 0
      ;;
      --hostnames)
        shift
        HADOOP_WORKER_NAMES="$1"
        shift
        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
      ;;
      --hosts)
        shift
        hadoop_populate_workers_file "$1"
        shift
        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
      ;;
      --loglevel)
        shift
        # shellcheck disable=SC2034
        HADOOP_LOGLEVEL="$1"
        shift
        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+2))
      ;;
      --reexec)
        shift
        if [[ "${HADOOP_REEXECED_CMD}" = true ]]; then
          hadoop_error "ERROR: re-exec fork bomb prevention: --reexec already called"
          exit 1
        fi
        HADOOP_REEXECED_CMD=true
        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
      ;;
      --workers)
        shift
        # shellcheck disable=SC2034
        HADOOP_WORKER_MODE=true
        ((HADOOP_PARSE_COUNTER=HADOOP_PARSE_COUNTER+1))
      ;;
      *)
        break
      ;;
    esac
  done

  hadoop_debug "hadoop_parse: asking caller to skip ${HADOOP_PARSE_COUNTER}"
}


## @description  Add custom (program)_(command)_OPTS to HADOOP_OPTS.
## @description  Also handles the deprecated cases from pre-3.x releases.
## @audience     public
## @stability    evolving
## @replaceable  yes
## @param        program
## @param        subcommand
## @return       will exit on failure conditions
function hadoop_subcommand_opts
{
  declare program=$1
  declare command=$2
  declare uvar
  declare depvar
  declare uprogram
  declare ucommand

  if [[ -z "${program}" || -z "${command}" ]]; then
    return 1
  fi

  # bash 4 and up have built-in ways to upper and lower
  # case the contents of vars.  This is faster than
  # calling tr.

  ## We don't call hadoop_build_custom_subcmd_var here
  ## since we need to construct this for the deprecation
  ## cases. For Hadoop 4.x, this needs to get cleaned up.

  if [[ -z "${BASH_VERSINFO[0]}" ]] \
     || [[ "${BASH_VERSINFO[0]}" -lt 4 ]]; then
    uprogram=$(echo "${program}" | tr '[:lower:]' '[:upper:]')
    ucommand=$(echo "${command}" | tr '[:lower:]' '[:upper:]')
  else
    uprogram=${program^^}
    ucommand=${command^^}
  fi

  uvar="${uprogram}_${ucommand}_OPTS"

  # Let's handle all of the deprecation cases early
  # HADOOP_NAMENODE_OPTS -> HDFS_NAMENODE_OPTS

  depvar="HADOOP_${ucommand}_OPTS"

  if [[ "${depvar}" != "${uvar}" ]]; then
    if [[ -n "${!depvar}" ]]; then
      hadoop_deprecate_envvar "${depvar}" "${uvar}"
    fi
  fi

  if [[ -n ${!uvar} ]]; then
    hadoop_debug "Appending ${uvar} onto HADOOP_OPTS"
    HADOOP_OPTS="${HADOOP_OPTS} ${!uvar}"
    return 0
  fi
}



## @description Handle subcommands from main program entries
## @audience private
## @stability evolving
## @replaceable yes
function hadoop_generic_java_subcmd_handler
{
  declare priv_outfile
  declare priv_errfile
  declare priv_pidfile
  declare daemon_outfile
  declare daemon_pidfile
  declare secureuser

  # The default/expected way to determine whether a daemon will run in secure mode is defined by hadoop_detect_priv_subcmd.
  # If it returns true, set the secure-user var and tell the world we are in secure mode

  if hadoop_detect_priv_subcmd "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"; then
    HADOOP_SUBCMD_SECURESERVICE=true
    secureuser=$(hadoop_build_custom_subcmd_var "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}" SECURE_USER)

    if ! hadoop_verify_user_resolves "${secureuser}"; then
      hadoop_error "ERROR: User defined in ${secureuser} (${!secureuser}) does not exist. Aborting."
      exit 1
    fi

    HADOOP_SECURE_USER="${!secureuser}"
  fi

  # Check whether we are running in secure mode.
  # Based on the above, third parties may do things differently -- secure services need some extra setup --
  # if so, we need to define all of the priv and daemon stuff;
  # if not, we only need to define the daemon stuff.
  # Note that the daemon vars are deliberately different between the two

  if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then

    hadoop_subcommand_secure_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

    hadoop_verify_secure_prereq
    hadoop_setup_secure_service
    priv_outfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
    priv_errfile="${HADOOP_LOG_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.err"
    priv_pidfile="${HADOOP_PID_DIR}/privileged-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
    daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
    daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
  else
    daemon_outfile="${HADOOP_LOG_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.out"
    daemon_pidfile="${HADOOP_PID_DIR}/hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}.pid"
  fi

  # Are we actually in daemon mode?
  # If so, use the daemon logger and the appropriate log file.
  if [[ "${HADOOP_DAEMON_MODE}" != "default" ]]; then
    HADOOP_ROOT_LOGGER="${HADOOP_DAEMON_ROOT_LOGGER}"
    if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
      HADOOP_LOGFILE="hadoop-${HADOOP_SECURE_USER}-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
    else
      HADOOP_LOGFILE="hadoop-${HADOOP_IDENT_STRING}-${HADOOP_SUBCMD}-${HOSTNAME}.log"
    fi
  fi

  # Finish defining the environment: system properties, env vars, classpath, etc.
  hadoop_finalize

  # Do the hard work of launching a daemon or just executing our interactive Java class
  if [[ "${HADOOP_SUBCMD_SUPPORTDAEMONIZATION}" = true ]]; then
    if [[ "${HADOOP_SUBCMD_SECURESERVICE}" = true ]]; then
      hadoop_secure_daemon_handler \
        "${HADOOP_DAEMON_MODE}" \
        "${HADOOP_SUBCMD}" \
        "${HADOOP_SECURE_CLASSNAME}" \
        "${daemon_pidfile}" \
        "${daemon_outfile}" \
        "${priv_pidfile}" \
        "${priv_outfile}" \
        "${priv_errfile}" \
        "${HADOOP_SUBCMD_ARGS[@]}"
    else
      hadoop_daemon_handler \
        "${HADOOP_DAEMON_MODE}" \
        "${HADOOP_SUBCMD}" \
        "${HADOOP_CLASSNAME}" \
        "${daemon_pidfile}" \
        "${daemon_outfile}" \
        "${HADOOP_SUBCMD_ARGS[@]}"
    fi
    exit $?
  else
    hadoop_java_exec "${HADOOP_SUBCMD}" "${HADOOP_CLASSNAME}" "${HADOOP_SUBCMD_ARGS[@]}"
  fi
}

To summarize: switch to the yarn user and execute the following (a sketch of how that target user is resolved follows the command):

"${HADOOP_YARN_HOME}/bin/yarn" \

--config "${HADOOP_CONF_DIR}" \

--daemon start \

resourcemanager
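How does hadoop_uservar_su know which user to su to? It asks hadoop_build_custom_subcmd_var to turn the program and subcommand names into an environment variable name, then checks whether that variable is set. The following is a minimal sketch of that name construction outside the Hadoop scripts; exporting YARN_RESOURCEMANAGER_USER=yarn is only an example of what an administrator might put in hadoop-env.sh.

bash
#!/usr/bin/env bash
# Minimal sketch: how "yarn" + "resourcemanager" + "USER" becomes a variable name.
program=yarn
command=resourcemanager

# bash 4+ upper-cases with ${var^^}, just like hadoop_build_custom_subcmd_var
uvar="${program^^}_${command^^}_USER"         # -> YARN_RESOURCEMANAGER_USER
svar="${program^^}_${command^^}_SECURE_USER"  # -> YARN_RESOURCEMANAGER_SECURE_USER
echo "${uvar} ${svar}"

# Example only: with this exported (typically in hadoop-env.sh), a start-yarn.sh
# invoked by root will su to "yarn" before launching the resourcemanager.
export YARN_RESOURCEMANAGER_USER=yarn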

Next, let's look at how this is implemented in the yarn command.

3. The yarn command

bash
#!/usr/bin/env bash


# The name of the script being executed
HADOOP_SHELL_EXECNAME="yarn"
# MYNAME will hold the current script name, i.e. yarn
MYNAME="${BASH_SOURCE-$0}"

## @description  Build the usage text for the yarn command
## @audience     public
## @stability    stable
## @replaceable  no
function hadoop_usage
{
  hadoop_add_option "--buildpaths" "attempt to add class files from build tree"
  hadoop_add_option "--daemon (start|status|stop)" "operate on a daemon"
  hadoop_add_option "--hostnames list[,of,host,names]" "hosts to use in worker mode"
  hadoop_add_option "--loglevel level" "set the log4j level for this command"
  hadoop_add_option "--hosts filename" "list of hosts to use in worker mode"
  hadoop_add_option "--workers" "turn on worker mode"

  hadoop_add_subcommand "app|application" client "prints application(s) report/kill application/manage long running application"
  hadoop_add_subcommand "applicationattempt" client "prints applicationattempt(s) report"
  hadoop_add_subcommand "classpath" client "prints the class path needed to get the hadoop jar and the required libraries"
  hadoop_add_subcommand "cluster" client "prints cluster information"
  hadoop_add_subcommand "container" client "prints container(s) report"
  hadoop_add_subcommand "daemonlog" admin "get/set the log level for each daemon"
  hadoop_add_subcommand "envvars" client "display computed Hadoop environment variables"
  hadoop_add_subcommand "jar <jar>" client "run a jar file"
  hadoop_add_subcommand "logs" client "dump container logs"
  hadoop_add_subcommand "node" admin "prints node report(s)"
  hadoop_add_subcommand "nodemanager" daemon "run a nodemanager on each worker"
  hadoop_add_subcommand "proxyserver" daemon "run the web app proxy server"
  hadoop_add_subcommand "queue" client "prints queue information"
  hadoop_add_subcommand "registrydns" daemon "run the registry DNS server"
  hadoop_add_subcommand "resourcemanager" daemon "run the ResourceManager"
  hadoop_add_subcommand "rmadmin" admin "admin tools"
  hadoop_add_subcommand "router" daemon "run the Router daemon"
  hadoop_add_subcommand "schedulerconf" client "Updates scheduler configuration"
  hadoop_add_subcommand "scmadmin" admin "SharedCacheManager admin tools"
  hadoop_add_subcommand "sharedcachemanager" daemon "run the SharedCacheManager daemon"
  hadoop_add_subcommand "timelinereader" client "run the timeline reader server"
  hadoop_add_subcommand "timelineserver" daemon "run the timeline server"
  hadoop_add_subcommand "top" client "view cluster information"
  hadoop_add_subcommand "nodeattributes" client "node attributes cli client"
  hadoop_add_subcommand "version" client "print the version"
  hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
}

## @description  Default command handler for the yarn command
## @audience     public
## @stability    stable
## @replaceable  no
## @param        CLI arguments
function yarncmd_case
{
  subcmd=$1
  shift

  case ${subcmd} in
    app|application|applicationattempt|container)
      HADOOP_CLASSNAME=org.apache.hadoop.yarn.client.cli.ApplicationCLI
      set -- "${subcmd}" "$@"
      HADOOP_SUBCMD_ARGS=("$@")
      local sld="${HADOOP_YARN_HOME}/${YARN_DIR},\
${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR},\
${HADOOP_HDFS_HOME}/${HDFS_DIR},\
${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR},\
${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR},\
${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
      hadoop_translate_cygwin_path sld
      hadoop_add_param HADOOP_OPTS service.libdir "-Dservice.libdir=${sld}"
    ;;

    #...... omitted ......

    nodemanager)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/*"
      hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/lib/*"
      # The NodeManager startup class
      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.nodemanager.NodeManager'
      # Backwards compatibility
      if [[ -n "${YARN_NODEMANAGER_HEAPSIZE}" ]]; then
        HADOOP_HEAPSIZE_MAX="${YARN_NODEMANAGER_HEAPSIZE}"
      fi
    ;;
    proxyserver)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      # The WebAppProxyServer startup class
      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer'
      # Backwards compatibility
      if [[ -n "${YARN_PROXYSERVER_HEAPSIZE}" ]]; then
        HADOOP_HEAPSIZE_MAX="${YARN_PROXYSERVER_HEAPSIZE}"
      fi
    ;;
    resourcemanager)
      HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
      hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/*"
      hadoop_add_classpath "$HADOOP_YARN_HOME/$YARN_DIR/timelineservice/lib/*"
      # The ResourceManager startup class
      HADOOP_CLASSNAME='org.apache.hadoop.yarn.server.resourcemanager.ResourceManager'
      # Backwards compatibility
      if [[ -n "${YARN_RESOURCEMANAGER_HEAPSIZE}" ]]; then
        HADOOP_HEAPSIZE_MAX="${YARN_RESOURCEMANAGER_HEAPSIZE}"
      fi
      local sld="${HADOOP_YARN_HOME}/${YARN_DIR},\
${HADOOP_YARN_HOME}/${YARN_LIB_JARS_DIR},\
${HADOOP_HDFS_HOME}/${HDFS_DIR},\
${HADOOP_HDFS_HOME}/${HDFS_LIB_JARS_DIR},\
${HADOOP_COMMON_HOME}/${HADOOP_COMMON_DIR},\
${HADOOP_COMMON_HOME}/${HADOOP_COMMON_LIB_JARS_DIR}"
      # For details, see the handling logic of hadoop_translate_cygwin_path in step 2
      hadoop_translate_cygwin_path sld
      hadoop_add_param HADOOP_OPTS service.libdir "-Dservice.libdir=${sld}"
    ;;
    
    #...... omitted ......

    *)
      HADOOP_CLASSNAME="${subcmd}"
      if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then
        hadoop_exit_with_usage 1
      fi
    ;;
  esac
}

# Locate libexec...
if [[ -n "${HADOOP_HOME}" ]]; then
  HADOOP_DEFAULT_LIBEXEC_DIR="${HADOOP_HOME}/libexec"
else
  bin=$(cd -P -- "$(dirname -- "${MYNAME}")" >/dev/null && pwd -P)
  HADOOP_DEFAULT_LIBEXEC_DIR="${bin}/../libexec"
fi

HADOOP_LIBEXEC_DIR="${HADOOP_LIBEXEC_DIR:-$HADOOP_DEFAULT_LIBEXEC_DIR}"
HADOOP_NEW_CONFIG=true
if [[ -f "${HADOOP_LIBEXEC_DIR}/yarn-config.sh" ]]; then
  # shellcheck source=./hadoop-yarn-project/hadoop-yarn/bin/yarn-config.sh
  . "${HADOOP_LIBEXEC_DIR}/yarn-config.sh"
else
  echo "ERROR: Cannot execute ${HADOOP_LIBEXEC_DIR}/yarn-config.sh." 2>&1
  exit 1
fi

# hadoop_abs is also one of the functions in hadoop-functions.sh from step 2; see step 2 if you are interested
# hadoop_abs: given a filename or directory, return its absolute version
MYNAME=$(hadoop_abs "${MYNAME}")

# If no parameters were specified, show the usage
# $# is the number of arguments passed to the script
if [[ $# = 0 ]]; then
  hadoop_exit_with_usage 1
fi

# Get the subcommand
# shift moves the positional parameters to the left: after shift, $2 becomes $1; after shift 3, $4 becomes $1
# At this point HADOOP_SUBCMD=resourcemanager
HADOOP_SUBCMD=$1
shift

# Check whether the command must be re-executed as a different user
# If so, re-exec this script via hadoop_uservar_su as that user and exit, e.g.:
# hadoop_uservar_su yarn resourcemanager yarn --reexec
if hadoop_need_reexec yarn "${HADOOP_SUBCMD}"; then
  hadoop_uservar_su yarn "${HADOOP_SUBCMD}" \
    "${MYNAME}" \
    "--reexec" \
    "${HADOOP_USER_PARAMS[@]}"
  exit $?
fi

# Verify that the current user is allowed to execute the given yarn subcommand.
hadoop_verify_user_perm "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

# Capture all remaining arguments of the subcommand in an array, so individual or all parameters can be retrieved
HADOOP_SUBCMD_ARGS=("$@")

# Check whether a function named yarn_subcommand_${HADOOP_SUBCMD} exists
# If it does, call it directly
# If not, run yarncmd_case above to match the subcommand and execute it
if declare -f yarn_subcommand_"${HADOOP_SUBCMD}" >/dev/null 2>&1; then
  hadoop_debug "Calling dynamically: yarn_subcommand_${HADOOP_SUBCMD} ${HADOOP_SUBCMD_ARGS[*]}"
  "yarn_subcommand_${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
else
  yarncmd_case "${HADOOP_SUBCMD}" "${HADOOP_SUBCMD_ARGS[@]}"
fi

# It is not clear that YARN_CLIENT_OPTS is actually a useful thing to have
# separate from HADOOP_CLIENT_OPTS. Someone might be using it, so let's not
# deprecate it; just override HADOOP_CLIENT_OPTS with it before we
# (potentially) add it to the command line
if [[ -n "${YARN_CLIENT_OPTS}" ]]; then
  HADOOP_CLIENT_OPTS=${YARN_CLIENT_OPTS}
fi

# If HADOOP_SUBCMD_SUPPORTDAEMONIZATION is false, append HADOOP_CLIENT_OPTS to HADOOP_OPTS
hadoop_add_client_opts

# If the command included the --workers flag, the command needs to be executed on the other worker nodes
# hadoop_common_worker_mode_execute ultimately runs the command on the remote nodes over ssh; see step 2 for details
if [[ ${HADOOP_WORKER_MODE} = true ]]; then
  hadoop_common_worker_mode_execute "${HADOOP_YARN_HOME}/bin/yarn" "${HADOOP_USER_PARAMS[@]}"
  exit $?
fi

# Add custom (program)_(command)_OPTS to HADOOP_OPTS
hadoop_subcommand_opts "${HADOOP_SHELL_EXECNAME}" "${HADOOP_SUBCMD}"

# Everything is in globals at this point, so call the generic handler
# This does the hard work of launching the daemon, or simply executes our interactive Java class
hadoop_generic_java_subcmd_handler

The corresponding Java classes are then executed to launch the daemons. The classes behind resourcemanager, nodemanager, and proxyserver are listed below (a sketch of the equivalent per-daemon start commands follows the list):

org.apache.hadoop.yarn.server.resourcemanager.ResourceManager

org.apache.hadoop.yarn.server.nodemanager.NodeManager

org.apache.hadoop.yarn.server.webproxy.WebAppProxyServer
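If you only need one of these daemons on the current node, you can run what start-yarn.sh ultimately boils down to for each role. This is a minimal sketch, assuming it is run as the appropriate service user with HADOOP_CONF_DIR pointing at your configuration; the proxyserver line only applies when yarn.web-proxy.address is configured.

bash
#!/usr/bin/env bash
# Minimal sketch: the per-daemon commands that start-yarn.sh effectively runs.
"${HADOOP_HOME}/bin/yarn" --config "${HADOOP_CONF_DIR}" --daemon start resourcemanager
"${HADOOP_HOME}/bin/yarn" --config "${HADOOP_CONF_DIR}" --daemon start nodemanager
"${HADOOP_HOME}/bin/yarn" --config "${HADOOP_CONF_DIR}" --daemon start proxyserver

# The matching stop command, for completeness:
"${HADOOP_HOME}/bin/yarn" --config "${HADOOP_CONF_DIR}" --daemon stop resourcemanager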

III. The Java Side

1. ResourceManager

1.1 main

java
public static void main(String argv[]) {
    Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
    StringUtils.startupShutdownMessage(ResourceManager.class, argv, LOG);
    try {
      Configuration conf = new YarnConfiguration();
      GenericOptionsParser hParser = new GenericOptionsParser(conf, argv);
      argv = hParser.getRemainingArgs();
      // If formatting the state store, delete the RMStateStore; otherwise start normally
      if (argv.length >= 1) {
        if (argv[0].equals("-format-state-store")) {
          deleteRMStateStore(conf);
        } else if (argv[0].equals("-format-conf-store")) {
          deleteRMConfStore(conf);
        } else if (argv[0].equals("-remove-application-from-state-store")
            && argv.length == 2) {
          removeApplication(conf, argv[1]);
        } else {
          printUsage(System.err);
        }
      } else {
        ResourceManager resourceManager = new ResourceManager();
        //Add a shutdown hook with a priority; the higher the priority, the earlier it runs.
        //Shutdown hooks with the same priority run in an undefined order.
        //
        //ShutdownHookManager allows shutdown hooks to run in a deterministic order, higher priority first.
        //The JVM runs shutdown hooks in an undefined order or in parallel. This class registers a single JVM shutdown hook
        //and runs all of the shutdown hooks registered with it (this class) in order of priority.
        //Unless a hook registers an explicit shutdown timeout via addShutdownHook(Runnable, int, long, TimeUnit),
        //the shutdown time allocated to it is set by a configuration option
        //
        //CompositeServiceShutdownHook is the JVM shutdown hook for a CompositeService;
        //it gracefully stops the given CompositeService when the JVM shuts down.
        ShutdownHookManager.get().addShutdownHook(
          new CompositeServiceShutdownHook(resourceManager),
          SHUTDOWN_HOOK_PRIORITY);
        //Initialize; for details see serviceInit() of this class, i.e. step 1.2
        resourceManager.init(conf);
        //Start the ResourceManager; for details see serviceStart() of this class, i.e. step 1.3
        resourceManager.start();
      }
    } catch (Throwable t) {
      LOG.error(FATAL, "Error starting ResourceManager", t);
      System.exit(-1);
    }
  }

1.2 serviceInit

java
protected void serviceInit(Configuration conf) throws Exception {
    this.conf = conf;
    UserGroupInformation.setConfiguration(conf);
    //RMContextImpl holds two service contexts:
    //  1. serviceContext: these are the so-called Always-On services, which must always run regardless of the RM's HA state
    //  2. activeServiceContext: the active-service context, for services that only need to run on the active RM
    //Note: if any new service is to be added to a context, add it to the correct one as described above.
    this.rmContext = new RMContextImpl();
    rmContext.setResourceManager(this);

    //Base class for ConfigurationProvider implementations. A real ConfigurationProvider derives from it and implements the load methods that actually load the configuration.
    this.configurationProvider =
        ConfigurationProviderFactory.getConfigurationProvider(conf);
    this.configurationProvider.init(this.conf);
    rmContext.setConfigurationProvider(configurationProvider);

    //Load core-site.xml
    loadConfigurationXml(YarnConfiguration.CORE_SITE_CONFIGURATION_FILE);

    //Do refreshSuperUserGroupsConfiguration with the loaded core-site.xml,
    //letting RM-specific configuration override the common configuration first, if present

    //Find all configuration entries whose names start with YarnConfiguration.RM_PROXY_USER_PREFIX,
    //and add a record for each of them by replacing that prefix with ProxyUsers.CONF_HADOOP_PROXYUSER
    RMServerUtils.processRMProxyUsersConf(conf);
    //Refresh the configuration with the default proxy-user prefix for the properties.
    ProxyUsers.refreshSuperUserGroupsConfiguration(this.conf);

    // Load yarn-site.xml
    loadConfigurationXml(YarnConfiguration.YARN_SITE_CONFIGURATION_FILE);

    //Sanity check of the configuration
    validateConfigs(this.conf);
    
    //The HA configuration should be set before login
    //If HA is enabled, verify the ResourceManager HA configuration
    this.rmContext.setHAEnabled(HAUtil.isHAEnabled(this.conf));
    if (this.rmContext.isHAEnabled()) {
      HAUtil.verifyAndSetConfiguration(this.conf);
    }

    // Set up the UGI and log in
    // If security is enabled, use the login user
    // If security is not enabled, use the current user
    this.rmLoginUGI = UserGroupInformation.getCurrentUser();
    try {
      //See doSecureLogin() below for details
      doSecureLogin();
    } catch(IOException ie) {
      throw new YarnRuntimeException("Failed to login", ie);
    }

    //Use setupDispatcher() to register handlers for all the Always-On services
    //Register the handlers for the Always-On services
    rmDispatcher = setupDispatcher();
    addIfService(rmDispatcher);
    rmContext.setDispatcher(rmDispatcher);

    // The order of the following services should not be changed, as they will be started in the same order
    // Since the elector service needs the admin service to be initialized and started, we add the admin service first and then the elector service

    adminService = createAdminService();
    addService(adminService);
    rmContext.setRMAdminService(adminService);

    //The elector must be added after the admin service
    if (this.rmContext.isHAEnabled()) {
      //If the RM is configured to use an embedded leader elector, initialize the leader elector.

      //Check whether both settings are enabled:
      //    1. yarn.resourcemanager.ha.automatic-failover.enabled, default true (enables automatic failover; by default only effective when HA is enabled)
      //    2. yarn.resourcemanager.ha.automatic-failover.embedded, default true (enables the embedded automatic failover; by default only effective when HA is enabled. The embedded elector relies on the RM state store to handle fencing and is mainly intended for use with ZKRMStateStore.)
      if (HAUtil.isAutomaticFailoverEnabled(conf)
          && HAUtil.isAutomaticFailoverEmbedded(conf)) {
        EmbeddedElector elector = createEmbeddedElector();
        addIfService(elector);
        rmContext.setLeaderElectorService(elector);
      }
    }

    rmContext.setYarnConfiguration(conf);

    //Create an instance of RMActiveServices and initialize it.
    //RMActiveServices handles all of the active services inside the RM.

    //Create the StandByTransitionRunnable (a runnable executed on a thread):
    //it is the class that transitions the RM to standby. The same StandByTransitionRunnable object can be used by multiple threads,
    //but it may only run once. This is because the RM can become active again after transitioning to standby,
    //and the same RM running in the old context must not transition to standby a second time.
    //A new runnable is created every time the RM transitions to active.

    //Create RMSecretManagerService, used for secret key management

    //Create ContainerAllocationExpirer, used for container allocation and expiry management

    //Create AMLivelinessMonitor, the liveness monitor for ApplicationMasters

    //Create RMAppLifetimeMonitor: this service monitors applications against a given lifetime value. If an application runs longer than the given time, it is killed.

    //Create RMNodeLabelsManager for node label management

    //Create NodeAttributesManager: this class captures all interactions between node attributes and the RM.

    //Create AllocationTagsManager: an in-memory mapping between application/container tags and nodes/racks, needed for constrained affinity/anti-affinity and cardinality placement.

    //Create PlacementConstraintManagerService: the interface for storing and retrieving placement constraints

    //Add resource profiles here because they are already used by AbstractYarnScheduler

    //Create ResourceProfilesManager: the interface of the resource profiles manager. Provides access to the list of available profiles plus some helper functions.

    //Create MultiNodeSortingManager: the node sorting manager that runs all sorting threads and policies (parameterized over SchedulerNode)

    //Create RMDelegatedNodeLabelsUpdater: periodically updates the ResourceManager's node-label mapping. It collects node labels from the RMNodeLabelsMappingProvider and updates the mapping through RMNodeLabelsManager. This service is enabled when "yarn.node-labels.configuration-type" is set to "delegated-centralized".

    //If yarn.resourcemanager.recovery.enabled is set (default false; makes the RM recover its state after startup -- if true, yarn.resourcemanager.store.class must be specified), create the RMStateStore
    //RMStateStore is the base class of ResourceManager state store implementations. It is responsible for async notifications and for interfacing with YARN objects; real store implementations derive from it and implement blocking store and load methods that actually store and load the state.

    //Determine whether UserGroupInformation uses Kerberos to establish user identity or relies on simple authentication
    //If Kerberos, create the DelegationTokenRenewer (the service that renews application delegation tokens)

    //Register the event handler for NodesListManager
    //NodesListManager: responsible for reading the include/exclude host files that restrict access to the RM, etc.

    //Initialize the scheduler
    //Create the ResourceScheduler, a sub-interface of YarnScheduler (this interface is used to communicate with the scheduler to allocate and clean up resources.)
    //It can be set via yarn.resourcemanager.scheduler.class; the default is the capacity scheduler, i.e. org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler

    //Register the event handler for RMAppEvents
    //Create ApplicationEventDispatcher

    //Register the event handler for RMAppAttemptEvents
    //Create ApplicationAttemptEventDispatcher

    //Register the event handler for RMNodes
    //Create NodeEventDispatcher

    //Create NMLivelinessMonitor, the NodeManager liveness monitor
    //nm.liveness-monitor.expiry-interval-ms defaults to 600000 ms (600 s)
    //It checks once every expiry-interval / 3, i.e. every 200 s by default

    //Create ResourceTrackerService for resource tracking

    //Initialize the metrics system plus the JVM and logging related metrics

    //Create JvmPauseMonitor:
    //this class sets up a simple thread that runs in a loop, sleeping for a short interval.
    //If the sleep takes significantly longer than its target time, it implies that the JVM or the host has paused processing,
    //which may cause other problems. If such a pause is detected, the thread logs a message.

    //Initialize the reservation system
    //Enabled via yarn.resourcemanager.reservation-system.enable, default false
    //If enabled, create the ReservationSystem
    //The implementation class can be set via yarn.resourcemanager.reservation-system.class
    //The default is CapacityReservationSystem.class

    //Create ApplicationMasterService

    //Create ApplicationACLsManager

    //Create QueueACLsManager

    //Create RMAppManager (this class manages the ResourceManager's list of applications.)

    //Register the event handler for RMAppManagerEvents

    //Create ClientRMService (the client interface of the ResourceManager; this module handles all RPC interfaces from clients to the ResourceManager.)

    //Create ApplicationMasterLauncher

    //Create RMNMInfo (a JMX bean that lists the state of all NodeManagers)

    //Whether the services REST API is enabled on the ResourceManager
    //Set via yarn.webapp.api-service.enable, default false
    //If enabled, create the SystemServiceManager; the default implementation class is org.apache.hadoop.yarn.service.client.SystemServiceManagerImpl
    //The SystemServiceManager implementation
    //scans the configured system service path
    //The service path structure is as follows:
    //SYSTEM_SERVICE_DIR_PATH
    //|---- sync
    //|     |--- user1
    //|     |    |---- service1.yarnfile
    //|     |    |---- service2.yarnfile
    //|     |--- user2
    //|     |    |---- service1.yarnfile
    //|     |    ....
    //|     |
    //|---- async
    //|     |--- user3
    //|     |    |---- service1.yarnfile
    //|     |    |---- service2.yarnfile
    //|     |--- user4
    //|     |    |---- service1.yarnfile
    //|     |    ....
    //|     |
    //sync: these services are started during the synchronous service start; this is a blocking service launch
    //async: these services are started in separate threads, without any delay after service start; a non-blocking service launch

    createAndInitActiveServices(false);


    //Get the URL to bind to (the RM's actual bind address)
    //A bind host can be specified to override the hostname in webAppURLWithoutScheme; the port specified in webAppURLWithoutScheme is used.
    //It can be set via yarn.resourcemanager.bind-host
    webAppAddress = WebAppUtils.getWebAppBindURL(this.conf,
                      YarnConfiguration.RM_BIND_HOST,
                      WebAppUtils.getRMWebAppURLWithoutScheme(this.conf));

    //Create RMApplicationHistoryWriter
    //The ResourceManager uses this class to write information about RMApp, RMAppAttempt and RMContainer.
    //    These APIs are non-blocking and simply schedule an event that writes the history.
    //    A self-contained dispatcher handles the events in a separate thread and extracts the fields that are to be persisted.
    //    The extracted information is then persisted via an ApplicationHistoryStore implementation
    RMApplicationHistoryWriter rmApplicationHistoryWriter =
        createRMApplicationHistoryWriter();
    addService(rmApplicationHistoryWriter);
    rmContext.setRMApplicationHistoryWriter(rmApplicationHistoryWriter);

    //Initialize the RM timeline collector first so that the system metrics publisher can bind to it
    if (YarnConfiguration.timelineServiceV2Enabled(this.conf)) {
      RMTimelineCollectorManager timelineCollectorManager =
          createRMTimelineCollectorManager();
      addService(timelineCollectorManager);
      rmContext.setRMTimelineCollectorManager(timelineCollectorManager);
    }

    //Create CombinedSystemMetricsPublisher,
    //the system metrics publisher
    SystemMetricsPublisher systemMetricsPublisher =
        createSystemMetricsPublisher();
    addIfService(systemMetricsPublisher);
    rmContext.setSystemMetricsPublisher(systemMetricsPublisher);

    //Register the ResourceManagerMXBean
    //MBeans.register("ResourceManager", "ResourceManager", this);
    //registers the MBean using the standard naming convention
    registerMXBean();

    super.serviceInit(this.conf);
  }


  protected void doSecureLogin() throws IOException {
    //Retrieve the RM bind address from the configuration
	InetSocketAddress socAddr = getBindAddress(conf);
    //Log in as the principal specified in the configuration, replacing $host in the user's Kerberos principal name with the hostname.
    //Returns immediately in non-secure mode; throws an exception if no keytab is available


    SecurityUtil.login(this.conf, YarnConfiguration.RM_KEYTAB,
        YarnConfiguration.RM_PRINCIPAL, socAddr.getHostName());

    // If security is enabled, set rmLoginUGI to the UGI of the login user
    if (UserGroupInformation.isSecurityEnabled()) {
      this.rmLoginUGI = UserGroupInformation.getLoginUser();
    }
  }

1.3 serviceStart

java
protected void serviceStart() throws Exception {
      RMStateStore rmStore = rmContext.getStateStore();
      //The state store needs to be started regardless of whether recovery is enabled, because applications need events to move to further states.
      //Start the RMStateStore,
      //the base class of ResourceManager state store implementations.
      //It is responsible for async notifications and for interfacing with YARN objects; real store implementations derive from it and implement blocking store and load methods that actually store and load the state.
      rmStore.start();

      //Initialized during serviceInit() in step 1.2; defaults to false
      if(recoveryEnabled) {
        try {
          LOG.info("Recovery started");
          rmStore.checkVersion();
          if (rmContext.isWorkPreservingRecoveryEnabled()) {
            rmContext.setEpoch(rmStore.getAndIncrementEpoch());
          }
          RMState state = rmStore.loadState();
          recover(state);
          LOG.info("Recovery ended");
        } catch (Exception e) {
          // the Exception from loadState() needs to be handled for
          // HA and we need to give up master status if we got fenced
          LOG.error("Failed to load/recover state", e);
          throw e;
        }
      } else {
        //Whether YARN Federation is enabled
        //Set via yarn.federation.enabled, default false
        if (HAUtil.isFederationEnabled(conf)) {
          long epoch = conf.getLong(YarnConfiguration.RM_EPOCH,
              YarnConfiguration.DEFAULT_RM_EPOCH);
          rmContext.setEpoch(epoch);
          LOG.info("Epoch set for Federation: " + epoch);
        }
      }

      super.serviceStart();
    }


  protected void serviceStart() throws Exception {
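    // Note: this second serviceStart() is the parent CompositeService implementation that
    // super.serviceStart() above resolves to; it starts, in order, every service added during serviceInit.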
    //Get the list of all services added during serviceInit
    List<Service> services = getServices();
    if (LOG.isDebugEnabled()) {
      LOG.debug(getName() + ": starting services, size=" + services.size());
    }
    for (Service service : services) {
      //Start the service. If this fails, the service is stopped and an exception is raised
      service.start();
    }
    super.serviceStart();
  }

2. NodeManager

Note: the startup scenarios below all run on a single node.

2.1 main

java
  public static void main(String[] args) throws IOException {
    Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
    StringUtils.startupShutdownMessage(NodeManager.class, args, LOG);
    @SuppressWarnings("resource")
    NodeManager nodeManager = new NodeManager();
    Configuration conf = new YarnConfiguration();
    //Create a GenericOptionsParser to parse only the generic Hadoop arguments.
    //getRemainingArgs() returns the array of string arguments excluding the generic ones
    new GenericOptionsParser(conf, args);
    //Initialize and start the NodeManager; we look at this in detail in step 2.2
    nodeManager.initAndStartNodeManager(conf, false);
  }

2.2 initAndStartNodeManager

java
private void initAndStartNodeManager(Configuration conf, boolean hasToReboot) {
    try {
      //If we are on a Unix-based system but bash is not available, we cannot start.
      //Bash is required to launch containers on Unix-based systems.
      //In other words, containers are launched via bash
      if (!Shell.WINDOWS) {
        if (!Shell.checkIsBashSupported()) {
          String message =
              "Failing NodeManager start since we're on a "
                  + "Unix-based system but bash doesn't seem to be available.";
          LOG.error(message);
          throw new YarnRuntimeException(message);
        }
      }

      //If we are restarting, remove the old shutdown hook
      if (hasToReboot && null != nodeManagerShutdownHook) {
        ShutdownHookManager.get().removeShutdownHook(nodeManagerShutdownHook);
      }
      
      //The JVM shutdown hook for a CompositeService; it gracefully stops the given CompositeService when the JVM shuts down.
      nodeManagerShutdownHook = new CompositeServiceShutdownHook(this);
      ShutdownHookManager.get().addShutdownHook(nodeManagerShutdownHook,
                                                SHUTDOWN_HOOK_PRIORITY);
      //System exit should only be called when the NodeManager is instantiated from main()
      this.shouldExitOnShutdownEvent = true;
      //Initialize the NodeManager; for details see step 2.3 (this class's serviceInit())
      this.init(conf);
      //Start the NodeManager; for details see step 2.4 (this class's serviceStart())
      this.start();
    } catch (Throwable t) {
      LOG.error("Error starting NodeManager", t);
      System.exit(-1);
    }
  }

2.3 serviceInit

java
protected void serviceInit(Configuration conf) throws Exception {
    UserGroupInformation.setConfiguration(conf);
    //Whether work-preserving recovery is enabled for the RM
    //Set via yarn.resourcemanager.work-preserving-recovery.enabled, default true
    rmWorkPreservingRestartEnabled = conf.getBoolean(YarnConfiguration
            .RM_WORK_PRESERVING_RECOVERY_ENABLED,
        YarnConfiguration.DEFAULT_RM_WORK_PRESERVING_RECOVERY_ENABLED);

    try {
      //Initialize and start the recovery store
      initAndStartRecoveryStore(conf);
    } catch (IOException e) {
      String recoveryDirName = conf.get(YarnConfiguration.NM_RECOVERY_DIR);
      throw new
          YarnRuntimeException("Unable to initialize recovery directory at "
              + recoveryDirName, e);
    }

    //Create NMContainerTokenSecretManager
    //for container token secret management on the NodeManager
    //The NM keeps only two master keys: the current key known to the RM, and the key from the previous rolling interval.
    NMContainerTokenSecretManager containerTokenSecretManager =
        new NMContainerTokenSecretManager(conf, nmStore);

    //Create NMTokenSecretManagerInNM
    //It holds a map from applications to their attempt lists, and a map from application attempts to their keys
    NMTokenSecretManagerInNM nmTokenSecretManager =
        new NMTokenSecretManagerInNM(nmStore);

    recoverTokens(nmTokenSecretManager, containerTokenSecretManager);
    
    this.aclsManager = new ApplicationACLsManager(conf);

    //Create LocalDirsHandlerService,
    //a class that provides the ability to check the health of the node's local directories.
    //It specifically manages the nodemanager local dirs and log dirs by periodically checking their health.
    this.dirsHandler = new LocalDirsHandlerService(metrics);

    //Whether distributed scheduling is enabled
    //Set via yarn.nodemanager.distributed-scheduling.enabled, default false
    boolean isDistSchedulingEnabled =
        conf.getBoolean(YarnConfiguration.DIST_SCHEDULING_ENABLED,
            YarnConfiguration.DEFAULT_DIST_SCHEDULING_ENABLED);

    //Create the NM context
    this.context = createNMContext(containerTokenSecretManager,
        nmTokenSecretManager, nmStore, isDistSchedulingEnabled, conf);

    //Create ResourcePluginManager,
    //which manages the ResourcePlugins configured on this NodeManager
    //ResourcePlugin:
    //ResourcePlugin is a NodeManager interface that makes it easier to support discovery/management/isolation of new resource types
    //It has two main parts:
    //    createResourceHandler:
    //            when the resource type needs any special isolation, the plugin returns a ResourceHandler.
    //            It is added to the ResourceHandlerChain during NodeManager startup.
    //            When no special isolation is needed, return null.
    //    getNodeResourceHandlerInstance:
    //            when the resource type needs any discovery mechanism, the plugin returns a NodeResourceUpdaterPlugin.
    //            For example, if we want to set resource values during NM registration or send updates during NM-RM heartbeats,
    //            we can implement NodeResourceUpdaterPlugin
    //            and update the fields of NodeHeartbeatRequest or RegisterNodeManagerRequest.
    //            This is called on every node status update or node registration, so avoid creating a new instance every time
    ResourcePluginManager pluginManager = createResourcePluginManager();

    //
    pluginManager.initialize(context);
    ((NMContext)context).setResourcePluginManager(pluginManager);

    //Create the ContainerExecutor
    //This class abstracts the mechanism used to launch containers on the underlying operating system; all executor implementations must extend it
    //The executor can be set via yarn.nodemanager.container-executor.class
    //The default is DefaultContainerExecutor.class
    //DefaultContainerExecutor provides generic container execution services, handling process execution in a platform-independent way via ProcessBuilder
    //ProcessBuilder is used to create operating system processes
    //So a container is ultimately launched by Java's ProcessBuilder creating a process in the operating system
    ContainerExecutor exec = createContainerExecutor(conf);
    try {
      //Run the executor initialization steps; verify that the necessary configuration and permissions are in place.
      exec.init(context);
    } catch (IOException e) {
      throw new YarnRuntimeException("Failed to initialize container executor", e);
    }    
    DeletionService del = createDeletionService(exec);
    addService(del);

    //The NodeManager-level event dispatcher
    //AsyncDispatcher:
    //dispatches events in a separate thread. Currently only a single thread does this. There could be multiple channels per event-type class, and a thread pool could be used to dispatch the events.
    this.dispatcher = createNMDispatcher();

    //Create NodeHealthCheckerService,
    //a class that checks the node's health and reports it to the services that ask the health checker for it
    nodeHealthChecker =
        new NodeHealthCheckerService(
            getNodeHealthScriptRunner(conf), dirsHandler);
    addService(nodeHealthChecker);


    ((NMContext)context).setContainerExecutor(exec);
    ((NMContext)context).setDeletionService(del);

    //Create NodeStatusUpdaterImpl
    //for node status updates, e.g. communication with the ResourceManager and container bookkeeping
    nodeStatusUpdater =
        createNodeStatusUpdater(context, dispatcher, nodeHealthChecker);

    //Create NodeLabelsProvider, responsible for fetching node labels
    nodeLabelsProvider = createNodeLabelsProvider(conf);
    if (nodeLabelsProvider != null) {
      addIfService(nodeLabelsProvider);
      nodeStatusUpdater.setNodeLabelsProvider(nodeLabelsProvider);
    }

    //Create NodeAttributesProvider, responsible for fetching node attributes
    nodeAttributesProvider = createNodeAttributesProvider(conf);
    if (nodeAttributesProvider != null) {
      addIfService(nodeAttributesProvider);
      nodeStatusUpdater.setNodeAttributesProvider(nodeAttributesProvider);
    }
    
    //Create NodeResourceMonitorImpl,
    //the implementation of the node resource monitor. It periodically tracks the node's resource utilization and reports it to the NM.
    nodeResourceMonitor = createNodeResourceMonitor();
    addService(nodeResourceMonitor);
    ((NMContext) context).setNodeResourceMonitor(nodeResourceMonitor);

    //Create ContainerManagerImpl, the entity that manages the container lifecycle
    containerManager =
        createContainerManager(context, exec, del, nodeStatusUpdater,
        this.aclsManager, dirsHandler);
    addService(containerManager);
    ((NMContext) context).setContainerManager(containerManager);

    //Create NMLogAggregationStatusTracker,
    //which caches the log aggregation status of finished applications and periodically removes stale cached entries
    this.nmLogAggregationStatusTracker = createNMLogAggregationStatusTracker(
        context);
    addService(nmLogAggregationStatusTracker);
    ((NMContext)context).setNMLogAggregationStatusTracker(
        this.nmLogAggregationStatusTracker);

    WebServer webServer = createWebServer(context, containerManager
        .getContainersMonitor(), this.aclsManager, dirsHandler);
    addService(webServer);
    ((NMContext) context).setWebServer(webServer);

    
    ((NMContext) context).setQueueableContainerAllocator(
        new OpportunisticContainerAllocator(
            context.getContainerTokenSecretManager()));

    dispatcher.register(ContainerManagerEventType.class, containerManager);
    dispatcher.register(NodeManagerEventType.class, this);
    addService(dispatcher);

    //Create JvmPauseMonitor:
    //this class sets up a simple thread that runs in a loop, sleeping for a short interval.
    //If the sleep takes significantly longer than its target time, it implies that the JVM or the host has paused processing,
    //which may cause other problems. If such a pause is detected, the thread logs a message.
    pauseMonitor = new JvmPauseMonitor();
    addService(pauseMonitor);
    metrics.getJvmMetrics().setPauseMonitor(pauseMonitor);

    //Initialize the metrics system
    DefaultMetricsSystem.initialize("NodeManager");

    //Whether timeline service v.2 is enabled via configuration
    if (YarnConfiguration.timelineServiceV2Enabled(conf)) {
      this.nmCollectorService = createNMCollectorService(context);
      addService(nmCollectorService);
    }

    //The StatusUpdater should be added last so that it starts last; that way we make sure everything is working before registering with the RM
    addService(nodeStatusUpdater);
    ((NMContext) context).setNodeStatusUpdater(nodeStatusUpdater);
    nmStore.setNodeStatusUpdater(nodeStatusUpdater);

    //Do the secure login before init is called on the added services.
    try {
      doSecureLogin();
    } catch (IOException e) {
      throw new YarnRuntimeException("Failed NodeManager login", e);
    }

    //Register the NodeManagerMXBean
    //MBeans.register("NodeManager", "NodeManager", this);
    registerMXBean();

    super.serviceInit(conf);
    // TODO add local dirs to del
  }


  private void initAndStartRecoveryStore(Configuration conf)
      throws IOException {
    //Whether the NodeManager recovers its state after startup
    //Set via yarn.nodemanager.recovery.enabled, default false
    boolean recoveryEnabled = conf.getBoolean(
        YarnConfiguration.NM_RECOVERY_ENABLED,
        YarnConfiguration.DEFAULT_NM_RECOVERY_ENABLED);
    if (recoveryEnabled) {
      FileSystem recoveryFs = FileSystem.getLocal(conf);
      String recoveryDirName = conf.get(YarnConfiguration.NM_RECOVERY_DIR);
      if (recoveryDirName == null) {
        throw new IllegalArgumentException("Recovery is enabled but " +
            YarnConfiguration.NM_RECOVERY_DIR + " is not set.");
      }
      Path recoveryRoot = new Path(recoveryDirName);
      recoveryFs.mkdirs(recoveryRoot, new FsPermission((short)0700));
      nmStore = new NMLeveldbStateStoreService();
    } else {
      //The state store to use when no state is being stored
      nmStore = new NMNullStateStoreService();
    }
    nmStore.init(conf);
    nmStore.start();
  }

2.4 serviceStart

java
  protected void serviceStart() throws Exception {
    //Get the services added to the list during serviceInit() and start them one by one
    List<Service> services = getServices();
    if (LOG.isDebugEnabled()) {
      LOG.debug(getName() + ": starting services, size=" + services.size());
    }
    for (Service service : services) {
      service.start();
    }
    super.serviceStart();
  }

3. WebAppProxyServer

The proxy server sits between end users and the ApplicationMaster web UI. The sketch below shows how start-yarn.sh decides whether to launch it as a standalone daemon.
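This is a minimal sketch, assuming HADOOP_HOME is set; it reproduces the check at the end of start-yarn.sh. A standalone proxyserver is only started when yarn.web-proxy.address is set; otherwise the web proxy runs embedded in the ResourceManager.

bash
#!/usr/bin/env bash
# Minimal sketch: will start-yarn.sh launch a standalone proxyserver?
# If the key is unset, nothing useful is printed and start-yarn.sh skips the proxyserver.
"${HADOOP_HOME}/bin/hdfs" getconf -confKey yarn.web-proxy.address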

3.1 main

java
  public static void main(String[] args) {
    Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
    StringUtils.startupShutdownMessage(WebAppProxyServer.class, args, LOG);
    try {
      YarnConfiguration configuration = new YarnConfiguration();
      new GenericOptionsParser(configuration, args);
      //Start the proxy server
      WebAppProxyServer proxyServer = startServer(configuration);
      proxyServer.proxy.join();
    } catch (Throwable t) {
      ExitUtil.terminate(-1, t);
    }
  }

  protected static WebAppProxyServer startServer(Configuration configuration)
      throws Exception {
    WebAppProxyServer proxy = new WebAppProxyServer();
    ShutdownHookManager.get().addShutdownHook(
        new CompositeServiceShutdownHook(proxy), SHUTDOWN_HOOK_PRIORITY);
    //This actually invokes this class's serviceInit(); see step 3.2 for details
    proxy.init(configuration);
    //Only two services are added during initialization: WebAppProxy and JvmPauseMonitor
    //Let's look at how WebAppProxy starts; see step 3.3
    proxy.start();
    return proxy;
  }

3.2 serviceInit

java
//Only two services are added: WebAppProxy and JvmPauseMonitor
protected void serviceInit(Configuration conf) throws Exception {
    Configuration config = new YarnConfiguration(conf);
    //Log in as the Kerberos principal specified for the proxy
    doSecureLogin(conf);
    //Build the WebAppProxy
    proxy = new WebAppProxy();
    addService(proxy);

    //These were already covered in the RM and NM sections
    DefaultMetricsSystem.initialize("WebAppProxyServer");
    JvmMetrics jm = JvmMetrics.initSingleton("WebAppProxyServer", null);
    pauseMonitor = new JvmPauseMonitor();
    addService(pauseMonitor);
    jm.setPauseMonitor(pauseMonitor);

    super.serviceInit(config);
  }

3.3 WebAppProxy

java
protected void serviceInit(Configuration conf) throws Exception {
    //hadoop.security.authentication in core-default.xml can be kerberos or simple; the default is simple
    String auth =  conf.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);
    if (auth == null || "simple".equals(auth)) {
      isSecurityEnabled = false;
    } else if ("kerberos".equals(auth)) {
      isSecurityEnabled = true;
    } else {
      LOG.warn("Unrecognized attribute value for " +
          CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION +
          " of " + auth);
    }
    //The address of the web proxy
    //The web proxy address is HOST:PORT; if it is not given, the proxy runs as part of the RM
    //First check whether yarn.web-proxy.address is set; if it is not, fall back to
    //yarn.resourcemanager.webapp.address (the http address of the RM web application; if only a host is provided as the value, the web app is served on a random port), whose default is ${yarn.resourcemanager.hostname}:8088
    String proxy = WebAppUtils.getProxyHostAndPort(conf);
    String[] proxyParts = proxy.split(":");
    proxyHost = proxyParts[0];

    //Create a new connection to the RM/Application History Server to fetch application reports.
    fetcher = new AppReportFetcher(conf);
    //Get the value of yarn.web-proxy.address
    bindAddress = conf.get(YarnConfiguration.PROXY_ADDRESS);
    if(bindAddress == null || bindAddress.isEmpty()) {
      throw new YarnRuntimeException(YarnConfiguration.PROXY_ADDRESS + 
          " is not set so the proxy will not run.");
    }
    LOG.info("Instantiating Proxy at " + bindAddress);
    String[] parts = StringUtils.split(bindAddress, ':');
    port = 0;
    if (parts.length == 2) {
      bindAddress = parts[0];
      port = Integer.parseInt(parts[1]);
    }
    //Build a new ACL from the string representation of the same ACL.
    //The string is a comma-separated list of users and groups; the user list comes first and is separated from the group list by a space, e.g. "user1,user2 group1,group2"
    acl = new AccessControlList(conf.get(YarnConfiguration.YARN_ADMIN_ACL, 
        YarnConfiguration.DEFAULT_YARN_ADMIN_ACL));
    super.serviceInit(conf);
  }
  
  @Override
  protected void serviceStart() throws Exception {
    try {
      Configuration conf = getConfig();
      HttpServer2.Builder b = new HttpServer2.Builder()
          .setName("proxy")
          .addEndpoint(
              URI.create(WebAppUtils.getHttpSchemePrefix(conf) + bindAddress
                  + ":" + port)).setFindPort(port == 0).setConf(getConfig())
          .setACL(acl);
      if (YarnConfiguration.useHttps(conf)) {
        WebAppUtils.loadSslConfiguration(b);
      }
      proxyServer = b.build();
      proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME,
          ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class);
      proxyServer.setAttribute(FETCHER_ATTRIBUTE, fetcher);
      proxyServer
          .setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, isSecurityEnabled);
      proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost);
      //Calls HttpServer2.start(),
      //which ultimately calls Jetty's org.eclipse.jetty.server.Server.start()
      //Starts the server; does not wait for it to come up
      proxyServer.start();
    } catch (IOException e) {
      LOG.error("Could not start proxy web server",e);
      throw e;
    }
    super.serviceStart();
  }
  
  @Override
  protected void serviceStop() throws Exception {
    if(proxyServer != null) {
      try {
        proxyServer.stop();
      } catch (Exception e) {
        LOG.error("Error stopping proxy web server", e);
        throw new YarnRuntimeException("Error stopping proxy web server",e);
      }
    }
    if(this.fetcher != null) {
      this.fetcher.stop();
    }
    super.serviceStop();
  }

IV. Summary

1. The user runs ./start-yarn.sh

2. start-yarn.sh starts the resourcemanager, the nodemanager(s), and the proxyserver in turn

3. The yarn command and hadoop-functions.sh resolve the startup class for each of the three roles and launch the corresponding Java process locally or remotely (over ssh)

4. The resourcemanager, nodemanager, and proxyserver each initialize their own list of services

5. The resourcemanager, nodemanager, and proxyserver then start the services on those lists in order (a quick verification is sketched below)
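After a successful start you can sanity-check the result from the shell. This is a minimal sketch, assuming the JDK's jps tool is on the PATH of the service user; the process names match the main classes listed above, and WebAppProxyServer only appears when yarn.web-proxy.address is configured.

bash
#!/usr/bin/env bash
# Minimal sketch: verify that the YARN daemons are up on this node.
jps | grep -E 'ResourceManager|NodeManager|WebAppProxyServer'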
