• Spark源码(启动ApplicationMaster和Driver线程)-第二期


    上一期指路

    上一期

    上一期我们分析到了YarnClusterApplication的start,我们继续从这里分析。

    1. override def start(args: Array[String], conf: SparkConf): Unit = {
    2. // SparkSubmit would use yarn cache to distribute files & jars in yarn mode,
    3. // so remove them from sparkConf here for yarn mode.
    4. conf.remove(JARS)
    5. conf.remove(FILES)
    6. new Client(new ClientArguments(args), conf, null).run()
    7. }

    点击run

    this.appId = submitApplication()

    再点击submitApplication

    1.Client#submitApplication

    1. def submitApplication(): ApplicationId = {
    2. ResourceRequestHelper.validateResources(sparkConf)
    3. var appId: ApplicationId = null
    4. try {
    5. launcherBackend.connect()
    6. yarnClient.init(hadoopConf)
    7. yarnClient.start()
    8. logInfo("Requesting a new application from cluster with %d NodeManagers"
    9. .format(yarnClient.getYarnClusterMetrics.getNumNodeManagers))
    10. // Get a new application from our RM
    11. val newApp = yarnClient.createApplication()
    12. val newAppResponse = newApp.getNewApplicationResponse()
    13. appId = newAppResponse.getApplicationId()
    14. // The app staging dir based on the STAGING_DIR configuration if configured
    15. // otherwise based on the users home directory.
    16. val appStagingBaseDir = sparkConf.get(STAGING_DIR)
    17. .map { new Path(_, UserGroupInformation.getCurrentUser.getShortUserName) }
    18. .getOrElse(FileSystem.get(hadoopConf).getHomeDirectory())
    19. stagingDirPath = new Path(appStagingBaseDir, getAppStagingDir(appId))
    20. new CallerContext("CLIENT", sparkConf.get(APP_CALLER_CONTEXT),
    21. Option(appId.toString)).setCurrentContext()
    22. // Verify whether the cluster has enough resources for our AM
    23. verifyClusterResources(newAppResponse)
    24. // Set up the appropriate contexts to launch our AM
    25. val containerContext = createContainerLaunchContext(newAppResponse)
    26. val appContext = createApplicationSubmissionContext(newApp, containerContext)
    27. // Finally, submit and monitor the application
    28. logInfo(s"Submitting application $appId to ResourceManager")
    29. yarnClient.submitApplication(appContext)
    30. launcherBackend.setAppId(appId.toString)
    31. reportLauncherState(SparkAppHandle.State.SUBMITTED)
    32. appId
    33. } catch {
    34. case e: Throwable =>
    35. if (stagingDirPath != null) {
    36. cleanupStagingDir()
    37. }
    38. throw e
    39. }
    40. }

    ①yarnClient.init(hadoopConf)      

    yarnClient.start()

    初始化客户端并启动

    ②yarnClient.createApplication()  newApp.getNewApplicationResponse()      newAppResponse.getApplicationId()

    告诉我们的resource manager要创建一个应用,得到一个响应,通过该响应得到全局appId

    ③createContainerLaunchContext(newAppResponse)

    createApplicationSubmissionContext(newApp, containerContext)

    创建容器启动环境和app提交上下文,这些都是为了启动am

    ④yarnClient.submitApplication(appContext)

    通过客户端向yarn集群提交应用,其中包含了启动am命令的封装

    2.Client#createContainerLaunchContext

    1. private def createContainerLaunchContext(newAppResponse: GetNewApplicationResponse)
    2. : ContainerLaunchContext = {
    3. logInfo("Setting up container launch context for our AM")
    4. val appId = newAppResponse.getApplicationId
    5. val pySparkArchives =
    6. if (sparkConf.get(IS_PYTHON_APP)) {
    7. findPySparkArchives()
    8. } else {
    9. Nil
    10. }
    11. val launchEnv = setupLaunchEnv(stagingDirPath, pySparkArchives)
    12. val localResources = prepareLocalResources(stagingDirPath, pySparkArchives)
    13. val amContainer = Records.newRecord(classOf[ContainerLaunchContext])
    14. amContainer.setLocalResources(localResources.asJava)
    15. amContainer.setEnvironment(launchEnv.asJava)
    16. val javaOpts = ListBuffer[String]()
    17. // Set the environment variable through a command prefix
    18. // to append to the existing value of the variable
    19. var prefixEnv: Option[String] = None
    20. // Add Xmx for AM memory
    21. javaOpts += "-Xmx" + amMemory + "m"
    22. val tmpDir = new Path(Environment.PWD.$$(), YarnConfiguration.DEFAULT_CONTAINER_TEMP_DIR)
    23. javaOpts += "-Djava.io.tmpdir=" + tmpDir
    24. // TODO: Remove once cpuset version is pushed out.
    25. // The context is, default gc for server class machines ends up using all cores to do gc -
    26. // hence if there are multiple containers in same node, Spark GC affects all other containers'
    27. // performance (which can be that of other Spark containers)
    28. // Instead of using this, rely on cpusets by YARN to enforce "proper" Spark behavior in
    29. // multi-tenant environments. Not sure how default Java GC behaves if it is limited to subset
    30. // of cores on a node.
    31. val useConcurrentAndIncrementalGC = launchEnv.get("SPARK_USE_CONC_INCR_GC").exists(_.toBoolean)
    32. if (useConcurrentAndIncrementalGC) {
    33. // In our expts, using (default) throughput collector has severe perf ramifications in
    34. // multi-tenant machines
    35. javaOpts += "-XX:+UseConcMarkSweepGC"
    36. javaOpts += "-XX:MaxTenuringThreshold=31"
    37. javaOpts += "-XX:SurvivorRatio=8"
    38. javaOpts += "-XX:+CMSIncrementalMode"
    39. javaOpts += "-XX:+CMSIncrementalPacing"
    40. javaOpts += "-XX:CMSIncrementalDutyCycleMin=0"
    41. javaOpts += "-XX:CMSIncrementalDutyCycle=10"
    42. }
    43. // Include driver-specific java options if we are launching a driver
    44. if (isClusterMode) {
    45. sparkConf.get(DRIVER_JAVA_OPTIONS).foreach { opts =>
    46. javaOpts ++= Utils.splitCommandString(opts)
    47. .map(Utils.substituteAppId(_, appId.toString))
    48. .map(YarnSparkHadoopUtil.escapeForShell)
    49. }
    50. val libraryPaths = Seq(sparkConf.get(DRIVER_LIBRARY_PATH),
    51. sys.props.get("spark.driver.libraryPath")).flatten
    52. if (libraryPaths.nonEmpty) {
    53. prefixEnv = Some(createLibraryPathPrefix(libraryPaths.mkString(File.pathSeparator),
    54. sparkConf))
    55. }
    56. if (sparkConf.get(AM_JAVA_OPTIONS).isDefined) {
    57. logWarning(s"${AM_JAVA_OPTIONS.key} will not take effect in cluster mode")
    58. }
    59. } else {
    60. // Validate and include yarn am specific java options in yarn-client mode.
    61. sparkConf.get(AM_JAVA_OPTIONS).foreach { opts =>
    62. if (opts.contains("-Dspark")) {
    63. val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to set Spark options (was '$opts')."
    64. throw new SparkException(msg)
    65. }
    66. if (opts.contains("-Xmx")) {
    67. val msg = s"${AM_JAVA_OPTIONS.key} is not allowed to specify max heap memory settings " +
    68. s"(was '$opts'). Use spark.yarn.am.memory instead."
    69. throw new SparkException(msg)
    70. }
    71. javaOpts ++= Utils.splitCommandString(opts)
    72. .map(Utils.substituteAppId(_, appId.toString))
    73. .map(YarnSparkHadoopUtil.escapeForShell)
    74. }
    75. sparkConf.get(AM_LIBRARY_PATH).foreach { paths =>
    76. prefixEnv = Some(createLibraryPathPrefix(paths, sparkConf))
    77. }
    78. }
    79. // For log4j configuration to reference
    80. javaOpts += ("-Dspark.yarn.app.container.log.dir=" + ApplicationConstants.LOG_DIR_EXPANSION_VAR)
    81. val userClass =
    82. if (isClusterMode) {
    83. Seq("--class", YarnSparkHadoopUtil.escapeForShell(args.userClass))
    84. } else {
    85. Nil
    86. }
    87. val userJar =
    88. if (args.userJar != null) {
    89. Seq("--jar", args.userJar)
    90. } else {
    91. Nil
    92. }
    93. val primaryPyFile =
    94. if (isClusterMode && args.primaryPyFile != null) {
    95. Seq("--primary-py-file", new Path(args.primaryPyFile).getName())
    96. } else {
    97. Nil
    98. }
    99. val primaryRFile =
    100. if (args.primaryRFile != null) {
    101. Seq("--primary-r-file", args.primaryRFile)
    102. } else {
    103. Nil
    104. }
    105. val amClass =
    106. if (isClusterMode) {
    107. Utils.classForName("org.apache.spark.deploy.yarn.ApplicationMaster").getName
    108. } else {
    109. Utils.classForName("org.apache.spark.deploy.yarn.ExecutorLauncher").getName
    110. }
    111. if (args.primaryRFile != null &&
    112. (args.primaryRFile.endsWith(".R") || args.primaryRFile.endsWith(".r"))) {
    113. args.userArgs = ArrayBuffer(args.primaryRFile) ++ args.userArgs
    114. }
    115. val userArgs = args.userArgs.flatMap { arg =>
    116. Seq("--arg", YarnSparkHadoopUtil.escapeForShell(arg))
    117. }
    118. val amArgs =
    119. Seq(amClass) ++ userClass ++ userJar ++ primaryPyFile ++ primaryRFile ++ userArgs ++
    120. Seq("--properties-file",
    121. buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, SPARK_CONF_FILE)) ++
    122. Seq("--dist-cache-conf",
    123. buildPath(Environment.PWD.$$(), LOCALIZED_CONF_DIR, DIST_CACHE_CONF_FILE))
    124. // Command for the ApplicationMaster
    125. val commands = prefixEnv ++
    126. Seq(Environment.JAVA_HOME.$$() + "/bin/java", "-server") ++
    127. javaOpts ++ amArgs ++
    128. Seq(
    129. "1>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout",
    130. "2>", ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr")
    131. // TODO: it would be nicer to just make sure there are no null commands here
    132. val printableCommands = commands.map(s => if (s == null) "null" else s).toList
    133. amContainer.setCommands(printableCommands.asJava)
    134. logDebug("===============================================================================")
    135. logDebug("YARN AM launch context:")
    136. logDebug(s" user class: ${Option(args.userClass).getOrElse("N/A")}")
    137. logDebug(" env:")
    138. if (log.isDebugEnabled) {
    139. Utils.redact(sparkConf, launchEnv.toSeq).foreach { case (k, v) =>
    140. logDebug(s" $k -> $v")
    141. }
    142. }
    143. logDebug(" resources:")
    144. localResources.foreach { case (k, v) => logDebug(s" $k -> $v")}
    145. logDebug(" command:")
    146. logDebug(s" ${printableCommands.mkString(" ")}")
    147. logDebug("===============================================================================")
    148. // send the acl settings into YARN to control who has access via YARN interfaces
    149. val securityManager = new SecurityManager(sparkConf)
    150. amContainer.setApplicationACLs(
    151. YarnSparkHadoopUtil.getApplicationAclsForYarn(securityManager).asJava)
    152. setupSecurityToken(amContainer)
    153. amContainer
    154. }

    我们发现有各种jvm的配置参数javaOpts

    Utils.classForName("org.apache.spark.deploy.yarn.ApplicationMaster").getName

    如果是集群模式,通过反射获取类ApplicationMaster并赋值给amClass

    Seq(amClass) ++ userClass ++ userJar ++ primaryPyFile ++ primaryRFile ++ userArgs ++

    组合封装得到amArgs

    最重要的是有一行

    Seq(Environment.JAVA_HOME.$$() + "/bin/java", "-server") ++ javaOpts ++ amArgs ++

     就是通过 java ApplicationMaster 命令运行其main方法来启动AM进程,也就是说,启动AM的操作被封装成启动命令放入了容器的启动上下文中

    YarnClientImpl#submitApplication->

    rmClient.submitApplication(request)

    把指令提交给rm,rm让nm执行就会启动ApplicationMaster进程,我们分析ApplicationMaster的伴生对象中的main方法

    3.ApplicationMaster#main

    1. def main(args: Array[String]): Unit = {
    2. SignalUtils.registerLogger(log)
    3. val amArgs = new ApplicationMasterArguments(args)
    4. val sparkConf = new SparkConf()
    5. if (amArgs.propertiesFile != null) {
    6. Utils.getPropertiesFromFile(amArgs.propertiesFile).foreach { case (k, v) =>
    7. sparkConf.set(k, v)
    8. }
    9. }
    10. // Set system properties for each config entry. This covers two use cases:
    11. // - The default configuration stored by the SparkHadoopUtil class
    12. // - The user application creating a new SparkConf in cluster mode
    13. //
    14. // Both cases create a new SparkConf object which reads these configs from system properties.
    15. sparkConf.getAll.foreach { case (k, v) =>
    16. sys.props(k) = v
    17. }
    18. val yarnConf = new YarnConfiguration(SparkHadoopUtil.newConfiguration(sparkConf))
    19. master = new ApplicationMaster(amArgs, sparkConf, yarnConf)
    20. val ugi = sparkConf.get(PRINCIPAL) match {
    21. // We only need to log in with the keytab in cluster mode. In client mode, the driver
    22. // handles the user keytab.
    23. case Some(principal) if master.isClusterMode =>
    24. val originalCreds = UserGroupInformation.getCurrentUser().getCredentials()
    25. SparkHadoopUtil.get.loginUserFromKeytab(principal, sparkConf.get(KEYTAB).orNull)
    26. val newUGI = UserGroupInformation.getCurrentUser()
    27. if (master.appAttemptId == null || master.appAttemptId.getAttemptId > 1) {
    28. // Re-obtain delegation tokens if this is not a first attempt, as they might be outdated
    29. // as of now. Add the fresh tokens on top of the original user's credentials (overwrite).
    30. // Set the context class loader so that the token manager has access to jars
    31. // distributed by the user.
    32. Utils.withContextClassLoader(master.userClassLoader) {
    33. val credentialManager = new HadoopDelegationTokenManager(sparkConf, yarnConf, null)
    34. credentialManager.obtainDelegationTokens(originalCreds)
    35. }
    36. }
    37. // Transfer the original user's tokens to the new user, since it may contain needed tokens
    38. // (such as those user to connect to YARN).
    39. newUGI.addCredentials(originalCreds)
    40. newUGI
    41. case _ =>
    42. SparkHadoopUtil.get.createSparkUser()
    43. }
    44. ugi.doAs(new PrivilegedExceptionAction[Unit]() {
    45. override def run(): Unit = System.exit(master.run())
    46. })
    47. }

    ①new ApplicationMasterArguments(args)

    一看就是和命令行参数有关的

    ApplicationMasterArguments中的parseArgs,仍然是熟悉的解析--class、--jar等参数

    ②new ApplicationMaster

    构建AM对象,点进去可以查看其构造细节

    ③master.run()

    运行am

    4.ApplicationMaster#run

    1. final def run(): Int = {
    2. try {
    3. val attemptID = if (isClusterMode) {
    4. // Set the web ui port to be ephemeral for yarn so we don't conflict with
    5. // other spark processes running on the same box
    6. System.setProperty(UI_PORT.key, "0")
    7. // Set the master and deploy mode property to match the requested mode.
    8. System.setProperty("spark.master", "yarn")
    9. System.setProperty(SUBMIT_DEPLOY_MODE.key, "cluster")
    10. // Set this internal configuration if it is running on cluster mode, this
    11. // configuration will be checked in SparkContext to avoid misuse of yarn cluster mode.
    12. System.setProperty("spark.yarn.app.id", appAttemptId.getApplicationId().toString())
    13. Option(appAttemptId.getAttemptId.toString)
    14. } else {
    15. None
    16. }
    17. new CallerContext(
    18. "APPMASTER", sparkConf.get(APP_CALLER_CONTEXT),
    19. Option(appAttemptId.getApplicationId.toString), attemptID).setCurrentContext()
    20. logInfo("ApplicationAttemptId: " + appAttemptId)
    21. // This shutdown hook should run *after* the SparkContext is shut down.
    22. val priority = ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY - 1
    23. ShutdownHookManager.addShutdownHook(priority) { () =>
    24. val maxAppAttempts = client.getMaxRegAttempts(sparkConf, yarnConf)
    25. val isLastAttempt = appAttemptId.getAttemptId() >= maxAppAttempts
    26. if (!finished) {
    27. // The default state of ApplicationMaster is failed if it is invoked by shut down hook.
    28. // This behavior is different compared to 1.x version.
    29. // If user application is exited ahead of time by calling System.exit(N), here mark
    30. // this application as failed with EXIT_EARLY. For a good shutdown, user shouldn't call
    31. // System.exit(0) to terminate the application.
    32. finish(finalStatus,
    33. ApplicationMaster.EXIT_EARLY,
    34. "Shutdown hook called before final status was reported.")
    35. }
    36. if (!unregistered) {
    37. // we only want to unregister if we don't want the RM to retry
    38. if (finalStatus == FinalApplicationStatus.SUCCEEDED || isLastAttempt) {
    39. unregister(finalStatus, finalMsg)
    40. cleanupStagingDir(new Path(System.getenv("SPARK_YARN_STAGING_DIR")))
    41. }
    42. }
    43. }
    44. if (isClusterMode) {
    45. runDriver()
    46. } else {
    47. runExecutorLauncher()
    48. }
    49. } catch {
    50. case e: Exception =>
    51. // catch everything else if not specifically handled
    52. logError("Uncaught exception: ", e)
    53. finish(FinalApplicationStatus.FAILED,
    54. ApplicationMaster.EXIT_UNCAUGHT_EXCEPTION,
    55. "Uncaught exception: " + StringUtils.stringifyException(e))
    56. } finally {
    57. try {
    58. metricsSystem.foreach { ms =>
    59. ms.report()
    60. ms.stop()
    61. }
    62. } catch {
    63. case e: Exception =>
    64. logWarning("Exception during stopping of the metric system: ", e)
    65. }
    66. }
    67. exitCode
    68. }

    if (isClusterMode) runDriver()

    如果是集群模式就启动Driver端(线程)

     runExecutorLauncher()

    否则(client模式)就运行ExecutorLauncher,由它负责后续Executor的申请与启动

    5.ApplicationMaster#runDriver

    1. private def runDriver(): Unit = {
    2. addAmIpFilter(None, System.getenv(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV))
    3. userClassThread = startUserApplication()
    4. // This a bit hacky, but we need to wait until the spark.driver.port property has
    5. // been set by the Thread executing the user class.
    6. logInfo("Waiting for spark context initialization...")
    7. val totalWaitTime = sparkConf.get(AM_MAX_WAIT_TIME)
    8. try {
    9. val sc = ThreadUtils.awaitResult(sparkContextPromise.future,
    10. Duration(totalWaitTime, TimeUnit.MILLISECONDS))
    11. if (sc != null) {
    12. val rpcEnv = sc.env.rpcEnv
    13. val userConf = sc.getConf
    14. val host = userConf.get(DRIVER_HOST_ADDRESS)
    15. val port = userConf.get(DRIVER_PORT)
    16. registerAM(host, port, userConf, sc.ui.map(_.webUrl), appAttemptId)
    17. val driverRef = rpcEnv.setupEndpointRef(
    18. RpcAddress(host, port),
    19. YarnSchedulerBackend.ENDPOINT_NAME)
    20. createAllocator(driverRef, userConf, rpcEnv, appAttemptId, distCacheConf)
    21. } else {
    22. // Sanity check; should never happen in normal operation, since sc should only be null
    23. // if the user app did not create a SparkContext.
    24. throw new IllegalStateException("User did not initialize spark context!")
    25. }
    26. resumeDriver()
    27. userClassThread.join()
    28. } catch {
    29. case e: SparkException if e.getCause().isInstanceOf[TimeoutException] =>
    30. logError(
    31. s"SparkContext did not initialize after waiting for $totalWaitTime ms. " +
    32. "Please check earlier log output for errors. Failing the application.")
    33. finish(FinalApplicationStatus.FAILED,
    34. ApplicationMaster.EXIT_SC_NOT_INITED,
    35. "Timed out waiting for SparkContext.")
    36. } finally {
    37. resumeDriver()
    38. }
    39. }

    ①startUserApplication()

    在一个单独的线程内启动一个包含spark driver的用户类

    ②ThreadUtils.awaitResult

    阻塞当前线程,等待结果sc。sc是在用户线程创建完SparkContext之后才会被填入的,也就是说,在SparkContext初始化完成之前,这里的资源申请流程会一直阻塞等待

    ③registerAM

    ApplicationMaster需要申请资源,要与ResourceManager进行交互,所以向rm注册am

    ④createAllocator

    创建分配器

    ⑤resumeDriver()

    用notify通知(表示恢复Driver线程),那么就可以继续往下执行我们编写的业务计算逻辑了。因为在资源创建完成之前,我们的业务计算逻辑是被阻塞的

    6.ApplicationMaster#startUserApplication

    1. private def startUserApplication(): Thread = {
    2. logInfo("Starting the user application in a separate Thread")
    3. var userArgs = args.userArgs
    4. if (args.primaryPyFile != null && args.primaryPyFile.endsWith(".py")) {
    5. // When running pyspark, the app is run using PythonRunner. The second argument is the list
    6. // of files to add to PYTHONPATH, which Client.scala already handles, so it's empty.
    7. userArgs = Seq(args.primaryPyFile, "") ++ userArgs
    8. }
    9. if (args.primaryRFile != null &&
    10. (args.primaryRFile.endsWith(".R") || args.primaryRFile.endsWith(".r"))) {
    11. // TODO(davies): add R dependencies here
    12. }
    13. val mainMethod = userClassLoader.loadClass(args.userClass)
    14. .getMethod("main", classOf[Array[String]])
    15. val userThread = new Thread {
    16. override def run(): Unit = {
    17. try {
    18. if (!Modifier.isStatic(mainMethod.getModifiers)) {
    19. logError(s"Could not find static main method in object ${args.userClass}")
    20. finish(FinalApplicationStatus.FAILED, ApplicationMaster.EXIT_EXCEPTION_USER_CLASS)
    21. } else {
    22. mainMethod.invoke(null, userArgs.toArray)
    23. finish(FinalApplicationStatus.SUCCEEDED, ApplicationMaster.EXIT_SUCCESS)
    24. logDebug("Done running user class")
    25. }
    26. } catch {
    27. case e: InvocationTargetException =>
    28. e.getCause match {
    29. case _: InterruptedException =>
    30. // Reporter thread can interrupt to stop user class
    31. case SparkUserAppException(exitCode) =>
    32. val msg = s"User application exited with status $exitCode"
    33. logError(msg)
    34. finish(FinalApplicationStatus.FAILED, exitCode, msg)
    35. case cause: Throwable =>
    36. logError("User class threw exception: " + cause, cause)
    37. finish(FinalApplicationStatus.FAILED,
    38. ApplicationMaster.EXIT_EXCEPTION_USER_CLASS,
    39. "User class threw exception: " + StringUtils.stringifyException(cause))
    40. }
    41. sparkContextPromise.tryFailure(e.getCause())
    42. } finally {
    43. // Notify the thread waiting for the SparkContext, in case the application did not
    44. // instantiate one. This will do nothing when the user code instantiates a SparkContext
    45. // (with the correct master), or when the user code throws an exception (due to the
    46. // tryFailure above).
    47. sparkContextPromise.trySuccess(null)
    48. }
    49. }
    50. }
    51. userThread.setContextClassLoader(userClassLoader)
    52. userThread.setName("Driver")
    53. userThread.start()
    54. userThread
    55. }

    ①userClassLoader.loadClass(args.userClass)      .getMethod

    使用类加载器加载userClass,并获取其main方法

    ②new Thread

    创建一个线程

    ③userThread.setName("Driver")    userThread.start()

    设置线程名为Driver并启动

    启动之后就调用run方法

    先判断main方法是否是静态的:不是静态就报错,是静态就通过反射调用。此时执行的就是我们编写的用户代码中的main方法,其中会完成SparkContext的初始化;SparkContext初始化完成后,前面提到的阻塞等待(awaitResult)就会成功返回,主流程可以继续往下走了

    总览

    这一期涉及到的源码流程图如下:

  • 相关阅读:
    Linux CentOS7 vim寄存器
    力扣(LeetCode)301. 删除无效的括号(2022.10.29)
    【Linux】基础:进程控制
    程序本地能运行,服务器上不能(以flink为例)
    天玑8000和骁龙870哪个处理器好?
    Docker进阶——再次认识docker的概念 & Docker的结构 & Docker镜像结构 & 镜像的构建方式
    Unity游戏框架搭建
    准备用HashMap存1W条数据,构造时传10000还会触发扩容吗?存1000呢?
    LeetCode 刷题系列 -- 1425. 带限制的子序列和
    Evil.js源码解读
  • 原文地址:https://blog.csdn.net/emttxdy/article/details/124680315