spark-dev mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From CodingCat <...@git.apache.org>
Subject [GitHub] incubator-spark pull request: [SPARK-1092] print warning informati...
Date Sat, 15 Feb 2014 14:28:35 GMT
Github user CodingCat commented on the pull request:

    https://github.com/apache/incubator-spark/pull/602#issuecomment-35157378
  
    Hi @mridulm, I think it will be used in local, mesos, and standalone mode
    
    1. local
    
    case LOCAL_CLUSTER_REGEX(numSlaves, coresPerSlave, memoryPerSlave) =>
            // Check to make sure memory requested <= memoryPerSlave. Otherwise Spark will
just hang.
            val memoryPerSlaveInt = memoryPerSlave.toInt
            if (sc.executorMemory > memoryPerSlaveInt) {
              throw new SparkException(
                "Asked to launch cluster with %d MB RAM / worker but requested %d MB/worker".format(
                  memoryPerSlaveInt, sc.executorMemory))
            }
    
            val scheduler = new TaskSchedulerImpl(sc)
            val localCluster = new LocalSparkCluster(
              numSlaves.toInt, coresPerSlave.toInt, memoryPerSlaveInt)
            val masterUrls = localCluster.start()
            val backend = new SparkDeploySchedulerBackend(scheduler, sc, masterUrls, appName)
            scheduler.initialize(backend)
            backend.shutdownCallback = (backend: SparkDeploySchedulerBackend) => {
              localCluster.stop()
            }
            scheduler
    
    2. standalone (SparkDeploySchedulerBackend.scala)
    
override def start() {
    super.start()

    // The endpoint for executors to talk to us: an Akka actor path built from
    // the driver host/port stored in the Spark conf. NOTE(review): assumes
    // "spark.driver.host" and "spark.driver.port" are already set in `conf`
    // by the time start() runs — confirm against driver initialization order.
    val driverUrl = "akka.tcp://spark@%s:%s/user/%s".format(
      conf.get("spark.driver.host"),  conf.get("spark.driver.port"),
      CoarseGrainedSchedulerBackend.ACTOR_NAME)
    // Launch arguments for each executor; the {{...}} placeholders are
    // template tokens substituted later (presumably by the Worker when it
    // launches the executor process — verify against Worker-side code).
    val args = Seq(driverUrl, "{{EXECUTOR_ID}}", "{{HOSTNAME}}", "{{CORES}}", "{{WORKER_URL}}")
    // The command the standalone Worker will run to start an executor,
    // inheriting the executor environment variables from the SparkContext.
    val command = Command(
      "org.apache.spark.executor.CoarseGrainedExecutorBackend", args, sc.executorEnvs)
    val sparkHome = sc.getSparkHome()
    // Describe this application to the standalone Master: name, core cap,
    // per-executor memory, launch command, Spark home, and the web UI URL.
    val appDesc = new ApplicationDescription(appName, maxCores, sc.executorMemory, command,
      sparkHome, "http://" + sc.ui.appUIAddress)

    // Register the application with the Master(s) and begin receiving
    // executor-allocation callbacks on `this`.
    client = new AppClient(sc.env.actorSystem, masters, appDesc, this, conf)
    client.start()
  }
    
    3. CoarseMesosSchedulerBackend.scala
    
    resourceOffers(d: SchedulerDriver, offers: JList[Offer])


If your project is set up for it, you can reply to this email and have your
reply appear on GitHub as well. To do so, please top-post your response.
If your project does not have this feature enabled and wishes so, or if the
feature is enabled but not working, please contact infrastructure at
infrastructure@apache.org or file a JIRA ticket with INFRA.

Mime
View raw message