class SparkCluster extends EmrCluster
Value Members
-
final
def
!=(arg0: AnyRef): Boolean
-
final
def
!=(arg0: Any): Boolean
-
final
def
##(): Int
-
final
def
==(arg0: AnyRef): Boolean
-
final
def
==(arg0: Any): Boolean
-
-
-
val
additionalMasterSecurityGroupIds: Seq[String]
-
val
additionalSlaveSecurityGroupIds: Seq[String]
-
val
amiVersion: String
-
final
def
asInstanceOf[T0]: T0
-
val
availabilityZone: Option[String]
-
val
bootstrapAction: Seq[String]
-
def
clone(): AnyRef
-
def
copy(id: PipelineObjectId = id, bootstrapAction: Seq[String] = bootstrapAction, amiVersion: String = amiVersion, masterInstanceType: Option[String] = masterInstanceType, coreInstanceType: Option[String] = coreInstanceType, coreInstanceCount: Int = coreInstanceCount, taskInstanceType: Option[String] = taskInstanceType, taskInstanceCount: Int = taskInstanceCount, taskInstanceBidPrice: Option[Double] = taskInstanceBidPrice, terminateAfter: String = terminateAfter, keyPair: Option[String] = keyPair, region: Option[String] = region, sparkVersion: String = sparkVersion, enableDebugging: Option[Boolean] = enableDebugging, supportedProducts: Option[String] = supportedProducts, subnetId: Option[String] = subnetId, role: Option[String] = role, resourceRole: Option[String] = resourceRole, availabilityZone: Option[String] = availabilityZone, coreInstanceBidPrice: Option[Double] = coreInstanceBidPrice, masterInstanceBidPrice: Option[Double] = masterInstanceBidPrice, useOnDemandOnLastAttempt: Option[Boolean] = useOnDemandOnLastAttempt, visibleToAllUsers: Option[Boolean] = visibleToAllUsers, masterSecurityGroupId: Option[String] = masterSecurityGroupId, slaveSecurityGroupId: Option[String] = slaveSecurityGroupId, additionalMasterSecurityGroupIds: Seq[String] = additionalMasterSecurityGroupIds, additionalSlaveSecurityGroupIds: Seq[String] = additionalSlaveSecurityGroupIds, hadoopSchedulerType: Option[SchedulerType] = hadoopSchedulerType, actionOnResourceFailure: Option[ActionOnResourceFailure] = actionOnResourceFailure, actionOnTaskFailure: Option[ActionOnTaskFailure] = actionOnTaskFailure): SparkCluster
-
val
coreInstanceBidPrice: Option[Double]
-
val
coreInstanceCount: Int
-
val
coreInstanceType: Option[String]
-
val
enableDebugging: Option[Boolean]
-
final
def
eq(arg0: AnyRef): Boolean
-
def
equals(arg0: Any): Boolean
-
def
finalize(): Unit
-
final
def
getClass(): Class[_]
-
def
groupedBy(group: String): SparkCluster
-
val
hadoopSchedulerType: Option[SchedulerType]
-
def
hashCode(): Int
-
-
-
lazy val
instanceCount: Int
-
final
def
isInstanceOf[T0]: Boolean
-
val
keyPair: Option[String]
-
val
masterInstanceBidPrice: Option[Double]
-
val
masterInstanceType: Option[String]
-
val
masterSecurityGroupId: Option[String]
-
def
named(name: String): SparkCluster
-
final
def
ne(arg0: AnyRef): Boolean
-
final
def
notify(): Unit
-
final
def
notifyAll(): Unit
-
-
-
val
region: Option[String]
-
val
resourceRole: Option[String]
-
val
role: Option[String]
-
implicit
def
seq2Option[A](anySeq: Seq[A]): Option[Seq[A]]
-
def
seqToOption[A, B](anySeq: Seq[A])(transform: (A) ⇒ B): Option[Seq[B]]
-
-
val
slaveSecurityGroupId: Option[String]
-
val
sparkVersion: String
-
lazy val
standardBootstrapAction: Seq[String]
-
val
subnetId: Option[String]
-
val
supportedProducts: Option[String]
-
final
def
synchronized[T0](arg0: ⇒ T0): T0
-
val
taskInstanceBidPrice: Option[Double]
-
val
taskInstanceCount: Int
-
val
taskInstanceType: Option[String]
-
val
terminateAfter: String
-
def
terminatingAfter(terminateAfter: String): SparkCluster
-
def
toString(): String
-
implicit
def
uniquePipelineId2String(id: PipelineObjectId): String
-
val
useOnDemandOnLastAttempt: Option[Boolean]
-
val
visibleToAllUsers: Option[Boolean]
-
final
def
wait(): Unit
-
final
def
wait(arg0: Long, arg1: Int): Unit
-
final
def
wait(arg0: Long): Unit
-
-
-
def
withAdditionalMasterSecurityGroupIds(securityGroupId: String*): SparkCluster
-
def
withAdditionalSlaveSecurityGroupIds(securityGroupId: String*): SparkCluster
-
def
withAmiVersion(ver: String): SparkCluster
-
def
withAvailabilityZone(availabilityZone: String): SparkCluster
-
def
withBootstrapAction(action: String*): SparkCluster
-
def
withCoreInstanceBidPrice(coreInstanceBidPrice: Double): SparkCluster
-
def
withCoreInstanceCount(instanceCount: Int): SparkCluster
-
def
withCoreInstanceType(instanceType: String): SparkCluster
-
def
withDebuggingEnabled(): SparkCluster
-
def
withHadoopSchedulerType(hadoopSchedulerType: SchedulerType): SparkCluster
-
def
withKeyPair(keyPair: String): SparkCluster
-
def
withMasterInstanceBidPrice(masterInstanceBidPrice: Double): SparkCluster
-
def
withMasterInstanceType(instanceType: String): SparkCluster
-
def
withMasterSecurityGroupId(masterSecurityGroupId: String): SparkCluster
-
def
withRegion(region: String): SparkCluster
-
def
withResourceRole(role: String): SparkCluster
-
def
withRole(role: String): SparkCluster
-
def
withSlaveSecurityGroupId(slaveSecurityGroupId: String): SparkCluster
-
def
withSparkVersion(sparkVersion: String): SparkCluster
-
def
withSubnetId(id: String): SparkCluster
-
def
withSupportedProducts(products: String): SparkCluster
-
def
withTaskInstanceBidPrice(bid: Double): SparkCluster
-
def
withTaskInstanceCount(instanceCount: Int): SparkCluster
-
def
withTaskInstanceType(instanceType: String): SparkCluster
-
def
withUseOnDemandOnLastAttempt(useOnDemandOnLastAttempt: Boolean): SparkCluster
-
def
withVisibleToAllUsers(visibleToAllUsers: Boolean): SparkCluster
Inherited from AnyRef
Inherited from Any
Class description: Launches a Spark cluster (an EMR-based pipeline resource).