diff --git a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala index 6f1130853c5a1..949865bf94f2b 100644 --- a/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala +++ b/resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/Config.scala @@ -65,10 +65,10 @@ private[spark] object Config extends Logging { val KUBERNETES_USE_LEGACY_PVC_ACCESS_MODE = ConfigBuilder("spark.kubernetes.legacy.useReadWriteOnceAccessMode") - .internal() - .doc("If true, use ReadWriteOnce instead of ReadWriteOncePod as persistence volume " + - "access mode.") - .version("3.4.3") + .doc("If true, use ReadWriteOnce instead of ReadWriteOncePod as the access mode for " + + "dynamically created PersistentVolumeClaims. Set this to true when using a storage " + + "class or Kubernetes cluster that does not support the ReadWriteOncePod access mode.") + .version("3.4.3") .booleanConf .createWithDefault(false) diff --git a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/MountVolumesFeatureStepSuite.scala b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/MountVolumesFeatureStepSuite.scala index 293773ddb9ec5..4afb4ba7798f0 100644 --- a/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/MountVolumesFeatureStepSuite.scala +++ b/resource-managers/kubernetes/core/src/test/scala/org/apache/spark/deploy/k8s/features/MountVolumesFeatureStepSuite.scala @@ -18,8 +18,12 @@ package org.apache.spark.deploy.k8s.features import scala.jdk.CollectionConverters._ +import io.fabric8.kubernetes.api.model.PersistentVolumeClaim + +import org.apache.spark.SparkConf import org.apache.spark.SparkFunSuite import org.apache.spark.deploy.k8s._ +import org.apache.spark.deploy.k8s.Config.KUBERNETES_USE_LEGACY_PVC_ACCESS_MODE class 
MountVolumesFeatureStepSuite extends SparkFunSuite { test("Mounts hostPath volumes") { @@ -540,6 +544,48 @@ class MountVolumesFeatureStepSuite extends SparkFunSuite { assert(executorPVC.getClaimName.endsWith("-exec-1-pvc-0")) } + test("SPARK-55330: OnDemand PVC uses ReadWriteOncePod access mode by default") { + val volumeConf = KubernetesVolumeSpec( + "testVolume", + "/tmp", + "", + "", + false, + KubernetesPVCVolumeConf(MountVolumesFeatureStep.PVC_ON_DEMAND, + storageClass = Some("fast"), + size = Some("1Gi")) + ) + val executorConf = KubernetesTestConf.createExecutorConf(volumes = Seq(volumeConf)) + val step = new MountVolumesFeatureStep(executorConf) + step.configurePod(SparkPod.initialPod()) + val pvcs = step.getAdditionalKubernetesResources().map(_.asInstanceOf[PersistentVolumeClaim]) + assert(pvcs.size === 1) + assert(pvcs.head.getSpec.getAccessModes.asScala === Seq("ReadWriteOncePod")) + } + + test("SPARK-55330: OnDemand PVC uses ReadWriteOnce when legacy access mode is enabled") { + val volumeConf = KubernetesVolumeSpec( + "testVolume", + "/tmp", + "", + "", + false, + KubernetesPVCVolumeConf(MountVolumesFeatureStep.PVC_ON_DEMAND, + storageClass = Some("fast"), + size = Some("1Gi")) + ) + val sparkConf = new SparkConf(false) + .set(KUBERNETES_USE_LEGACY_PVC_ACCESS_MODE, true) + val executorConf = KubernetesTestConf.createExecutorConf( + sparkConf = sparkConf, + volumes = Seq(volumeConf)) + val step = new MountVolumesFeatureStep(executorConf) + step.configurePod(SparkPod.initialPod()) + val pvcs = step.getAdditionalKubernetesResources().map(_.asInstanceOf[PersistentVolumeClaim]) + assert(pvcs.size === 1) + assert(pvcs.head.getSpec.getAccessModes.asScala === Seq("ReadWriteOnce")) + } + test("SPARK-49833: Mount multiple volumes to executor with annotations") { val pvcVolumeConf1 = KubernetesVolumeSpec( "checkpointVolume1",