diff --git a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala index df5dfdf7deafb..ccbe203aada89 100644 --- a/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala +++ b/connector/docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/v2/V2JDBCTest.scala @@ -81,7 +81,7 @@ private[v2] trait V2JDBCTest // nullable is true in the expectedSchema because Spark always sets nullable to true // regardless of the JDBC metadata https://github.com/apache/spark/pull/18445 var expectedSchema = new StructType().add("ID", StringType, true, defaultMetadata()) - // If function is not overriden we don't want to compare external engine types + // If function is not overridden we don't want to compare external engine types var expectedSchemaWithoutJdbcClientType = removeMetadataFromAllFields(expectedSchema, "jdbcClientType") var schemaWithoutJdbcClientType = @@ -91,7 +91,7 @@ private[v2] trait V2JDBCTest t = spark.table(s"$catalogName.alt_table") expectedSchema = new StructType().add("ID", StringType, true, defaultMetadata()) - // If function is not overriden we don't want to compare external engine types + // If function is not overridden we don't want to compare external engine types expectedSchemaWithoutJdbcClientType = removeMetadataFromAllFields(expectedSchema, "jdbcClientType") schemaWithoutJdbcClientType = @@ -118,7 +118,7 @@ private[v2] trait V2JDBCTest .add("ID1", StringType, true, defaultMetadata()) .add("ID2", StringType, true, defaultMetadata()) - // If function is not overriden we don't want to compare external engine types + // If function is not overridden we don't want to compare external engine types val expectedSchemaWithoutJdbcClientType = removeMetadataFromAllFields(expectedSchema, "jdbcClientType") val schemaWithoutJdbcClientType = diff --git 
a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala index 69e0a10a34b28..aa93ece3b49e5 100644 --- a/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala +++ b/core/src/main/scala/org/apache/spark/scheduler/TaskSetManager.scala @@ -1486,7 +1486,7 @@ private[scheduler] case class BarrierPendingLaunchTask( taskLocality: TaskLocality.TaskLocality, assignedResources: Map[String, Map[String, Long]]) { // Stored the corresponding index of the WorkerOffer which is responsible to launch the task. - // Used to revert the assigned resources (e.g., cores, custome resources) when the barrier + // Used to revert the assigned resources (e.g., cores, custom resources) when the barrier // task set doesn't launch successfully in a single resourceOffers round. var assignedOfferIndex: Int = _ var assignedCores: Int = 0 diff --git a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala index 09655007acb34..8d34b6eaf5126 100644 --- a/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala +++ b/resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/Client.scala @@ -855,7 +855,7 @@ private[spark] class Client( * The archive also contains some Spark configuration. Namely, it saves the contents of * SparkConf in a file to be loaded by the AM process. 
* - * @param confsToOverride configs that should overriden when creating the final spark conf file + * @param confsToOverride configs that should be overridden when creating the final spark conf file */ private def createConfArchive(confsToOverride: Map[String, String]): File = { val hadoopConfFiles = new HashMap[String, File]() diff --git a/sql/api/src/main/scala/org/apache/spark/sql/internal/columnNodes.scala b/sql/api/src/main/scala/org/apache/spark/sql/internal/columnNodes.scala index 4a7339165cb57..4b5db024e617a 100644 --- a/sql/api/src/main/scala/org/apache/spark/sql/internal/columnNodes.scala +++ b/sql/api/src/main/scala/org/apache/spark/sql/internal/columnNodes.scala @@ -579,7 +579,7 @@ private[sql] case class UpdateFields( /** * Evaluate one or more conditional branches. The value of the first branch for which the - * predicate evalutes to true is returned. If none of the branches evaluate to true, the value of + * predicate evaluates to true is returned. If none of the branches evaluate to true, the value of * `otherwise` is returned. * * @param branches diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala index dc3e6dcbd388c..bbf035f45d29c 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/collectionOperations.scala @@ -2575,7 +2575,7 @@ case class Get(left: Expression, right: Expression) override def inputTypes: Seq[AbstractDataType] = left.dataType match { case _: ArrayType => Seq(ArrayType, IntegerType) - // Do not apply implicit cast if the first arguement is not array type. + // Do not apply implicit cast if the first argument is not array type. 
case _ => Nil } diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/streaming/WriteToStreamStatement.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/streaming/WriteToStreamStatement.scala index 7015d0dd3b2cc..fe061eddd22a5 100644 --- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/streaming/WriteToStreamStatement.scala +++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/streaming/WriteToStreamStatement.scala @@ -26,7 +26,7 @@ import org.apache.spark.sql.connector.catalog.{Identifier, Table, TableCatalog} import org.apache.spark.sql.streaming.{OutputMode, Trigger} /** - * A statement for Stream writing. It contains all neccessary param and will be resolved in the + * A statement for Stream writing. It contains all necessary params and will be resolved in the * rule [[ResolveStreamWrite]]. * * @param userSpecifiedName Query name optionally specified by the user. diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/RocksDBStateStoreSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/RocksDBStateStoreSuite.scala index 3e4b4b7320f53..b1fd7b2d5e55c 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/RocksDBStateStoreSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/RocksDBStateStoreSuite.scala @@ -3052,7 +3052,7 @@ class RocksDBStateStoreSuite extends StateStoreSuiteBase[RocksDBStateStoreProvid val threadId = threadInfo.get.threadRef.get.get.getId assert( threadId == Thread.currentThread().getId, - s"acquired thread should be curent thread ${Thread.currentThread().getId} " + + s"acquired thread should be current thread ${Thread.currentThread().getId} " + s"after load but was $threadId") } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala index 7570cf942f061..a362d418cfbe7 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/streaming/state/StateStoreSuite.scala @@ -2209,7 +2209,7 @@ abstract class StateStoreSuiteBase[ProviderClass <: StateStoreProvider] version = store.commit() } - // Reload the store from the commited version and repeat the above test. + // Reload the store from the committed version and repeat the above test. tryWithProviderResource(newStoreProvider(storeId, colFamiliesEnabled)) { provider => assert(version > 0) val store = provider.getStore(version) diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamRealTimeModeE2ESuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamRealTimeModeE2ESuite.scala index 3615edc75cb2c..7854c90563332 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamRealTimeModeE2ESuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamRealTimeModeE2ESuite.scala @@ -159,16 +159,16 @@ class StreamRealTimeModeE2ESuite extends StreamRealTimeModeE2ESuiteBase { .batchId should be(i + 1) query.lastProgress.sources(0).numInputRows should be(numRows * 3) - val commitedResults = new mutable.ListBuffer[String]() + val committedResults = new mutable.ListBuffer[String]() val numPartitions = if (withUnion) 10 else 5 for (v <- 0 until numPartitions) { val it = ResultsCollector.get(s"$uniqueSinkName-${i}-$v").iterator() while (it.hasNext) { - commitedResults += it.next() + committedResults += it.next() } } - commitedResults.sorted should equal(expectedResultsByBatch(i).sorted) + committedResults.sorted should equal(expectedResultsByBatch(i).sorted) } } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamRealTimeModeSuite.scala 
b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamRealTimeModeSuite.scala index 2ff8fd90bb9c9..f81dc44dcacbd 100644 --- a/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamRealTimeModeSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamRealTimeModeSuite.scala @@ -362,7 +362,7 @@ class StreamRealTimeModeWithManualClockSuite extends StreamRealTimeModeManualClo } test("purge offsetLog when it doesn't match with the commit log") { - // Simulate when the query fails after commiting the offset log but before the commit log + // Simulate when the query fails after committing the offset log but before the commit log // by manually deleting the last entry of the commit log. val inputData = LowLatencyMemoryStream[Int](1) val mapped = inputData.toDS().map(_ + 1)