Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,7 @@ private[v2] trait V2JDBCTest
// nullable is true in the expectedSchema because Spark always sets nullable to true
// regardless of the JDBC metadata https://github.com/apache/spark/pull/18445
var expectedSchema = new StructType().add("ID", StringType, true, defaultMetadata())
// If function is not overriden we don't want to compare external engine types
// If function is not overridden we don't want to compare external engine types
var expectedSchemaWithoutJdbcClientType =
removeMetadataFromAllFields(expectedSchema, "jdbcClientType")
var schemaWithoutJdbcClientType =
Expand All @@ -91,7 +91,7 @@ private[v2] trait V2JDBCTest
t = spark.table(s"$catalogName.alt_table")
expectedSchema = new StructType().add("ID", StringType, true, defaultMetadata())

// If function is not overriden we don't want to compare external engine types
// If function is not overridden we don't want to compare external engine types
expectedSchemaWithoutJdbcClientType =
removeMetadataFromAllFields(expectedSchema, "jdbcClientType")
schemaWithoutJdbcClientType =
Expand All @@ -118,7 +118,7 @@ private[v2] trait V2JDBCTest
.add("ID1", StringType, true, defaultMetadata())
.add("ID2", StringType, true, defaultMetadata())

// If function is not overriden we don't want to compare external engine types
// If function is not overridden we don't want to compare external engine types
val expectedSchemaWithoutJdbcClientType =
removeMetadataFromAllFields(expectedSchema, "jdbcClientType")
val schemaWithoutJdbcClientType =
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -1486,7 +1486,7 @@ private[scheduler] case class BarrierPendingLaunchTask(
taskLocality: TaskLocality.TaskLocality,
assignedResources: Map[String, Map[String, Long]]) {
// Stored the corresponding index of the WorkerOffer which is responsible to launch the task.
// Used to revert the assigned resources (e.g., cores, custome resources) when the barrier
// Used to revert the assigned resources (e.g., cores, custom resources) when the barrier
// task set doesn't launch successfully in a single resourceOffers round.
var assignedOfferIndex: Int = _
var assignedCores: Int = 0
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -855,7 +855,7 @@ private[spark] class Client(
* The archive also contains some Spark configuration. Namely, it saves the contents of
* SparkConf in a file to be loaded by the AM process.
*
* @param confsToOverride configs that should overriden when creating the final spark conf file
* @param confsToOverride configs that should be overridden when creating the final spark conf file
*/
private def createConfArchive(confsToOverride: Map[String, String]): File = {
val hadoopConfFiles = new HashMap[String, File]()
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -579,7 +579,7 @@ private[sql] case class UpdateFields(

/**
* Evaluate one or more conditional branches. The value of the first branch for which the
* predicate evalutes to true is returned. If none of the branches evaluate to true, the value of
* predicate evaluates to true is returned. If none of the branches evaluate to true, the value of
* `otherwise` is returned.
*
* @param branches
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2575,7 +2575,7 @@ case class Get(left: Expression, right: Expression)

override def inputTypes: Seq[AbstractDataType] = left.dataType match {
case _: ArrayType => Seq(ArrayType, IntegerType)
// Do not apply implicit cast if the first arguement is not array type.
// Do not apply implicit cast if the first argument is not array type.
case _ => Nil
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ import org.apache.spark.sql.connector.catalog.{Identifier, Table, TableCatalog}
import org.apache.spark.sql.streaming.{OutputMode, Trigger}

/**
* A statement for Stream writing. It contains all neccessary param and will be resolved in the
* A statement for Stream writing. It contains all necessary params and will be resolved in the
* rule [[ResolveStreamWrite]].
*
* @param userSpecifiedName Query name optionally specified by the user.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3052,7 +3052,7 @@ class RocksDBStateStoreSuite extends StateStoreSuiteBase[RocksDBStateStoreProvid
val threadId = threadInfo.get.threadRef.get.get.getId
assert(
threadId == Thread.currentThread().getId,
s"acquired thread should be curent thread ${Thread.currentThread().getId} " +
s"acquired thread should be current thread ${Thread.currentThread().getId} " +
s"after load but was $threadId")
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2209,7 +2209,7 @@ abstract class StateStoreSuiteBase[ProviderClass <: StateStoreProvider]
version = store.commit()
}

// Reload the store from the commited version and repeat the above test.
// Reload the store from the committed version and repeat the above test.
tryWithProviderResource(newStoreProvider(storeId, colFamiliesEnabled)) { provider =>
assert(version > 0)
val store = provider.getStore(version)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -159,16 +159,16 @@ class StreamRealTimeModeE2ESuite extends StreamRealTimeModeE2ESuiteBase {
.batchId should be(i + 1)
query.lastProgress.sources(0).numInputRows should be(numRows * 3)

val commitedResults = new mutable.ListBuffer[String]()
val committedResults = new mutable.ListBuffer[String]()
val numPartitions = if (withUnion) 10 else 5
for (v <- 0 until numPartitions) {
val it = ResultsCollector.get(s"$uniqueSinkName-${i}-$v").iterator()
while (it.hasNext) {
commitedResults += it.next()
committedResults += it.next()
}
}

commitedResults.sorted should equal(expectedResultsByBatch(i).sorted)
committedResults.sorted should equal(expectedResultsByBatch(i).sorted)
}
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -362,7 +362,7 @@ class StreamRealTimeModeWithManualClockSuite extends StreamRealTimeModeManualClo
}

test("purge offsetLog when it doesn't match with the commit log") {
// Simulate when the query fails after commiting the offset log but before the commit log
// Simulate when the query fails after committing the offset log but before the commit log
// by manually deleting the last entry of the commit log.
val inputData = LowLatencyMemoryStream[Int](1)
val mapped = inputData.toDS().map(_ + 1)
Expand Down