Commit 89f549f

renaming jupyter integration
1 parent f2f6f4a commit 89f549f

3 files changed: 7 additions & 7 deletions


README.md

Lines changed: 3 additions & 3 deletions
@@ -80,12 +80,12 @@ The Kotlin Spark API also supports Kotlin Jupyter notebooks.
 To use it, simply add
 
 ```jupyterpython
-%use kotlin-spark-api
+%use spark
 ```
 to the top of your notebook. This will get the latest version of the API, together with the latest version of Spark.
 To define a certain version of Spark or the API itself, simply add it like this:
 ```jupyterpython
-%use kotlin-spark-api(spark=3.2, v=1.1.0)
+%use spark(spark=3.2, v=1.1.0)
 ```
 
 Inside the notebook a Spark session will be initiated automatically. This can be accessed via the `spark` value.
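
For reference, a minimal sketch of a cell that could follow `%use spark`. The `Person` class and its values are hypothetical; `toDS` and the `spark` session are the helpers this integration injects, as the diffs below show:

```kotlin
// Runs after `%use spark`: the session `spark` and helpers such as `toDS`
// are already in scope. The data class and values are illustrative only.
data class Person(val name: String, val age: Int)

val ds = listOf(Person("Alice", 29), Person("Bob", 31)).toDS()
ds.show()  // plain-text render; notebooks also render Datasets as HTML
```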
@@ -95,7 +95,7 @@ There is also support for HTML rendering of Datasets and simple (Java)RDDs.
 
 To use Spark Streaming abilities, instead use
 ```jupyterpython
-%use kotlin-spark-api-streaming
+%use spark-streaming
 ```
 This does not start a Spark session right away, meaning you can call `withSparkStreaming(batchDuration) {}`
 in whichever cell you want.
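
A minimal, hypothetical sketch of such a cell. The one-second batch duration, the socket source on port 9999, and the `ssc` member on the block's receiver are assumptions, not part of this commit:

```kotlin
// After `%use spark-streaming`: no session is started up front, so a cell
// can open its own streaming context. All specifics below are illustrative.
withSparkStreaming(batchDuration = Durations.seconds(1)) {
    val lines = ssc.socketTextStream("localhost", 9999) // assumed local text source
    lines.map { it.length }.print()                     // per-batch line lengths
}
```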

jupyter/src/main/kotlin/org/jetbrains/kotlinx/spark/api/jupyter/SparkIntegration.kt

Lines changed: 2 additions & 2 deletions
@@ -24,7 +24,7 @@ import org.intellij.lang.annotations.Language
 import org.jetbrains.kotlinx.jupyter.api.KotlinKernelHost
 
 /**
- * %use kotlin-spark-api
+ * %use spark
  */
 @Suppress("UNUSED_VARIABLE", "LocalVariableName")
 @OptIn(ExperimentalStdlibApi::class)
@@ -49,7 +49,7 @@ internal class SparkIntegration : Integration() {
         org.apache.spark.api.java.JavaSparkContext(spark.sparkContext)
     }""".trimIndent(),
     """
-    println("Spark session has been started and is running. No `withSpark { }` necessary, you can access `spark` and `sc` directly. To use Spark streaming, use `%use kotlin-spark-api-streaming` instead.")""".trimIndent(),
+    println("Spark session has been started and is running. No `withSpark { }` necessary, you can access `spark` and `sc` directly. To use Spark streaming, use `%use spark-streaming` instead.")""".trimIndent(),
     """
     inline fun <reified T> List<T>.toDS(): Dataset<T> = toDS(spark)""".trimIndent(),
     """

jupyter/src/main/kotlin/org/jetbrains/kotlinx/spark/api/jupyter/SparkStreamingIntegration.kt

Lines changed: 2 additions & 2 deletions
@@ -42,7 +42,7 @@ import scala.collection.Iterable as ScalaIterable
 import scala.collection.Iterator as ScalaIterator
 
 /**
- * %use kotlin-spark-api-streaming
+ * %use spark-streaming
  */
 @Suppress("UNUSED_VARIABLE", "LocalVariableName")
 @OptIn(ExperimentalStdlibApi::class)
@@ -54,7 +54,7 @@ internal class SparkStreamingIntegration : Integration() {
     @Language("kts")
     val _1 = listOf(
         """
-        println("To start a spark streaming session, simply use `withSparkStreaming { }` inside a cell. To use Spark normally, use `withSpark { }` in a cell, or use `%use kotlin-spark-api` to start a Spark session for the whole notebook.")""".trimIndent(),
+        println("To start a spark streaming session, simply use `withSparkStreaming { }` inside a cell. To use Spark normally, use `withSpark { }` in a cell, or use `%use spark` to start a Spark session for the whole notebook.")""".trimIndent(),
     ).map(::execute)
     }
 }
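
Both integration classes follow the kotlin-jupyter pattern visible in the hunks above: a list of `kts` snippet strings is built and each one is executed in the kernel, which is how `spark`, `sc`, and `toDS` come to be predefined after `%use spark`. A condensed, hypothetical sketch of that pattern outside the kernel; the session-building snippet is an assumption, and `execute` here merely stands in for `KotlinKernelHost.execute`:

```kotlin
import org.intellij.lang.annotations.Language

// Stand-in for `KotlinKernelHost.execute`; the real integration compiles and
// runs each snippet inside the notebook session. Here we only print it.
fun execute(@Language("kts") snippet: String) {
    println("kernel would run: $snippet")
}

fun main() {
    // Mirrors the `listOf(...).map(::execute)` pattern from the diff above.
    // The first snippet is an assumption about how `spark` is created; the
    // other two are abbreviated from the SparkIntegration hunk.
    listOf(
        """val spark = org.apache.spark.sql.SparkSession.builder().getOrCreate()""",
        """val sc = org.apache.spark.api.java.JavaSparkContext(spark.sparkContext)""",
        """inline fun <reified T> List<T>.toDS(): Dataset<T> = toDS(spark)""",
    ).map(::execute)
}
```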
