diff --git a/antora.yml b/antora.yml
index 2e000c4..6e6bf37 100644
--- a/antora.yml
+++ b/antora.yml
@@ -10,9 +10,16 @@ asciidoc:
   attributes:
     company: 'DataStax'
     protocol_version: '2.10'
-    pulsar-operator: 'KAAP Operator'
-    pulsar-operator-full-name: 'Kubernetes Autoscaling for Apache Pulsar (KAAP)'
+    product-short: 'KAAP Operator'
+    product: 'Kubernetes Autoscaling for Apache Pulsar (KAAP)'
     pulsar-stack: 'KAAP stack'
     pulsar: 'Apache Pulsar'
     pulsar-short: 'Pulsar'
-    pulsar-reg: 'Apache Pulsar(TM)'
\ No newline at end of file
+    pulsar-reg: 'Apache Pulsar(TM)'
+    starlight-kafka: 'Starlight for Kafka'
+    kafka-reg: 'Apache Kafka(R)'
+    kafka-short: 'Kafka'
+    cr: 'custom resource (CR)'
+    cr-short: 'CR'
+    crd: 'custom resource definition (CRD)'
+    crd-short: 'CRD'
\ No newline at end of file
diff --git a/local-preview-playbook.yml b/local-preview-playbook.yml
index df413b4..69689d4 100644
--- a/local-preview-playbook.yml
+++ b/local-preview-playbook.yml
@@ -54,23 +54,176 @@ asciidoc:
     xrefstyle: short
     # CUSTOM ATTRIBUTES
     company: 'DataStax'
+    trust-center: 'IBM Trust Center'
+    trust-center-url: 'https://www.ibm.com/trust'
+    trust-center-link: '{trust-center-url}[{trust-center}]'
+    support-url: 'https://www.ibm.com/mysupport/s/'
+    dsbulk: 'DataStax Bulk Loader (DSBulk)'
+    dsbulk-short: 'DSBulk'
+    dsbulk-repo: 'https://github.com/datastax/dsbulk'
+    astra: 'Astra'
+    astra-db: 'Astra DB'
+    astra-ui: 'Astra Portal'
+    astra-url: 'https://astra.datastax.com'
+    astra-ui-link: '{astra-url}[{astra-ui}^]'
+    db-classic: 'Managed Cluster'
+    db-serverless: 'Serverless (non-vector)'
+    db-serverless-vector: 'Serverless (vector)'
+    scb: 'Secure Connect Bundle (SCB)'
+    scb-short: 'SCB'
+    scb-brief: 'Secure Connect Bundle'
+    devops-api: 'DevOps API'
+    devops-api-ref-url: 'xref:astra-api-docs:ROOT:attachment$devops-api/index.html'
+    astra-cli: 'Astra CLI'
+    astra-stream: 'Astra Streaming'
+    starlight-kafka: 'Starlight for Kafka'
+    starlight-rabbitmq: 'Starlight for RabbitMQ'
+    astra-streaming-examples-repo: 'https://github.com/datastax/astra-streaming-examples'
+    sstable-sideloader: '{astra-db} Sideloader'
+    zdm: 'Zero Downtime Migration'
+    zdm-short: 'ZDM'
+    zdm-proxy: 'ZDM Proxy'
+    cass-migrator: 'Cassandra Data Migrator (CDM)'
+    cass-migrator-short: 'CDM'
+    hcd: 'Hyper-Converged Database (HCD)'
+    hcd-short: 'HCD'
+    dse: 'DataStax Enterprise (DSE)'
+    dse-short: 'DSE'
+    metrics-collector: 'DSE Metrics Collector'
+    mc: 'Mission Control'
+    opscenter: 'DSE OpsCenter'
+    studio: 'DataStax Studio'
+    cass-reg: 'Apache Cassandra(R)'
+    cass: 'Apache Cassandra'
+    cass-short: 'Cassandra'
+    cql: 'Cassandra Query Language (CQL)'
+    cql-shell: 'CQL shell'
+    cql-console: 'CQL console'
+    cql-service: 'CQL Service'
+    pulsar-reg: 'Apache Pulsar(TM)'
+    pulsar: 'Apache Pulsar'
+    pulsar-short: 'Pulsar'
+    spark-reg: 'Apache Spark(TM)'
+    spark: 'Apache Spark'
+    spark-short: 'Spark'
+    spark-connect: 'Spark Connect'
+    spark-connector: 'Apache Cassandra Spark Connector'
+    spark-connector-short: 'Spark Connector'
+    kafka-reg: 'Apache Kafka(R)'
+    kafka: 'Apache Kafka'
+    kafka-short: 'Kafka'
+    kafka-connect: 'Kafka Connect'
+    kafka-connector: 'DataStax Apache Kafka Connector'
+    kafka-connector-short: 'Kafka Connector'
+    solr-reg: 'Apache Solr(TM)'
+    solr: 'Apache Solr'
+    solr-short: 'Solr'
+    lucene-reg: 'Apache Lucene(TM)'
+    lucene: 'Apache Lucene'
+    lucene-short: 'Lucene'
+    hadoop-reg: 'Apache Hadoop(R)'
+    hadoop: 'Apache Hadoop'
+    hadoop-short: 'Hadoop'
+    airflow-reg: 'Apache Airflow(R)'
+    airflow: 'Apache Airflow'
+    airflow-short: 'Airflow'
+    maven-reg: 'Apache Maven(TM)'
+    maven: 'Apache Maven'
+    maven-short: 'Maven'
+    flink-reg: 'Apache Flink(R)'
+    flink: 'Apache Flink'
+    flink-short: 'Flink'
+    beam-reg: 'Apache Beam(R)'
+    beam: 'Apache Beam'
+    beam-short: 'Beam'
+    geode-reg: 'Apache Geode(TM)'
+    geode: 'Apache Geode'
+    geode-short: 'Geode'
+    hbase-reg: 'Apache HBase(R)'
+    hbase: 'Apache HBase'
+    hbase-short: 'HBase'
+    kudu-reg: 'Apache Kudu(TM)'
+    kudu: 'Apache Kudu'
+    kudu-short: 'Kudu'
+    phoenix-reg: 'Apache Phoenix(TM)'
+    phoenix: 'Apache Phoenix'
+    phoenix-short: 'Phoenix'
+    zookeeper-reg: 'Apache ZooKeeper(TM)'
+    zookeeper: 'Apache ZooKeeper'
+    zookeeper-short: 'ZooKeeper'
+    asf: 'Apache Software Foundation (ASF)'
+    asf-short: 'ASF'
+    tinkerpop-reg: 'Apache TinkerPop(TM)'
+    tinkerpop: 'Apache TinkerPop'
+    tinkerpop-short: 'TinkerPop'
+    cloudstack-reg: 'Apache CloudStack(R)'
+    cloudstack: 'Apache CloudStack'
+    cloudstack-short: 'CloudStack'
+    tomcat-reg: 'Apache Tomcat(R)'
+    tomcat: 'Apache Tomcat'
+    tomcat-short: 'Tomcat'
+    ajp: 'Apache JServ Protocol (AJP)'
+    ajp-short: 'AJP'
+    activemq-reg: 'Apache ActiveMQ(R)'
+    activemq: 'Apache ActiveMQ'
+    activemq-short: 'ActiveMQ'
+    tomee-reg: 'Apache TomEE(TM)'
+    tomee: 'Apache TomEE'
+    tomee-short: 'TomEE'
+    bookkeeper-reg: 'Apache BookKeeper(TM)'
+    bookkeeper: 'Apache BookKeeper'
+    bookkeeper-short: 'BookKeeper'
+    groovy-reg: 'Apache Groovy(TM)'
+    groovy: 'Apache Groovy'
+    groovy-short: 'Groovy'
+    cpp-driver-url: 'https://github.com/datastax/cpp-driver'
+    csharp-driver-url: 'https://github.com/datastax/csharp-driver'
+    gocql-astra-url: 'https://github.com/datastax/gocql-astra'
+    go-driver-url: 'https://github.com/apache/cassandra-gocql-driver'
+    cql-proxy-url: 'https://github.com/datastax/cql-proxy'
+    java-driver-url: 'https://github.com/apache/cassandra-java-driver'
+    nodejs-driver-url: 'https://github.com/datastax/nodejs-driver'
+    python-driver-url: 'https://github.com/datastax/python-driver'
+    scala-driver-url: 'https://github.com/apache/cassandra-spark-connector'
+    cass-driver-cpp-shield: 'image:https://img.shields.io/github/v/tag/datastax/cpp-driver?label=latest[alt="Latest cpp-driver release on GitHub",link="{cpp-driver-url}/tags"]'
+    cass-driver-csharp-shield: 'image:https://img.shields.io/nuget/v/CassandraCSharpDriver?label=latest[alt="Latest CassandraCSharpDriver release on NuGet",link="https://www.nuget.org/packages/CassandraCSharpDriver"]'
+    cass-driver-go-shield: 'image:https://img.shields.io/github/v/tag/apache/cassandra-gocql-driver?label=latest%20gocql[alt="Latest gocql release on GitHub",link="{go-driver-url}/tags"]'
+    cass-driver-java-shield: 'image:https://img.shields.io/github/v/tag/apache/cassandra-java-driver?label=latest[alt="Latest cassandra-java-driver release on GitHub",link="{java-driver-url}/tags"]'
+    cass-driver-nodejs-shield: 'image:https://img.shields.io/github/v/tag/datastax/nodejs-driver?label=latest[alt="Latest nodejs-driver release on GitHub",link="{nodejs-driver-url}/tags"]'
+    cass-driver-python-shield: 'image:https://img.shields.io/github/v/tag/datastax/python-driver?label=latest[alt="Latest python-driver release on GitHub",link="{python-driver-url}/tags"]'
+    cass-driver-scala-shield: 'image:https://img.shields.io/github/v/tag/apache/cassandra-spark-connector?label=latest[alt="Latest cassandra-spark-connector release on GitHub",link="{scala-driver-url}/releases"]'
+    data-api: 'Data API'
+    csharp-client-api-ref-url: 'xref:astra-api-docs:ROOT:attachment$csharp-client'
+    py-client-api-ref-url-2x: 'xref:astra-api-docs:ROOT:attachment$python-client/astrapy'
+    ts-client-api-ref-url-2x: 'xref:astra-api-docs:ROOT:attachment$typescript-client'
+    java-client-api-ref-url-2x: 'xref:astra-api-docs:ROOT:attachment$java-client'
+    python-client-repo-url: 'https://github.com/datastax/astrapy'
+    typescript-client-repo-url: 'https://github.com/datastax/astra-db-ts'
+    typescript-client-examples-url: '{typescript-client-repo-url}/blob/v2.x/examples'
+    java-client-repo-url: 'https://github.com/datastax/astra-db-java'
+    csharp-client-repo-url: 'https://github.com/datastax/astra-db-csharp'
+    python-client-python-version: '3.8'
+    dataapi-java-client-shield: 'image:https://img.shields.io/maven-central/v/com.datastax.astra/astra-db-java.svg?label=latest[alt="Latest astra-db-java release on Maven Central",link="https://search.maven.org/artifact/com.datastax.astra/astra-db-java"]'
+    dataapi-python-client-shield: 'image:https://img.shields.io/github/v/tag/datastax/astrapy?label=latest[alt="Latest astrapy release on GitHub",link="{python-client-repo-url}/releases"]'
+    dataapi-typescript-client-shield: 'image:https://img.shields.io/github/v/tag/datastax/astra-db-ts?label=latest[alt="Latest astra-db-ts release on GitHub",link="{typescript-client-repo-url}/releases"]'
+    dataapi-csharp-client-shield: 'image:https://img.shields.io/github/v/tag/datastax/astra-db-csharp?label=latest[alt="Latest astra-db-csharp release on GitHub",link="{csharp-client-repo-url}/releases"]'
+    agent: 'DataStax Agent'
+    repair-service: 'Repair Service'
+    backup-service: 'Backup Service'
+    performance-service: 'Performance Service'
+    monitoring-service: 'OpsCenter Monitoring'
+    nodesync-service: 'NodeSync Service'
+    bestpractice-service: 'Best Practice Service'
+    capacity-service: 'Capacity Service'
+    lcm: 'Lifecycle Manager (LCM)'
+    lcm-short: 'LCM'
+    cr: 'custom resource (CR)'
+    cr-short: 'CR'
+    crd: 'custom resource definition (CRD)'
+    crd-short: 'CRD'
     # Custom attributes only used in ragstack-ai
     astra_db: 'Astra DB'
-    astra_stream: 'Astra Streaming'
     astra_ui: 'Astra Portal'
-    astra_cli: 'Astra CLI'
-    astra-streaming-examples-repo: 'https://raw.githubusercontent.com/datastax/astra-streaming-examples/master'
-    luna-streaming-examples-repo: 'https://raw.githubusercontent.com/datastaxdevs/luna-streaming-examples/main'
-    support_url: 'https://www.ibm.com/mysupport/s/'
-    glossary-url: 'https://docs.datastax.com/en/glossary/docs/index.html#'
-    emoji-tada: "🎉"
-    emoji-rocket: "🚀"
-    emoji-smile: "😀"
-    dse: 'DataStax Enterprise (DSE)'
-    cassandra: 'Apache Cassandra(R)'
-    classic: 'classic'
-    classic_cap: 'Classic'
-    serverless: 'serverless'
-    serverless_cap: 'Serverless'
     # Antora Atlas
     primary-site-url: https://docs.datastax.com/en
     primary-site-manifest-url: https://docs.datastax.com/en/site-manifest.json
diff --git a/modules/ROOT/nav.adoc b/modules/ROOT/nav.adoc
index 66ae016..e9e5e31 100644
--- a/modules/ROOT/nav.adoc
+++ b/modules/ROOT/nav.adoc
@@ -26,7 +26,7 @@
 * xref:resource-sets:proxies.adoc[]
 * xref:resource-sets:racks.adoc[]
 
-.CRD spec
+.{crd-short} spec
 * xref:crd-spec:Autorecovery.openapi.adoc[]
 * xref:crd-spec:Bastion.openapi.adoc[]
 * xref:crd-spec:BookKeeper.openapi.adoc[]
diff --git a/modules/ROOT/pages/index.adoc b/modules/ROOT/pages/index.adoc
index 42e9eb5..6a1c43a 100644
--- a/modules/ROOT/pages/index.adoc
+++ b/modules/ROOT/pages/index.adoc
@@ -1,55 +1,55 @@
-= {pulsar-operator-full-name}
-:navtitle: About {pulsar-operator-full-name}
+= {product}
+:navtitle: About {product}
 
-{pulsar-operator-full-name} simplifies running https://pulsar.apache.org[{pulsar-reg}] on Kubernetes by applying the familiar https://kubernetes.io/docs/concepts/extend-kubernetes/operator/[Operator pattern] to the {pulsar-short} components, and horizontally scaling resources up or down based on CPU and memory workloads.
+{product} simplifies running https://pulsar.apache.org[{pulsar-reg}] on Kubernetes by applying the familiar https://kubernetes.io/docs/concepts/extend-kubernetes/operator/[Operator pattern] to the {pulsar-short} components, and horizontally scaling resources up or down based on CPU and memory workloads.
 
-Operating and maintaining {pulsar} clusters traditionally involves complex manual configurations, making it challenging for developers and operators to effectively manage the system's lifecycle. However, with the {pulsar-operator}, these complexities are abstracted away, enabling developers to focus on their applications rather than the underlying infrastructure.
+Operating and maintaining {pulsar} clusters traditionally involves complex manual configurations, making it challenging for developers and operators to effectively manage the system's lifecycle. However, with the {product-short}, these complexities are abstracted away, enabling developers to focus on their applications rather than the underlying infrastructure.
 
-Some of the key features and benefits of the {pulsar-operator} include:
+Some of the key features and benefits of the {product-short} include:
 
 - **Easy Deployment**: Deploying an {pulsar} cluster on Kubernetes is simplified through declarative configurations and automation provided by the operator.
-- **Scalability**: The {pulsar-operator} enables effortless scaling of {pulsar-short} clusters by automatically handling the creation and configuration of new {pulsar-short} brokers and bookies as per defined rules. The broker autoscaling is integrated with the {pulsar-short} broker load balancer to make smart resource management decisions, and bookkeepers are scaled up and down based on storage usage in a safe, controlled manner.
+- **Scalability**: The {product-short} enables effortless scaling of {pulsar-short} clusters by automatically handling the creation and configuration of new {pulsar-short} brokers and bookies according to defined rules. The broker autoscaling is integrated with the {pulsar-short} broker load balancer to make smart resource management decisions, and bookkeepers are scaled up and down based on storage usage in a safe, controlled manner.
 - **High Availability**: The operator implements best practices for high availability, ensuring that {pulsar-short} clusters are fault-tolerant and can sustain failures without service disruptions.
 - **Lifecycle Management**: The operator takes care of common {pulsar-short} cluster lifecycle tasks, such as cluster creation, upgrade, configuration updates, and graceful shutdowns.
 
-We also offer the xref:getting-started:stack.adoc[{pulsar-stack}] if you're looking for more Kubernetes-native tooling deployed with your {pulsar-short} cluster. Along with the PulsarCluster CRDs, {pulsar-stack} also includes:
+We also offer the xref:getting-started:stack.adoc[{pulsar-stack}] if you're looking for more Kubernetes-native tooling deployed with your {pulsar-short} cluster. Along with the `PulsarCluster` {crd}, {pulsar-stack} also includes:
 
-* {pulsar-operator}
+* {product-short}
 * Prometheus Stack (Grafana)
 * {pulsar-short} Grafana dashboards
 * Cert Manager
 * Keycloak
 
-Whether you are a developer looking to leverage the power of {pulsar} in your Kubernetes environment or an operator seeking to streamline the management of {pulsar-short} clusters, the {pulsar-operator} provides a robust and user-friendly solution.
+Whether you are a developer looking to leverage the power of {pulsar} in your Kubernetes environment or an operator seeking to streamline the management of {pulsar-short} clusters, the {product-short} provides a robust and user-friendly solution.
 
-This guide offers a starting point for {pulsar-operator}.
-We will cover installation and deployment, configuration points, and further options for managing {pulsar-short} components with the {pulsar-operator}.
+This guide offers a starting point for {product-short}.
+We will cover installation and deployment, configuration points, and further options for managing {pulsar-short} components with the {product-short}.
 
 == Features
 
-After a new custom resource type is added to your cluster by installing a CRD, you can create instances of the resource based on its specification.
+After a new {cr} type is added to your cluster by installing a {crd-short}, you can create instances of the resource based on its specification.
 The Kubernetes API can be extended to support the new resource type, automating away the tedious aspects of managing a {pulsar-short} cluster.
 
-* xref:scaling-components:autoscale-bookies.adoc[Bookkeeper autoscaler] - Automatically scale the number of bookies based on memory usage.
+* xref:scaling-components:autoscale-bookies.adoc[BookKeeper autoscaler] - Automatically scale the number of bookies based on memory usage.
 * xref:scaling-components:autoscale-brokers.adoc[Broker autoscaler] - Automatically scale the number of brokers based on CPU load.
 * xref:resource-sets:index.adoc[Rack-aware bookkeeper placement] - Place bookies in different racks to guarantee high availability.
-* xref:scaling-components:kafka.adoc[Kafka API] - Use the Starlight for Kafka API to bring your Kafka message traffic to {pulsar-short}.
+* xref:scaling-components:kafka.adoc[{kafka-short} API] - Use the {starlight-kafka} API to bring your {kafka-short} message traffic to {pulsar-short}.
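+
+For example, once the {crd-short}s are installed, you describe a cluster declaratively as a `PulsarCluster` {cr-short}. The following minimal sketch is illustrative only: the `kaap.oss.datastax.com` API group matches the installed {crd-short} names, but verify the `apiVersion` and field names against the {crd-short} spec reference before use.
+
+[source,yaml]
+----
+apiVersion: kaap.oss.datastax.com/v1alpha1 # illustrative API version; confirm against your installed CRDs
+kind: PulsarCluster
+metadata:
+  name: pulsar
+spec:
+  global:
+    name: pulsar # illustrative field names; see the CRD spec reference
+  zookeeper:
+    replicas: 3
+  bookkeeper:
+    replicas: 3
+  broker:
+    replicas: 2
+----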
 
-== How {pulsar-operator} makes {pulsar-short} easier
+== How {product-short} makes {pulsar-short} easier
 
 Operators are a common pattern for packaging, deploying, and managing Kubernetes applications. Operators extend Kubernetes functionality to automate common tasks in stateful applications.
 
-Think of {pulsar-operator} as a manager for the individual components of {pulsar-short}. By implementing the pulsarCluster Custom Resource Definition, the operator knows enough to manage the deployment, configuration, and scaling of {pulsar-short} components with re-usable and automated tasks, such as:
+Think of {product-short} as a manager for the individual components of {pulsar-short}. By implementing the `PulsarCluster` {crd-short}, the operator knows enough to manage the deployment, configuration, and scaling of {pulsar-short} components with reusable and automated tasks, such as:
 
 * Deploying a {pulsar-short} cluster
 * Deploying monitoring and logging components
 * Autoscaling bookies based on memory usage, or brokers based on CPU load
 * Assigning resources to specific availability zones (AZs)
 
-{pulsar-operator} is configured, deployed, and packaged with Helm charts and based on the https://quarkiverse.github.io/quarkiverse-docs/quarkus-operator-sdk/dev/index.html[Quarkus Operator SDK].
+{product-short} is configured, deployed, and packaged with Helm charts and based on the https://quarkiverse.github.io/quarkiverse-docs/quarkus-operator-sdk/dev/index.html[Quarkus Operator SDK].
 
 == {pulsar-short} component architecture
 
@@ -59,12 +59,12 @@ A typical {pulsar-short} cluster *requires* the following components:
 
 * https://pulsar.apache.org/docs/concepts-architecture-overview/#brokers[Broker] - This is the {pulsar-short} message router.
-* https://pulsar.apache.org/docs/concepts-architecture-overview/#apache-bookkeeper[Bookkeeper (bookie)] - This is the {pulsar-short} data store.
-Bookkeeper stores message data in a low-latency, resilient way.
+* https://pulsar.apache.org/docs/concepts-architecture-overview/#apache-bookkeeper[BookKeeper (bookie)] - This is the {pulsar-short} data store.
+BookKeeper stores message data in a low-latency, resilient way.
 
 In addition to the required components, you might want to include some *optional components*:
 
-* https://bookkeeper.apache.org/docs/admin/autorecovery[Bookkeeper AutoRecovery] - This is a {pulsar-short} component that recovers Bookkeeper data in the event of a bookie outage.
+* https://bookkeeper.apache.org/docs/admin/autorecovery[BookKeeper AutoRecovery] - This is a {pulsar-short} component that recovers BookKeeper data in the event of a bookie outage.
 * https://pulsar.apache.org/docs/concepts-architecture-overview/#pulsar-proxy[{pulsar-short} proxy] - The {pulsar-short} proxy is just that - a proxy that runs at the edge of the cluster with public facing endpoints.
 {pulsar-short} proxy also offers special options for cluster extensions, like our [Starlight Suite of APIs].
 * https://pulsar.apache.org/docs/functions-worker-run-separately/[Dedicated functions worker(s)] - You can optionally run dedicated function workers in a {pulsar-short} cluster.
@@ -72,17 +72,17 @@
 * xref:luna-streaming:components:heartbeat-vm.adoc[{pulsar-short} Heartbeat] - This is an optional component that monitors the health of {pulsar-short} cluster and emits metrics about the cluster that are helpful for observing and debugging issues.
 * Prometheus/Grafana/Alert manager stack - This is the default observability stack for a cluster. The Luna Helm chart includes pre-made dashboards in Grafana and pre-wires all the metrics scraping.
 
-== How {pulsar-operator} installs {pulsar-short}
+== How {product-short} installs {pulsar-short}
 
-{pulsar-operator} can be installed in two ways.
+{product-short} can be installed in two ways.
 
-* xref:getting-started:operator.adoc[{pulsar-operator}] - Installs just the operator and PulsarCluster CRDs into an existing {pulsar-short} cluster.
+* xref:getting-started:operator.adoc[{product-short}] - Installs just the operator and the `PulsarCluster` {crd-short} into an existing Kubernetes cluster.
 * xref:getting-started:stack.adoc[{pulsar-stack}] - Installs and deploys the operator, a {pulsar-short} cluster, and a full Prometheus monitoring stack.
 
 [TIP]
 ====
-You can also scan an existing {pulsar-short} cluster and generate an equivalent PulsarCluster CRD. For more, see xref:migration:migrate-cluster.adoc[].
+You can also scan an existing {pulsar-short} cluster and generate an equivalent `PulsarCluster` {cr-short}. For more, see xref:migration:migrate-cluster.adoc[].
 ====
 
 To get started, see xref:getting-started:index.adoc[Getting Started].
diff --git a/modules/authentication/pages/index.adoc b/modules/authentication/pages/index.adoc
index fac914f..78b8475 100644
--- a/modules/authentication/pages/index.adoc
+++ b/modules/authentication/pages/index.adoc
@@ -1,6 +1,6 @@
 = Authentication
 
-Authentication in {pulsar-operator} is controlled with https://jwt.io/[JWT tokens] for cluster access and TLS for communication between applications within the cluster.
+Authentication in {product-short} is controlled with https://jwt.io/[JWT tokens] for cluster access and TLS for communication between applications within the cluster.
 
 * See xref:auth-jwt.adoc[] for JWT authentication.
 * See xref:auth-tls.adoc[] for TLS communication.
\ No newline at end of file
diff --git a/modules/getting-started/pages/index.adoc b/modules/getting-started/pages/index.adoc
index a3f34f7..02c481d 100644
--- a/modules/getting-started/pages/index.adoc
+++ b/modules/getting-started/pages/index.adoc
@@ -1,12 +1,12 @@
 = Get started
 
-{pulsar-operator} can be installed in two ways.
+{product-short} can be installed in two ways.
 
-* xref:getting-started:operator.adoc[{pulsar-operator}] - Installs just the operator pod and PulsarCluster CRDs.
+* xref:getting-started:operator.adoc[{product-short}] - Installs just the operator pod and the `PulsarCluster` {crd}.
 * xref:getting-started:stack.adoc[{pulsar-stack}] - Installs and deploys the operator, a {pulsar-short} cluster, and a full Prometheus monitoring stack.
 
 [TIP]
 ====
-If you have an existing {pulsar-short} cluster, you can scan and generate an equivalent PulsarCluster CRD. For more, see xref:migration:migrate-cluster.adoc[].
+If you have an existing {pulsar-short} cluster, you can scan and generate an equivalent `PulsarCluster` {cr-short}. For more, see xref:migration:migrate-cluster.adoc[].
 ====
\ No newline at end of file
diff --git a/modules/getting-started/pages/operator.adoc b/modules/getting-started/pages/operator.adoc
index 3ce728f..e3da5e5 100644
--- a/modules/getting-started/pages/operator.adoc
+++ b/modules/getting-started/pages/operator.adoc
@@ -1,10 +1,10 @@
-= Install {pulsar-operator} Helm chart
+= Install {product-short} Helm chart
 
-{pulsar-operator} is installed using Helm.
-You can install just the operator and the PulsarCluster CRDs, or you can install xref:stack.adoc[{pulsar-stack}], which includes the operator, CRDs, and the Prometheus monitoring stack.
+{product-short} is installed using Helm.
+You can install just the operator and the `PulsarCluster` {crd}, or you can install xref:stack.adoc[{pulsar-stack}], which includes the operator, the {crd-short}s, and the Prometheus monitoring stack.
 
 [#operator]
-== Install {pulsar-operator}
+== Install {product-short}
 
 . Install the {company} Helm repository:
 +
 [source,shell]
 ----
 helm repo add kaap https://datastax.github.io/kaap
 helm repo update
 ----
 
-. The {pulsar-operator} Helm chart is available for download (https://github.com/datastax/kaap/releases/latest)[here].
+. The {product-short} Helm chart is available for download from the https://github.com/datastax/kaap/releases/latest[latest release].
 
-. Install the {pulsar-operator} Helm chart:
+. Install the {product-short} Helm chart:
 +
 [source,shell]
 ----
@@ -30,7 +30,7 @@ REVISION: 1
 TEST SUITE: None
 ----
 
-. Ensure {pulsar-operator} is up and running:
+. Ensure {product-short} is up and running:
 +
 [source,shell]
 ----
@@ -114,10 +114,10 @@ Events:
 +
 You've now installed KAAP.
 +
-By default, when KAAP is installed, the PulsarCluster CRDs are also created.
-This setting is defined in the {pulsar-operator} values.yaml file as `crd: create: true`.
+By default, when {product-short} is installed, the `PulsarCluster` {crd-short}s are also created.
+This setting is defined in the {product-short} `values.yaml` file as `crd: create: true`.
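++
+As a sketch, the corresponding `values.yaml` keys look like this (the nesting matches the flattened `crd: create: true` notation above):
++
+[source,yaml]
+----
+crd:
+  create: true # also create the PulsarCluster CRDs when installing the operator chart
+----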
 
-. Get the available CRDs:
+. Get the available {crd-short}s:
 +
 [source,shell]
 ----
@@ -142,7 +142,7 @@ zookeepers.kaap.oss.datastax.com 2023-05-12T16:36:06Z
 
 == Uninstall
 
-Uninstall the {pulsar-operator}:
+Uninstall the {product-short}:
 
 [source,shell]
 ----
diff --git a/modules/getting-started/pages/stack.adoc b/modules/getting-started/pages/stack.adoc
index 07bdf8c..9bddca8 100644
--- a/modules/getting-started/pages/stack.adoc
+++ b/modules/getting-started/pages/stack.adoc
@@ -4,13 +4,13 @@ Need more monitoring and management capabilities? Check out the {pulsar-stack}.
 
 {pulsar-stack} includes:
 
-* {pulsar-operator}
+* {product-short}
 * Prometheus Stack (Grafana)
 * {pulsar-short} Grafana dashboards
 * Cert Manager
 * Keycloak
 
-. Install a PulsarCluster with the {pulsar-stack} included:
+. Install a `PulsarCluster` with the {pulsar-stack} included:
 +
 [source,shell]
 ----
@@ -125,7 +125,7 @@ You've now installed {pulsar-stack}.
 
 == Uninstall
 
-Uninstall the {pulsar-operator} and the cluster:
+Uninstall the {product-short} and the cluster:
 
 [source,shell]
 ----
diff --git a/modules/getting-started/pages/upgrades.adoc b/modules/getting-started/pages/upgrades.adoc
index b7c82bb..5f6323b 100644
--- a/modules/getting-started/pages/upgrades.adoc
+++ b/modules/getting-started/pages/upgrades.adoc
@@ -1,13 +1,13 @@
 = Upgrade deployments
 
-The {pulsar-operator} performs cluster upgrades in a very conservative manner, with the primary goal of reducing maintenance time during upgrades.
+The {product-short} performs cluster upgrades in a conservative manner, with the primary goal of reducing maintenance time during upgrades.
 Components are updated and then restarted *only* if strictly needed. For example, if only the broker needs to be upgraded, then all other services will be left up and running.
-If there is an error or interruption during upgrade, the operator will apply the desired state defined in the PulsarCluster custom resource until the resource matches the actual state.
+If there is an error or interruption during an upgrade, the operator keeps applying the desired state defined in the `PulsarCluster` {cr} until the cluster's actual state matches it.
 
 == Upgrade schema
 
-The {pulsar-operator} follows a fixed schema to upgrade the cluster:
+The {product-short} follows a fixed schema to upgrade the cluster:
 
 [source,console]
 ----
@@ -125,9 +125,9 @@ pulsar-cluster pulsar-zookeeper-metadata-zgfn4 0/1
 
 You've successfully upgraded your deployment by changing only one YAML file.
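+
+As an illustrative sketch, such a one-line change could be bumping the {pulsar-short} image tag in the `pulsar-operator.cluster` section of your `values.yaml` (the section name follows the migration guide; the `global.image` key and tags are assumptions to verify against your chart values):
+
+[source,yaml]
+----
+pulsar-operator:
+  cluster:
+    global:
+      image: apachepulsar/pulsar:3.0.1 # hypothetical tag bump; the operator rolls only the components that need it
+----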
 
-== Upgrade CRDs
+== Upgrade {crd-short}s
 
-To upgrade CRDs, run the following command:
+To upgrade {crd-short}s, run the following command:
 
 [source,shell]
 ----
diff --git a/modules/migration/pages/migrate-cluster.adoc b/modules/migration/pages/migrate-cluster.adoc
index 5a81a59..4eb4acf 100644
--- a/modules/migration/pages/migrate-cluster.adoc
+++ b/modules/migration/pages/migrate-cluster.adoc
@@ -1,9 +1,9 @@
-= Migrate existing cluster to {pulsar-operator}
+= Migrate existing cluster to {product-short}
 
-Migrating an existing {pulsar-reg} cluster to one controlled by the {pulsar-operator} is a manual process, but we've included a migration tool to help you along the way.
+Migrating an existing {pulsar-reg} cluster to one controlled by the {product-short} is a manual process, but we've included a migration tool to help you along the way.
 
-The migration tool is a CLI application that connects to an existing {pulsar} cluster and generates a valid and equivalent PulsarCluster CRD.
-The migration tool simulates what would happen if the generated PulsarCluster would be submitted, retrieves the Kubernetes resources that would be created, and compares them with the existing cluster's resources, generating a detailed HTML report.
+The migration tool is a CLI application that connects to an existing {pulsar} cluster and generates a valid and equivalent `PulsarCluster` {cr}.
+The migration tool simulates what would happen if the generated `PulsarCluster` were submitted, retrieves the Kubernetes resources that would be created, and compares them with the existing cluster's resources, generating a detailed HTML report.
 You can then examine the report and decide if you want to proceed with the cluster migration, or if you need to make some changes first.
 
 == Prerequisites
@@ -11,7 +11,7 @@ You can then examine the report and decide if you want to proceed with the clust
 * An existing {pulsar} cluster
 * Migration-tool JAR downloaded from the https://github.com/datastax/kaap/releases[latest release].
 
-== Scan and generate cluster CRDs
+== Scan and generate cluster {cr-short} specs
 
 . Create an input file called `input-cluster-specs.yaml` with the following content:
 +
@@ -53,11 +53,11 @@ For example, if the broker pod is `pulsar-prod-cluster-broker-0`, then the `clus
 java -jar migration-tool.jar generate -i input-cluster-specs.yaml -o output
 ----
 
-. Find the link to the generated report in the logs, open the generated report in your browser, and then examine the differences between the existing cluster and the {pulsar-operator}.
+. Find the link to the generated report in the logs, open the generated report in your browser, and then examine the differences between the existing cluster and the resources that {product-short} would create.
 +
 If everything looks good, proceed to the <>.
 +
-If you need to change the generated CRD and simulate the migration again, run the following command after making the necessary changes:
+If you need to change the generated {cr-short} spec and simulate the migration again, run the following command after making the necessary changes:
 +
 [source,java]
 ----
@@ -69,7 +69,7 @@ java -jar migration-tool.jar diff -d output/
 
 . Create a new `values.yaml` file for the operator.
 
-. In the `pulsar-operator.cluster` section, enter the generated CRD's spec.
+. In the `pulsar-operator.cluster` section, enter the generated {cr-short} spec.
 +
 [source,yaml]
 ----
@@ -88,7 +88,7 @@ pulsar-operator:
 helm install pulsar kaap/kaap-stack --values
 ----
 
-. Wait for the PulsarCluster status to be in a Ready state, which indicates that the operator has assumed control of the cluster.
+. Wait for the `PulsarCluster` status to be in a Ready state, which indicates that the operator has assumed control of the cluster.
 Since the generated resources will match the existing cluster's resources, the following behaviors are expected:
 +
 * The operator doesn't create any new resources.
diff --git a/modules/resource-sets/pages/bookies.adoc b/modules/resource-sets/pages/bookies.adoc
index ecb269b..be82b06 100644
--- a/modules/resource-sets/pages/bookies.adoc
+++ b/modules/resource-sets/pages/bookies.adoc
@@ -1,9 +1,9 @@
-= Bookkeeper Sets
+= BookKeeper Sets
 
-With a https://pulsar.apache.org/docs/administration-isolation-bookie/#rack-aware-placement-policy[rack-aware deployment], {pulsar-operator} can set the data placement policy automatically.
+With a https://pulsar.apache.org/docs/administration-isolation-bookie/#rack-aware-placement-policy[rack-aware deployment], {product-short} can set the data placement policy automatically.
 Every entry will be stored as much as possible in different failure domains to guarantee rack-level fault tolerance.
 
-The auto-configuration of rack-awareness is enabled by default, and is configured in the Bookkeeper configuration section:
+The auto-configuration of rack-awareness is enabled by default, and is configured in the BookKeeper configuration section:
 [source,shell]
 ----
 bookkeeper:
@@ -14,7 +14,7 @@ bookkeeper:
 [NOTE]
 ====
 The autoRackConfig feature requires `bookkeeperClientRegionawarePolicyEnabled=true` in the broker configuration.
-Fortunately, {pulsar-operator} will automatically add this configuration property in the broker and autorecovery values.
+Fortunately, {product-short} automatically adds this configuration property to the broker and autorecovery values.
 ====
 
 If you wish to disable the region-aware policy, you need to explicitly set `bookkeeperClientRegionawarePolicyEnabled=false` in the broker and autorecovery configuration.
diff --git a/modules/resource-sets/pages/index.adoc b/modules/resource-sets/pages/index.adoc
index bdb6f07..731edc1 100644
--- a/modules/resource-sets/pages/index.adoc
+++ b/modules/resource-sets/pages/index.adoc
@@ -4,7 +4,7 @@ The operator allows you to create multiple sets of {pulsar-short} proxies, broke
 Each set is a dedicated deployment/statefulset with its own service and configmap. When multiple sets are specified, an umbrella service is created as the main entrypoint of the cluster, but otherwise, a dedicated service is created for each set. You can customize the service per set - for example, you might assign different DNS domains for each resource set.
 
-Resource sets are a very powerful addition to a {pulsar-operator}-managed cluster, allowing you to create different configurations for the same components. For example, you might dedicate a set of brokers to a single customer, or you can create a set of brokers with a different configuration for testing purposes.
+Resource sets are a powerful addition to a {product-short}-managed cluster, allowing you to create different configurations for the same components. For example, you might dedicate a set of brokers to a single customer, or you can create a set of brokers with a different configuration for testing purposes, as sketched below.
 
 Racks, proxies, bookies, and pods can likewise be created as resource sets with their own configurations.
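+
+As an illustrative sketch only, two broker sets might be declared in the `PulsarCluster` spec as follows (the `sets` key, set names, and values are assumptions to verify against the {crd-short} spec reference):
+
+[source,yaml]
+----
+spec:
+  broker:
+    sets:
+      customer-a: # set dedicated to a single customer
+        replicas: 3
+      testing: # set with a different configuration for test traffic
+        replicas: 1
+----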
diff --git a/modules/resource-sets/pages/proxies.adoc b/modules/resource-sets/pages/proxies.adoc
index f320331..1931758 100644
--- a/modules/resource-sets/pages/proxies.adoc
+++ b/modules/resource-sets/pages/proxies.adoc
@@ -1,8 +1,8 @@
 = Proxy Sets
 
 Proxy resource sets are used to create multiple sets of {pulsar-short} proxies. Each resource set has its own configuration.
-{pulsar-short} can communicate with many different application clients, such as Apache Kafka and RabbitMQ, through proxy extensions.
-{pulsar-operator} can manage these dedicated proxy extensions with resource sets.
+{pulsar-short} can communicate with many different application clients, such as {kafka-reg} and RabbitMQ, through proxy extensions.
+{product-short} can manage these dedicated proxy extensions with resource sets.
 [source,shell]
 ----
 spec:
diff --git a/modules/scaling-components/pages/autoscale-bookies.adoc b/modules/scaling-components/pages/autoscale-bookies.adoc
index 6d8691d..9300375 100644
--- a/modules/scaling-components/pages/autoscale-bookies.adoc
+++ b/modules/scaling-components/pages/autoscale-bookies.adoc
@@ -1,17 +1,17 @@
-= Bookkeeper autoscaler
+= BookKeeper autoscaler
 
-In a {pulsar-short} cluster managed by KAAP, BookKeeper nodes are scaled up in response to running low on storage, and because of Bookkeeper's segment-based design, the new storage is available immediately for use by the cluster, with no log stream rebalancing required.
+In a {pulsar-short} cluster managed by {product-short}, BookKeeper nodes are scaled up in response to running low on storage, and because of BookKeeper's segment-based design, the new storage is available immediately for use by the cluster, with no log stream rebalancing required.
 
-When KAAP sees low storage usage on a Bookkeeper node, the node is automatically scaled down (decommissioned) to free up volume usage and reduce storage costs. This scale-down is done in a safe, controlled manner which ensures no data loss and guarantees the configured replication factor for all messages. For example, if your replication factor is 3 (write and ack quorum of 3), 3 replicas are maintained at all times during the scale down to ensure data can be recovered, even if there is a failure during the scale-down phase. Scaling down bookies has been a consistent pain point in {pulsar-short}, and KAAP automates this without sacrificing the {pulsar-short} data guarantees.
+When {product-short} sees low storage usage on a BookKeeper node, the node is automatically scaled down (decommissioned) to free up volume usage and reduce storage costs. This scale-down is done in a safe, controlled manner that ensures no data loss and guarantees the configured replication factor for all messages. For example, if your replication factor is 3 (write and ack quorum of 3), 3 replicas are maintained at all times during the scale-down to ensure data can be recovered, even if there is a failure during the scale-down phase. Scaling down bookies has been a consistent pain point in {pulsar-short}, and {product-short} automates this without sacrificing the {pulsar-short} data guarantees.
 
-== Install Operator with Bookkeeper autoscaler enabled
+== Install the operator with the BookKeeper autoscaler enabled
 
 [source,shell]
 ----
 helm install pulsar-operator helm/pulsar-operator \
 --values helm/examples/bookie-autoscaling/values.yaml
 ----
 
-== Bookkeeper autoscaler configuration
+== BookKeeper autoscaler configuration
 
 The operator will scale the number of bookies pods in a cluster up and down based on current disk usage.
 The operator checks the disk usage percentage of all bookies at a regular interval.
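+
+As a sketch, the autoscaler is tuned in `values.yaml`. Only `stabilizationWindowMs` appears in the excerpt below, so treat the remaining keys and defaults as assumptions to check against the thresholds table that follows:
+
+[source,yaml]
+----
+bookkeeper:
+  autoscaler:
+    enabled: true
+    periodMs: 10000 # how often disk usage is sampled (illustrative)
+    diskUsageToleranceHwm: 0.92 # scale up above this high-water mark (illustrative)
+    diskUsageToleranceLwm: 0.75 # eligible for scale-down below this low-water mark (illustrative)
+    stabilizationWindowMs: 30000
+----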
@@ -38,7 +38,7 @@ The operator's thresholds are set in the values.yaml file:
 stabilizationWindowMs: 30000
 ----
 
-.Bookkeeper autoscaler configuration
+.BookKeeper autoscaler configuration
 [cols=4*,options="header"]
 |===
 |Name
diff --git a/modules/scaling-components/pages/index.adoc b/modules/scaling-components/pages/index.adoc
index 3452814..b18264e 100644
--- a/modules/scaling-components/pages/index.adoc
+++ b/modules/scaling-components/pages/index.adoc
@@ -1,9 +1,9 @@
 = Scaling components
 
-After a new custom resource type is added to your cluster by installing a CRD, you can create instances of the resource based on its specification.
+After a new {cr} type is added to your cluster by installing a {crd}, you can create instances of the resource based on its specification.
 The Kubernetes API can be extended to support the new resource type, automating away the tedious aspects of managing a {pulsar-short} cluster.
 
-* xref:scaling-components:autoscale-bookies.adoc[Bookkeeper autoscaler] - Automatically scale the number of bookies based on memory usage.
+* xref:scaling-components:autoscale-bookies.adoc[BookKeeper autoscaler] - Automatically scale the number of bookies based on memory usage.
 * xref:scaling-components:autoscale-brokers.adoc[Broker autoscaler] - Automatically scale the number of brokers based on CPU load.
 * xref:resource-sets:bookies.adoc[Rack-aware bookkeeper placement] - Place bookies in different racks to guarantee high availability.
-* xref:scaling-components:kafka.adoc[Kafka API] - Use the Starlight for Kafka API to bring your Kafka message traffic to {pulsar-short}.
\ No newline at end of file
+* xref:scaling-components:kafka.adoc[{kafka-short} API] - Use the {starlight-kafka} API to bring your {kafka-short} message traffic to {pulsar-short}.
\ No newline at end of file
diff --git a/modules/scaling-components/pages/kafka.adoc b/modules/scaling-components/pages/kafka.adoc
index 08eea56..e2667c2 100644
--- a/modules/scaling-components/pages/kafka.adoc
+++ b/modules/scaling-components/pages/kafka.adoc
@@ -1,28 +1,28 @@
-= Kafka
+= {kafka-short}
 
-Have an Apache Kafka(R) workload you want to control with {pulsar-operator}?
-Thanks to Starlight for Kafka, you can run your Kafka workload on a {pulsar-short} cluster, and with {pulsar-operator}, the scaling of the Kafka pods is handled for you.
+Have an {kafka-reg} workload you want to control with {product-short}?
+Thanks to {starlight-kafka}, you can run your {kafka-short} workload on a {pulsar-short} cluster, and with {product-short}, the scaling of the {kafka-short} pods is handled for you.
 
-== Scale the {pulsar-short} Broker with a Kafka Client Workload
+== Scale the {pulsar-short} broker with a {kafka-short} client workload
 
 This folder contains a sample configuration and demo about how to run a workload on an {pulsar-reg} cluster with the Broker Auto Scaling feature.
 
-Support for the Kafka wire protocol is provided by xref:starlight-for-kafka:ROOT:index.adoc[Starlight for Kafka].
+Support for the {kafka-short} wire protocol is provided by xref:starlight-for-kafka:ROOT:index.adoc[{starlight-kafka}].
 
-The client work load is generated using the basic Kafka Performance tools.
+The client workload is generated using the basic {kafka-short} performance tools.
 
 == Install
 
 . Install the operator and a {pulsar-short} cluster.
-In this case, we're installing xref:getting-started:stack.adoc[{pulsar-stack}] with the Kafka protocol enabled.
+In this case, we're installing xref:getting-started:stack.adoc[{pulsar-stack}] with the {kafka-short} protocol enabled.
 +
 [source,shell]
 ----
 helm install pos helm/pulsar-stack --values helm/examples/kafka/values.yaml
 ----
 +
-The Kafka protocol is controlled in the deployment's `values.yaml` file.
+The {kafka-short} protocol is controlled in the deployment's `values.yaml` file.
 +
 [source,yaml]
 ----
@@ -41,7 +41,7 @@ kafka:
 config: {}
 ----
 +
-Additionally, you can proxy the Kafka connection in the {pulsar-short} Proxy with `kafka:enabled:true`.
+Additionally, you can proxy the {kafka-short} connection in the {pulsar-short} Proxy with `kafka:enabled:true`.
 +
 [source,yaml]
 ----
@@ -56,28 +56,28 @@ proxy:
 config: {}
 ----
 
-. Deploy the Kafka producers.
+. Deploy the {kafka-short} producers.
 +
 [source,shell]
 ----
 kubectl apply -f helm/examples/kafka/kafka-producer-perf.yaml
 ----
 
-. See the logs of the Kafka producers.
+. See the logs of the {kafka-short} producers.
 +
 [source,shell]
 ----
 kubectl logs -f deploy/kafka-client-producer
 ----
 
-. Deploy the Kafka consumers.
+. Deploy the {kafka-short} consumers.
 +
 [source,shell]
 ----
 kubectl apply -f helm/examples/kafka/kafka-consumer-perf.yaml
 ----
 
-. See the logs of the Kafka consumers.
+. See the logs of the {kafka-short} consumers.
 +
 [source,shell]
 ----