getSecondaryResource(P primary,
+ Context context) {
+ return eventSource()
+ .flatMap(es -> es.getSecondaryResource(primary));
+ }
+
+ /**
+ * Custom match that compares an SHA-256 hash of the desired resource
+ * spec against the last applied hash. Overrides the 3-arg entry
+ * point because that is what JOSDK's reconcile loop actually calls.
+ *
+ * The parent's 3-arg match delegates to a 5-arg method that calls
+ * {@code addMetadata()} unconditionally — writing the
+ * {@code javaoperatorsdk.io/previous} annotation on every
+ * reconciliation. On Docker Desktop, that annotation write bumps
+ * {@code metadata.generation}, which triggers a new informer event,
+ * causing an infinite reconciliation loop.
+ *
+ * By intercepting here we avoid both the annotation write and the
+ * false-positive diffs from K8s-injected defaults (protocol: TCP,
+ * terminationGracePeriodSeconds, etc.) when the desired spec has
+ * not actually changed.
+ */
+ @Override
+ public Matcher.Result match(R actual, P primary,
+ Context context) {
+ R desired = desired(primary, context);
+ String resourceKey = desired.getKind()
+ + "/" + desired.getMetadata().getNamespace()
+ + "/" + desired.getMetadata().getName();
+ String desiredHash = computeHash(desired);
+ if (actual == null) {
+ if (desiredHash != null) {
+ String previousHash = LAST_DESIRED_HASHES.get(resourceKey);
+ if (Objects.equals(previousHash, desiredHash)) {
+ // Resource was created in a previous reconciliation but
+ // the informer hasn't indexed it yet. Returning false
+ // would trigger another SSA apply, which fires another
+ // informer event, creating an infinite reconciliation
+ // loop on Docker Desktop. Skip the re-creation.
+ LOG.debug("Resource {} already created (informer lag), "
+ + "skipping re-create", resourceKey);
+ return Matcher.Result.computed(true, desired);
+ }
+ // First creation — cache the hash so the next
+ // reconciliation can detect informer lag.
+ LOG.info("Creating resource {}", resourceKey);
+ LAST_DESIRED_HASHES.put(resourceKey, desiredHash);
+ }
+ return Matcher.Result.computed(false, desired);
+ }
+ if (desiredHash == null) {
+ // Serialization failed — delegate to parent which will
+ // call addMetadata + the real matcher
+ return super.match(actual, primary, context);
+ }
+ String previousHash = LAST_DESIRED_HASHES.get(resourceKey);
+ if (previousHash == null) {
+ // First reconciliation after operator start — the resource
+ // already exists so seed the cache without triggering an
+ // update. This prevents a gratuitous rolling update caused
+ // by the annotation write that addMetadata() would perform.
+ LOG.info("Seeding hash for existing resource {}", resourceKey);
+ LAST_DESIRED_HASHES.put(resourceKey, desiredHash);
+ return Matcher.Result.computed(true, desired);
+ }
+ if (desiredHash.equals(previousHash)) {
+ LOG.debug("Desired spec unchanged for {}, skipping update",
+ resourceKey);
+ return Matcher.Result.computed(true, desired);
+ }
+ LOG.info("Desired spec changed for {}, will update", resourceKey);
+ LAST_DESIRED_HASHES.put(resourceKey, desiredHash);
+ return Matcher.Result.computed(false, desired);
+ }
+
+ private String computeHash(R resource) {
+ try {
+ JsonNode tree = MAPPER.valueToTree(resource);
+ sortJsonNode(tree);
+ String json = MAPPER.writeValueAsString(tree);
+ MessageDigest digest = MessageDigest.getInstance("SHA-256");
+ byte[] hash = digest.digest(
+ json.getBytes(StandardCharsets.UTF_8));
+ StringBuilder sb = new StringBuilder(64);
+ for (byte b : hash) {
+ sb.append(String.format("%02x", b));
+ }
+ return sb.toString();
+ } catch (Exception e) {
+ LOG.warn("Failed to compute hash for resource {}: {}",
+ resource.getMetadata().getName(), e.getMessage());
+ return null;
+ }
+ }
+
+ /** Recursively sort all object node keys for deterministic JSON. */
+ private static void sortJsonNode(JsonNode node) {
+ if (node.isObject()) {
+ ObjectNode obj = (ObjectNode) node;
+ TreeMap sorted = new TreeMap<>();
+ Iterator fieldNames = obj.fieldNames();
+ while (fieldNames.hasNext()) {
+ String name = fieldNames.next();
+ JsonNode child = obj.get(name);
+ sortJsonNode(child);
+ sorted.put(name, child);
+ }
+ obj.removeAll();
+ sorted.forEach(obj::set);
+ } else if (node.isArray()) {
+ ArrayNode arr = (ArrayNode) node;
+ for (int i = 0; i < arr.size(); i++) {
+ sortJsonNode(arr.get(i));
+ }
+ }
+ }
+
+ /**
+ * Builds the database connection env vars: DB_DRIVER, DBPASSWORD
+ * (from SecretKeyRef), and SERVICE_OPTS with javax.jdo connection
+ * properties. Shared by MetastoreDeploymentDependent and
+ * SchemaInitJobDependent.
+ */
+ protected static List buildDbEnvVars(DatabaseConfig db) {
+ List envVars = new ArrayList<>();
+ envVars.add(new EnvVar("DB_DRIVER", db.getType(), null));
+
+ // DBPASSWORD must be defined before SERVICE_OPTS so that
+ // Kubernetes $(DBPASSWORD) interpolation resolves correctly.
+ SecretKeyRef passwordRef = db.getPasswordSecretRef();
+ if (passwordRef != null) {
+ envVars.add(new EnvVarBuilder()
+ .withName("DBPASSWORD")
+ .withNewValueFrom()
+ .withNewSecretKeyRef()
+ .withName(passwordRef.getName())
+ .withKey(passwordRef.getKey())
+ .endSecretKeyRef()
+ .endValueFrom()
+ .build());
+ }
+
+ StringBuilder serviceOpts = new StringBuilder();
+ if (db.getUrl() != null) {
+ serviceOpts.append("-Djavax.jdo.option.ConnectionURL=")
+ .append(db.getUrl());
+ }
+ if (db.getDriver() != null) {
+ serviceOpts.append(" -Djavax.jdo.option.ConnectionDriverName=")
+ .append(db.getDriver());
+ }
+ if (db.getUsername() != null) {
+ serviceOpts.append(" -Djavax.jdo.option.ConnectionUserName=")
+ .append(db.getUsername());
+ }
+ if (passwordRef != null) {
+ serviceOpts.append(
+ " -Djavax.jdo.option.ConnectionPassword=$(DBPASSWORD)");
+ }
+ if (!serviceOpts.isEmpty()) {
+ envVars.add(new EnvVar("SERVICE_OPTS",
+ serviceOpts.toString().trim(), null));
+ }
+ return envVars;
+ }
+
+ /** Builds the JDBC driver download init container. */
+ protected static Container buildJdbcDriverInitContainer(
+ String image, String driverJarUrl) {
+ return new ContainerBuilder()
+ .withName("download-jdbc-driver")
+ .withImage(image)
+ .withCommand("/bin/bash", "-c",
+ "wget -q -O " + EXT_JARS_PATH
+ + "/jdbc-driver.jar '" + driverJarUrl + "'")
+ .withVolumeMounts(new VolumeMountBuilder()
+ .withName("ext-jars")
+ .withMountPath(EXT_JARS_PATH)
+ .build())
+ .build();
+ }
+
+ /** Builds a projected Volume merging multiple ConfigMaps. */
+ protected static Volume buildProjectedConfigVolume(
+ String volumeName, String... configMapNames) {
+ List
+ projections = new ArrayList<>();
+ for (String cmName : configMapNames) {
+ projections.add(
+ new io.fabric8.kubernetes.api.model.VolumeProjectionBuilder()
+ .withNewConfigMap().withName(cmName).endConfigMap()
+ .build());
+ }
+ return new VolumeBuilder()
+ .withName(volumeName)
+ .withNewProjected()
+ .withSources(projections)
+ .endProjected()
+ .build();
+ }
+
+ /** Adds S3A filesystem properties to a config map. */
+ protected static void addS3AProperties(
+ Map props, StorageSpec storage) {
+ if (storage == null || storage.getEndpoint() == null) {
+ return;
+ }
+ props.put("fs.defaultFS", "s3a://" + storage.getBucket());
+ props.put("fs.s3a.endpoint", storage.getEndpoint());
+ props.put("fs.s3a.path.style.access",
+ String.valueOf(storage.isPathStyleAccess()));
+ props.put("fs.s3a.impl",
+ "org.apache.hadoop.fs.s3a.S3AFileSystem");
+ }
+
+ /**
+ * Builds the {@code AWS_ACCESS_KEY_ID} and {@code AWS_SECRET_ACCESS_KEY}
+ * environment variables for S3A credential propagation.
+ *
+ * Priority order:
+ *
+ * - {@code SecretKeyRef} → valueFrom secretKeyRef
+ * - Plain-text fields → literal value
+ *
+ */
+ protected static List buildS3CredentialEnvVars(
+ StorageSpec storage) {
+ if (storage == null) {
+ return Collections.emptyList();
+ }
+
+ List envVars = new ArrayList<>();
+
+ // Access key: prefer SecretKeyRef, fall back to plain text
+ SecretKeyRef accessRef = storage.getAccessKeySecretRef();
+ if (accessRef != null) {
+ envVars.add(new EnvVarBuilder()
+ .withName("AWS_ACCESS_KEY_ID")
+ .withNewValueFrom()
+ .withNewSecretKeyRef()
+ .withName(accessRef.getName())
+ .withKey(accessRef.getKey())
+ .endSecretKeyRef()
+ .endValueFrom()
+ .build());
+ } else if (storage.getAccessKey() != null) {
+ envVars.add(new EnvVar("AWS_ACCESS_KEY_ID",
+ storage.getAccessKey(), null));
+ }
+
+ // Secret key: prefer SecretKeyRef, fall back to plain text
+ SecretKeyRef secretRef = storage.getSecretKeySecretRef();
+ if (secretRef != null) {
+ envVars.add(new EnvVarBuilder()
+ .withName("AWS_SECRET_ACCESS_KEY")
+ .withNewValueFrom()
+ .withNewSecretKeyRef()
+ .withName(secretRef.getName())
+ .withKey(secretRef.getKey())
+ .endSecretKeyRef()
+ .endValueFrom()
+ .build());
+ } else if (storage.getSecretKey() != null) {
+ envVars.add(new EnvVar("AWS_SECRET_ACCESS_KEY",
+ storage.getSecretKey(), null));
+ }
+
+ return envVars;
+ }
+
+ /**
+ * Populates init containers, volume mounts, and volumes for the
+ * Metastore pod spec (shared by MetastoreDeploymentDependent and
+ * SchemaInitJobDependent).
+ *
+ * Adds the projected hive-config volume (merging metastore +
+ * hadoop ConfigMaps) and, when a JDBC driver JAR URL is configured,
+ * adds the download init container plus the ext-jars emptyDir volume.
+ */
+ protected static void buildMetastoreVolumesAndInitContainers(
+ HiveCluster hiveCluster, DatabaseConfig db, String image,
+ List initContainers,
+ List volumeMounts,
+ List volumes) {
+ boolean hasDriverJar = db.getDriverJarUrl() != null;
+
+ if (hasDriverJar) {
+ initContainers.add(buildJdbcDriverInitContainer(
+ image, db.getDriverJarUrl()));
+ }
+
+ volumeMounts.add(new VolumeMountBuilder()
+ .withName("hive-config")
+ .withMountPath(CONF_MOUNT_PATH).build());
+ if (hasDriverJar) {
+ volumeMounts.add(new VolumeMountBuilder()
+ .withName("ext-jars")
+ .withMountPath(EXT_JARS_PATH).build());
+ }
+
+ volumes.add(buildProjectedConfigVolume("hive-config",
+ MetastoreConfigMapDependent.resourceName(hiveCluster),
+ HadoopConfigMapDependent.resourceName(hiveCluster)));
+ if (hasDriverJar) {
+ volumes.add(new VolumeBuilder()
+ .withName("ext-jars")
+ .withNewEmptyDir().endEmptyDir().build());
+ }
+ }
+
+ /** Builds Kubernetes ResourceRequirements from the operator's spec. */
+ protected static ResourceRequirements buildResources(ResourceRequirementsSpec spec) {
+ if (spec == null) {
+ return new ResourceRequirements();
+ }
+ ResourceRequirementsBuilder builder = new ResourceRequirementsBuilder();
+ if (spec.getRequestsCpu() != null) {
+ builder.addToRequests("cpu", new Quantity(spec.getRequestsCpu()));
+ }
+ if (spec.getRequestsMemory() != null) {
+ builder.addToRequests("memory", new Quantity(spec.getRequestsMemory()));
+ }
+ if (spec.getLimitsCpu() != null) {
+ builder.addToLimits("cpu", new Quantity(spec.getLimitsCpu()));
+ }
+ if (spec.getLimitsMemory() != null) {
+ builder.addToLimits("memory", new Quantity(spec.getLimitsMemory()));
+ }
+ return builder.build();
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/HiveServer2ConfigMapDependent.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/HiveServer2ConfigMapDependent.java
new file mode 100644
index 000000000000..96e36a717177
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/HiveServer2ConfigMapDependent.java
@@ -0,0 +1,156 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.model.HiveClusterSpec;
+import org.apache.hive.kubernetes.operator.model.spec.LlapSpec;
+import org.apache.hive.kubernetes.operator.util.HadoopXmlBuilder;
+import org.apache.hive.kubernetes.operator.util.Labels;
+
+/** Manages the hive-site.xml ConfigMap for HiveServer2. */
+@KubernetesDependent(
+ labelSelector = "app.kubernetes.io/component=hiveserver2,"
+ + "app.kubernetes.io/managed-by=hive-kubernetes-operator"
+)
+public class HiveServer2ConfigMapDependent
+ extends HiveDependentResource {
+
+ public static final String COMPONENT = "hiveserver2";
+
+ public HiveServer2ConfigMapDependent() {
+ super(ConfigMap.class);
+ }
+
+ @Override
+ protected ConfigMap desired(HiveCluster hiveCluster,
+ Context context) {
+ HiveClusterSpec spec = hiveCluster.getSpec();
+ boolean tezAmEnabled = spec.getTezAm().isEnabled();
+ String zkQuorum = spec.getZookeeper().getQuorum();
+
+ // hive-site.xml properties
+ Map props = new LinkedHashMap<>();
+ String metastoreUri = "thrift://"
+ + hiveCluster.getMetadata().getName() + "-metastore:9083";
+ props.put("hive.metastore.uris", metastoreUri);
+ props.put("hive.metastore.warehouse.dir",
+ spec.getMetastore().getWarehouseDir());
+ props.put("hive.server2.enable.doAs", "false");
+ props.put("hive.tez.exec.inplace.progress", "false");
+ props.put("hive.tez.exec.print.summary", "true");
+ props.put("hive.jar.directory", "/tmp");
+ props.put("hive.user.install.directory", "/tmp");
+ if (tezAmEnabled) {
+ props.put("hive.exec.local.scratchdir", "/opt/hive/scratch");
+ }
+
+ // S3A config also in hive-site.xml so it is serialized into
+ // the Tez DAG configuration for task execution.
+ addS3AProperties(props, spec.getStorage());
+
+ if (tezAmEnabled) {
+ props.put("hive.server2.tez.use.external.sessions", "true");
+ props.put("hive.server2.tez.external.sessions.namespace",
+ "/tez-external-sessions/tez_am/server");
+ props.put("hive.server2.tez.external.sessions.registry.class",
+ "org.apache.hadoop.hive.ql.exec.tez."
+ + "ZookeeperExternalSessionsRegistryClient");
+ props.put("hive.zookeeper.quorum", zkQuorum);
+ // Also set Tez properties in hive-site.xml so HiveConf carries
+ // them to TezClient even if tez-site.xml classpath ordering
+ // causes a different tez-site.xml to load.
+ props.put("tez.am.framework.mode", "STANDALONE_ZOOKEEPER");
+ props.put("tez.am.registry.namespace", "/tez_am/server");
+ props.put("tez.am.zookeeper.quorum", zkQuorum);
+ if (spec.getLlap().isEnabled()) {
+ props.put("hive.execution.mode", "llap");
+ props.put("hive.llap.execution.mode", "all");
+ props.put("hive.llap.daemon.service.hosts",
+ spec.getLlap().getServiceHosts());
+ }
+ } else {
+ props.put("hive.server2.tez.use.external.sessions", "false");
+ props.put("tez.local.mode", "true");
+ props.put("tez.am.framework.mode", "LOCAL");
+ props.put("mapreduce.framework.name", "local");
+ }
+
+ if (spec.getHiveServer2().getConfigOverrides() != null) {
+ props.putAll(spec.getHiveServer2().getConfigOverrides());
+ }
+
+ // tez-site.xml properties
+ Map tezProps = getTezProperties(zkQuorum, tezAmEnabled, spec);
+
+ return new ConfigMapBuilder()
+ .withNewMetadata()
+ .withName(resourceName(hiveCluster))
+ .withNamespace(hiveCluster.getMetadata().getNamespace())
+ .withLabels(Labels.forComponent(hiveCluster, COMPONENT))
+ .endMetadata()
+ .addToData("hive-site.xml", HadoopXmlBuilder.buildXml(props))
+ .addToData("tez-site.xml", HadoopXmlBuilder.buildXml(tezProps))
+ .build();
+ }
+
+ private static Map getTezProperties(String zkQuorum, boolean tezAmEnabled, HiveClusterSpec spec) {
+ Map tezProps = new LinkedHashMap<>();
+ tezProps.put("tez.am.mode.session", "true");
+ tezProps.put("tez.ignore.lib.uris", "true");
+ tezProps.put("tez.am.tez-ui.webservice.enable", "false");
+ tezProps.put("tez.am.disable.client-version-check", "true");
+ tezProps.put("tez.session.am.dag.submit.timeout.secs", "-1");
+ tezProps.put("tez.am.zookeeper.quorum", zkQuorum);
+ tezProps.put("hive.zookeeper.quorum", zkQuorum);
+ if (tezAmEnabled) {
+ tezProps.put("tez.local.mode", "false");
+ tezProps.put("tez.am.framework.mode", "STANDALONE_ZOOKEEPER");
+ tezProps.put("tez.am.registry.namespace", "/tez_am/server");
+ } else {
+ tezProps.put("tez.local.mode", "true");
+ }
+
+ // LLAP properties required by the Tez AM's
+ // service_plugins_descriptor.json (LlapTaskCommunicator,
+ // LlapTaskSchedulerService).
+ LlapSpec llap = spec.getLlap();
+ if (llap.isEnabled()) {
+ tezProps.put("hive.llap.daemon.service.hosts",
+ llap.getServiceHosts());
+ }
+
+ if (spec.getTezAm().getConfigOverrides() != null) {
+ tezProps.putAll(spec.getTezAm().getConfigOverrides());
+ }
+ return tezProps;
+ }
+
+ /** Returns the ConfigMap resource name for this HiveCluster. */
+ public static String resourceName(HiveCluster hiveCluster) {
+ return hiveCluster.getMetadata().getName() + "-hiveserver2-config";
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/HiveServer2DeploymentDependent.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/HiveServer2DeploymentDependent.java
new file mode 100644
index 000000000000..a441a0487794
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/HiveServer2DeploymentDependent.java
@@ -0,0 +1,206 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import io.fabric8.kubernetes.api.model.ContainerPort;
+import io.fabric8.kubernetes.api.model.ContainerPortBuilder;
+import io.fabric8.kubernetes.api.model.EnvVar;
+import io.fabric8.kubernetes.api.model.IntOrString;
+import io.fabric8.kubernetes.api.model.Probe;
+import io.fabric8.kubernetes.api.model.ProbeBuilder;
+import io.fabric8.kubernetes.api.model.apps.Deployment;
+import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.model.HiveClusterSpec;
+import org.apache.hive.kubernetes.operator.model.spec.HiveServer2Spec;
+import org.apache.hive.kubernetes.operator.util.Labels;
+
+/** Manages the Kubernetes Deployment for HiveServer2. */
+@KubernetesDependent(
+ labelSelector = "app.kubernetes.io/component=hiveserver2,"
+ + "app.kubernetes.io/managed-by=hive-kubernetes-operator"
+)
+public class HiveServer2DeploymentDependent
+ extends HiveDependentResource {
+
+ public static final String COMPONENT = "hiveserver2";
+ private static final String SCRATCH_MOUNT_PATH = "/opt/hive/scratch";
+
+ public HiveServer2DeploymentDependent() {
+ super(Deployment.class);
+ }
+
+ @Override
+ protected Deployment desired(HiveCluster hiveCluster,
+ Context context) {
+ HiveClusterSpec spec = hiveCluster.getSpec();
+ HiveServer2Spec hs2 = spec.getHiveServer2();
+ Map selectorLabels =
+ Labels.selectorForComponent(hiveCluster, COMPONENT);
+
+ List envVars = new ArrayList<>();
+ envVars.add(new EnvVar("SERVICE_NAME", "hiveserver2", null));
+ envVars.add(new EnvVar("IS_RESUME", "true", null));
+ envVars.add(new EnvVar("HIVE_CUSTOM_CONF_DIR",
+ CONF_MOUNT_PATH, null));
+ envVars.add(new EnvVar("HADOOP_CLASSPATH",
+ HADOOP_CLASSPATH_VALUE, null));
+ envVars.add(new EnvVar("TEZ_AM_EXTERNAL_ID",
+ "tez-session-hs2", null));
+
+ // S3A credentials as env vars so Tez tasks can resolve them
+ // via the default AWS credential provider chain.
+ envVars.addAll(buildS3CredentialEnvVars(spec.getStorage()));
+
+ // Env vars consumed by the Hive Docker entrypoint.sh to
+ // configure Tez execution mode at container startup.
+ if (spec.getTezAm().isEnabled()) {
+ envVars.add(new EnvVar("HIVE_SERVER2_TEZ_USE_EXTERNAL_SESSIONS",
+ "true", null));
+ envVars.add(new EnvVar("TEZ_FRAMEWORK_MODE",
+ "STANDALONE_ZOOKEEPER", null));
+ envVars.add(new EnvVar("HIVE_ZOOKEEPER_QUORUM",
+ spec.getZookeeper().getQuorum(), null));
+ }
+
+ if (spec.getLlap().isEnabled()) {
+ envVars.add(new EnvVar("HIVE_LLAP_DAEMON_SERVICE_HOSTS",
+ spec.getLlap().getServiceHosts(), null));
+ }
+
+ String metastoreUri = "thrift://"
+ + hiveCluster.getMetadata().getName() + "-metastore:9083";
+ StringBuilder serviceOpts = new StringBuilder();
+ serviceOpts.append("-Dhive.metastore.uris=").append(metastoreUri);
+ if (spec.getLlap().isEnabled()) {
+ serviceOpts.append(" -Dhive.execution.mode=llap");
+ serviceOpts.append(" -Dhive.llap.daemon.service.hosts=")
+ .append(spec.getLlap().getServiceHosts());
+ }
+ if (spec.getTezAm().isEnabled()) {
+ serviceOpts.append(" -Dhive.zookeeper.quorum=")
+ .append(spec.getZookeeper().getQuorum());
+ }
+ envVars.add(new EnvVar("SERVICE_OPTS",
+ serviceOpts.toString(), null));
+
+ List ports = List.of(
+ new ContainerPortBuilder()
+ .withName("thrift")
+ .withContainerPort(hs2.getThriftPort()).build(),
+ new ContainerPortBuilder()
+ .withName("webui")
+ .withContainerPort(hs2.getWebUiPort()).build()
+ );
+
+ Probe readinessProbe = new ProbeBuilder()
+ .withNewTcpSocket()
+ .withPort(new IntOrString(hs2.getThriftPort()))
+ .endTcpSocket()
+ .withInitialDelaySeconds(15)
+ .withPeriodSeconds(10)
+ .withFailureThreshold(3)
+ .build();
+
+ Probe livenessProbe = new ProbeBuilder()
+ .withNewTcpSocket()
+ .withPort(new IntOrString(hs2.getThriftPort()))
+ .endTcpSocket()
+ .withInitialDelaySeconds(120)
+ .withPeriodSeconds(30)
+ .withFailureThreshold(10)
+ .build();
+
+ boolean tezAmEnabled = spec.getTezAm().isEnabled();
+
+ // Build volume mounts and volumes lists up front so the
+ // Deployment is constructed in a single builder chain.
+ // Using editFirstContainer() caused JOSDK SSA comparison
+ // mismatches that triggered infinite reconciliation loops.
+ List volumeMounts =
+ new ArrayList<>();
+ volumeMounts.add(new io.fabric8.kubernetes.api.model.VolumeMountBuilder()
+ .withName("hive-config").withMountPath(CONF_MOUNT_PATH).build());
+
+ List volumes =
+ new ArrayList<>();
+ volumes.add(buildProjectedConfigVolume("hive-config",
+ HiveServer2ConfigMapDependent.resourceName(hiveCluster),
+ HadoopConfigMapDependent.resourceName(hiveCluster)));
+
+ if (tezAmEnabled) {
+ volumeMounts.add(
+ new io.fabric8.kubernetes.api.model.VolumeMountBuilder()
+ .withName("scratch")
+ .withMountPath(SCRATCH_MOUNT_PATH).build());
+ volumes.add(new io.fabric8.kubernetes.api.model.VolumeBuilder()
+ .withName("scratch")
+ .withNewPersistentVolumeClaim()
+ .withClaimName(ScratchPvcDependent
+ .resourceName(hiveCluster))
+ .endPersistentVolumeClaim()
+ .build());
+ }
+
+ Deployment deployment = new DeploymentBuilder()
+ .withNewMetadata()
+ .withName(resourceName(hiveCluster))
+ .withNamespace(hiveCluster.getMetadata().getNamespace())
+ .withLabels(Labels.forComponent(hiveCluster, COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .withReplicas(hs2.getReplicas())
+ .withNewSelector()
+ .withMatchLabels(selectorLabels)
+ .endSelector()
+ .withNewTemplate()
+ .withNewMetadata()
+ .withLabels(Labels.forComponent(hiveCluster, COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .addNewContainer()
+ .withName("hiveserver2")
+ .withImage(spec.getImage())
+ .withImagePullPolicy(spec.getImagePullPolicy())
+ .withEnv(envVars)
+ .withPorts(ports)
+ .withReadinessProbe(readinessProbe)
+ .withLivenessProbe(livenessProbe)
+ .withResources(buildResources(hs2.getResources()))
+ .withVolumeMounts(volumeMounts)
+ .endContainer()
+ .withVolumes(volumes)
+ .endSpec()
+ .endTemplate()
+ .endSpec()
+ .build();
+ return deployment;
+ }
+
+ /** Returns the Deployment resource name for this HiveCluster. */
+ public static String resourceName(HiveCluster hiveCluster) {
+ return hiveCluster.getMetadata().getName() + "-hiveserver2";
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/HiveServer2ServiceDependent.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/HiveServer2ServiceDependent.java
new file mode 100644
index 000000000000..911498c3fa1b
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/HiveServer2ServiceDependent.java
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import io.fabric8.kubernetes.api.model.IntOrString;
+import io.fabric8.kubernetes.api.model.Service;
+import io.fabric8.kubernetes.api.model.ServiceBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.model.spec.HiveServer2Spec;
+import org.apache.hive.kubernetes.operator.util.Labels;
+
+/** Manages the Kubernetes Service for HiveServer2 (Thrift and WebUI ports). */
+@KubernetesDependent(
+ labelSelector = "app.kubernetes.io/component=hiveserver2,"
+ + "app.kubernetes.io/managed-by=hive-kubernetes-operator"
+)
+public class HiveServer2ServiceDependent
+ extends HiveDependentResource {
+
+ public HiveServer2ServiceDependent() {
+ super(Service.class);
+ }
+
+ @Override
+ protected Service desired(HiveCluster hiveCluster,
+ Context context) {
+ HiveServer2Spec hs2 = hiveCluster.getSpec().getHiveServer2();
+
+ return new ServiceBuilder()
+ .withNewMetadata()
+ .withName(hiveCluster.getMetadata().getName() + "-hiveserver2")
+ .withNamespace(hiveCluster.getMetadata().getNamespace())
+ .withLabels(Labels.forComponent(hiveCluster,
+ HiveServer2DeploymentDependent.COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .withType(hs2.getServiceType())
+ .withSelector(Labels.selectorForComponent(hiveCluster,
+ HiveServer2DeploymentDependent.COMPONENT))
+ .addNewPort()
+ .withName("thrift")
+ .withPort(hs2.getThriftPort())
+ .withTargetPort(new IntOrString(hs2.getThriftPort()))
+ .endPort()
+ .addNewPort()
+ .withName("webui")
+ .withPort(hs2.getWebUiPort())
+ .withTargetPort(new IntOrString(hs2.getWebUiPort()))
+ .endPort()
+ .endSpec()
+ .build();
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/LlapConfigMapDependent.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/LlapConfigMapDependent.java
new file mode 100644
index 000000000000..f2a7651ed2fa
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/LlapConfigMapDependent.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.model.HiveClusterSpec;
+import org.apache.hive.kubernetes.operator.model.spec.LlapSpec;
+import org.apache.hive.kubernetes.operator.util.HadoopXmlBuilder;
+import org.apache.hive.kubernetes.operator.util.Labels;
+
+/** Manages the llap-daemon-site.xml ConfigMap for LLAP daemons. */
+@KubernetesDependent(
+ labelSelector = "app.kubernetes.io/component=llap,"
+ + "app.kubernetes.io/managed-by=hive-kubernetes-operator"
+)
+public class LlapConfigMapDependent
+ extends HiveDependentResource {
+
+ public static final String COMPONENT = "llap";
+
+ public LlapConfigMapDependent() {
+ super(ConfigMap.class);
+ }
+
+ @Override
+ protected ConfigMap desired(HiveCluster hiveCluster,
+ Context context) {
+ HiveClusterSpec spec = hiveCluster.getSpec();
+ LlapSpec llap = spec.getLlap();
+ Map props = new LinkedHashMap<>();
+
+ props.put("hive.llap.daemon.memory.per.instance.mb",
+ String.valueOf(llap.getMemoryMb()));
+ props.put("hive.llap.daemon.num.executors",
+ String.valueOf(llap.getExecutors()));
+ props.put("hive.llap.daemon.service.hosts", llap.getServiceHosts());
+ props.put("hive.zookeeper.quorum",
+ spec.getZookeeper().getQuorum());
+
+ if (llap.getConfigOverrides() != null) {
+ props.putAll(llap.getConfigOverrides());
+ }
+
+ return new ConfigMapBuilder()
+ .withNewMetadata()
+ .withName(resourceName(hiveCluster))
+ .withNamespace(hiveCluster.getMetadata().getNamespace())
+ .withLabels(Labels.forComponent(hiveCluster, COMPONENT))
+ .endMetadata()
+ .addToData("llap-daemon-site.xml",
+ HadoopXmlBuilder.buildXml(props))
+ .build();
+ }
+
+ /** Returns the ConfigMap resource name for this HiveCluster. */
+ public static String resourceName(HiveCluster hiveCluster) {
+ return hiveCluster.getMetadata().getName() + "-llap-config";
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/LlapServiceDependent.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/LlapServiceDependent.java
new file mode 100644
index 000000000000..30d5933a1b4f
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/LlapServiceDependent.java
@@ -0,0 +1,76 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import io.fabric8.kubernetes.api.model.IntOrString;
+import io.fabric8.kubernetes.api.model.Service;
+import io.fabric8.kubernetes.api.model.ServiceBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.util.Labels;
+
+/**
+ * Manages the headless Kubernetes Service for LLAP daemons.
+ * Required by the StatefulSet for stable DNS entries and ZooKeeper registration.
+ */
+@KubernetesDependent(
+ labelSelector = "app.kubernetes.io/component=llap,"
+ + "app.kubernetes.io/managed-by=hive-kubernetes-operator"
+)
+public class LlapServiceDependent
+ extends HiveDependentResource {
+
+ /** Registers Service as the resource type managed by this dependent. */
+ public LlapServiceDependent() {
+ super(Service.class);
+ }
+
+ /**
+ * Builds the desired headless Service (clusterIP: None) selecting the
+ * LLAP StatefulSet pods. The Service name must match the
+ * {@code serviceName} set on the LLAP StatefulSet so pods get stable
+ * DNS entries.
+ *
+ * NOTE(review): the LLAP StatefulSet also exposes containerPort 15003
+ * ("output") which is not published here — confirm that is intentional.
+ */
+ @Override
+ protected Service desired(HiveCluster hiveCluster,
+ Context context) {
+ return new ServiceBuilder()
+ .withNewMetadata()
+ .withName(hiveCluster.getMetadata().getName() + "-llap")
+ .withNamespace(hiveCluster.getMetadata().getNamespace())
+ .withLabels(Labels.forComponent(hiveCluster,
+ LlapStatefulSetDependent.COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .withClusterIP("None")
+ .withSelector(Labels.selectorForComponent(hiveCluster,
+ LlapStatefulSetDependent.COMPONENT))
+ .addNewPort()
+ .withName("management")
+ .withPort(15004)
+ .withTargetPort(new IntOrString(15004))
+ .endPort()
+ .addNewPort()
+ .withName("shuffle")
+ .withPort(15551)
+ .withTargetPort(new IntOrString(15551))
+ .endPort()
+ .addNewPort()
+ .withName("web")
+ .withPort(15002)
+ .withTargetPort(new IntOrString(15002))
+ .endPort()
+ .endSpec()
+ .build();
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/LlapStatefulSetDependent.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/LlapStatefulSetDependent.java
new file mode 100644
index 000000000000..14510d7cf858
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/LlapStatefulSetDependent.java
@@ -0,0 +1,151 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import io.fabric8.kubernetes.api.model.ContainerPort;
+import io.fabric8.kubernetes.api.model.ContainerPortBuilder;
+import io.fabric8.kubernetes.api.model.EnvVar;
+import io.fabric8.kubernetes.api.model.IntOrString;
+import io.fabric8.kubernetes.api.model.Probe;
+import io.fabric8.kubernetes.api.model.ProbeBuilder;
+import io.fabric8.kubernetes.api.model.apps.StatefulSet;
+import io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.model.HiveClusterSpec;
+import org.apache.hive.kubernetes.operator.model.spec.LlapSpec;
+import org.apache.hive.kubernetes.operator.util.Labels;
+
+/**
+ * Manages the Kubernetes StatefulSet for LLAP daemons.
+ * Uses StatefulSet for stable pod identities required by ZooKeeper registration.
+ */
+@KubernetesDependent(
+ labelSelector = "app.kubernetes.io/component=llap,"
+ + "app.kubernetes.io/managed-by=hive-kubernetes-operator"
+)
+public class LlapStatefulSetDependent
+ extends HiveDependentResource {
+
+ /** Component label value; must stay in sync with the label selector above. */
+ public static final String COMPONENT = "llap";
+
+ /** Registers StatefulSet as the resource type managed by this dependent. */
+ public LlapStatefulSetDependent() {
+ super(StatefulSet.class);
+ }
+
+ /**
+ * Builds the desired LLAP StatefulSet: one container per pod running the
+ * daemon image, configured entirely through environment variables plus a
+ * projected config volume (llap-daemon-site.xml + Hadoop config) mounted
+ * at CONF_MOUNT_PATH.
+ */
+ @Override
+ protected StatefulSet desired(HiveCluster hiveCluster,
+ Context context) {
+ HiveClusterSpec spec = hiveCluster.getSpec();
+ LlapSpec llap = spec.getLlap();
+ Map selectorLabels =
+ Labels.selectorForComponent(hiveCluster, COMPONENT);
+
+ List envVars = new ArrayList<>();
+ envVars.add(new EnvVar("SERVICE_NAME", "llap", null));
+ envVars.add(new EnvVar("IS_RESUME", "true", null));
+ envVars.add(new EnvVar("HIVE_CUSTOM_CONF_DIR",
+ CONF_MOUNT_PATH, null));
+ envVars.add(new EnvVar("LLAP_MEMORY_MB",
+ String.valueOf(llap.getMemoryMb()), null));
+ envVars.add(new EnvVar("LLAP_EXECUTORS",
+ String.valueOf(llap.getExecutors()), null));
+ envVars.add(new EnvVar("HIVE_ZOOKEEPER_QUORUM",
+ spec.getZookeeper().getQuorum(), null));
+ envVars.add(new EnvVar("HIVE_LLAP_DAEMON_SERVICE_HOSTS",
+ llap.getServiceHosts(), null));
+
+ // S3A credentials so LLAP tasks can read/write S3.
+ envVars.addAll(buildS3CredentialEnvVars(spec.getStorage()));
+ envVars.add(new EnvVar("HADOOP_CLASSPATH",
+ HADOOP_CLASSPATH_VALUE, null));
+
+ // Ports 15004/15551/15002 are also published by LlapServiceDependent;
+ // 15003 ("output") is container-only. Keep the two lists in sync.
+ List ports = List.of(
+ new ContainerPortBuilder()
+ .withName("management").withContainerPort(15004).build(),
+ new ContainerPortBuilder()
+ .withName("shuffle").withContainerPort(15551).build(),
+ new ContainerPortBuilder()
+ .withName("web").withContainerPort(15002).build(),
+ new ContainerPortBuilder()
+ .withName("output").withContainerPort(15003).build()
+ );
+
+ // Readiness gates on the management port accepting TCP connections.
+ Probe readinessProbe = new ProbeBuilder()
+ .withNewTcpSocket()
+ .withPort(new IntOrString(15004))
+ .endTcpSocket()
+ .withInitialDelaySeconds(15)
+ .withPeriodSeconds(10)
+ .withFailureThreshold(3)
+ .build();
+
+ // NOTE(review): this duplicates both resourceName(hiveCluster) and the
+ // name built in LlapServiceDependent — all three must agree for the
+ // headless-Service DNS to work. Consider a single shared constant.
+ String headlessServiceName =
+ hiveCluster.getMetadata().getName() + "-llap";
+
+ return new StatefulSetBuilder()
+ .withNewMetadata()
+ .withName(resourceName(hiveCluster))
+ .withNamespace(hiveCluster.getMetadata().getNamespace())
+ .withLabels(Labels.forComponent(hiveCluster, COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .withReplicas(llap.getReplicas())
+ .withServiceName(headlessServiceName)
+ .withNewSelector()
+ .withMatchLabels(selectorLabels)
+ .endSelector()
+ .withNewTemplate()
+ .withNewMetadata()
+ .withLabels(Labels.forComponent(hiveCluster, COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .addNewContainer()
+ .withName("llap")
+ .withImage(spec.getImage())
+ .withImagePullPolicy(spec.getImagePullPolicy())
+ .withEnv(envVars)
+ .withPorts(ports)
+ .withReadinessProbe(readinessProbe)
+ .withResources(buildResources(llap.getResources()))
+ .addNewVolumeMount()
+ .withName("llap-config")
+ .withMountPath(CONF_MOUNT_PATH)
+ .endVolumeMount()
+ .endContainer()
+ .withVolumes(List.of(buildProjectedConfigVolume(
+ "llap-config",
+ LlapConfigMapDependent.resourceName(hiveCluster),
+ HadoopConfigMapDependent.resourceName(hiveCluster))))
+ .endSpec()
+ .endTemplate()
+ .endSpec()
+ .build();
+ }
+
+ /** Returns the StatefulSet resource name for this HiveCluster. */
+ public static String resourceName(HiveCluster hiveCluster) {
+ return hiveCluster.getMetadata().getName() + "-llap";
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/MetastoreConfigMapDependent.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/MetastoreConfigMapDependent.java
new file mode 100644
index 000000000000..6d25cb2cf3a3
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/MetastoreConfigMapDependent.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import io.fabric8.kubernetes.api.model.ConfigMap;
+import io.fabric8.kubernetes.api.model.ConfigMapBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.model.spec.DatabaseConfig;
+import org.apache.hive.kubernetes.operator.model.spec.MetastoreSpec;
+import org.apache.hive.kubernetes.operator.util.HadoopXmlBuilder;
+import org.apache.hive.kubernetes.operator.util.Labels;
+
+/** Manages the metastore-site.xml ConfigMap for the Hive Metastore. */
+@KubernetesDependent(
+ labelSelector = "app.kubernetes.io/component=metastore,"
+ + "app.kubernetes.io/managed-by=hive-kubernetes-operator"
+)
+public class MetastoreConfigMapDependent
+ extends HiveDependentResource {
+
+ /** Component label value; must stay in sync with the label selector above. */
+ public static final String COMPONENT = "metastore";
+
+ /** Registers ConfigMap as the resource type managed by this dependent. */
+ public MetastoreConfigMapDependent() {
+ super(ConfigMap.class);
+ }
+
+ /**
+ * Builds the desired metastore-site.xml ConfigMap. Only non-null
+ * database settings are emitted; user configOverrides are applied last
+ * so they take precedence over derived properties.
+ *
+ * The DB password is intentionally never written here — presumably it is
+ * injected as a secret-backed env var elsewhere (see DBPASSWORD in
+ * SchemaInitJobDependent); confirm before changing.
+ */
+ @Override
+ protected ConfigMap desired(HiveCluster hiveCluster,
+ Context context) {
+ MetastoreSpec metastore = hiveCluster.getSpec().getMetastore();
+ Map props = new LinkedHashMap<>();
+
+ props.put("metastore.warehouse.dir", metastore.getWarehouseDir());
+
+ DatabaseConfig db = metastore.getDatabase();
+ if (db != null) {
+ if (db.getUrl() != null) {
+ props.put("javax.jdo.option.ConnectionURL", db.getUrl());
+ }
+ if (db.getDriver() != null) {
+ props.put("javax.jdo.option.ConnectionDriverName", db.getDriver());
+ }
+ if (db.getUsername() != null) {
+ props.put("javax.jdo.option.ConnectionUserName", db.getUsername());
+ }
+ }
+
+ // Overrides win over everything the operator derived above.
+ if (metastore.getConfigOverrides() != null) {
+ props.putAll(metastore.getConfigOverrides());
+ }
+
+ return new ConfigMapBuilder()
+ .withNewMetadata()
+ .withName(resourceName(hiveCluster))
+ .withNamespace(hiveCluster.getMetadata().getNamespace())
+ .withLabels(Labels.forComponent(hiveCluster, COMPONENT))
+ .endMetadata()
+ .addToData("metastore-site.xml", HadoopXmlBuilder.buildXml(props))
+ .build();
+ }
+
+ /** Returns the ConfigMap resource name for this HiveCluster. */
+ public static String resourceName(HiveCluster hiveCluster) {
+ return hiveCluster.getMetadata().getName() + "-metastore-config";
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/MetastoreDeploymentDependent.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/MetastoreDeploymentDependent.java
new file mode 100644
index 000000000000..5cc86d026fcc
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/MetastoreDeploymentDependent.java
@@ -0,0 +1,147 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import io.fabric8.kubernetes.api.model.Container;
+import io.fabric8.kubernetes.api.model.ContainerPort;
+import io.fabric8.kubernetes.api.model.ContainerPortBuilder;
+import io.fabric8.kubernetes.api.model.EnvVar;
+import io.fabric8.kubernetes.api.model.IntOrString;
+import io.fabric8.kubernetes.api.model.Probe;
+import io.fabric8.kubernetes.api.model.ProbeBuilder;
+import io.fabric8.kubernetes.api.model.Volume;
+import io.fabric8.kubernetes.api.model.VolumeMount;
+import io.fabric8.kubernetes.api.model.apps.Deployment;
+import io.fabric8.kubernetes.api.model.apps.DeploymentBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.model.HiveClusterSpec;
+import org.apache.hive.kubernetes.operator.model.spec.DatabaseConfig;
+import org.apache.hive.kubernetes.operator.util.Labels;
+
+/** Manages the Kubernetes Deployment for the Hive Metastore. */
+@KubernetesDependent(
+ labelSelector = "app.kubernetes.io/component=metastore,"
+ + "app.kubernetes.io/managed-by=hive-kubernetes-operator"
+)
+public class MetastoreDeploymentDependent
+ extends HiveDependentResource {
+
+ /** Component label value; must stay in sync with the label selector above. */
+ public static final String COMPONENT = "metastore";
+
+ /** Registers Deployment as the resource type managed by this dependent. */
+ public MetastoreDeploymentDependent() {
+ super(Deployment.class);
+ }
+
+ /**
+ * Builds the desired Metastore Deployment: a single container driven by
+ * env vars (DB connection, S3 credentials), TCP probes on the Thrift
+ * port, and volumes/init-containers assembled by the shared
+ * buildMetastoreVolumesAndInitContainers() helper (also used by the
+ * schema-init Job, so the two stay consistent).
+ */
+ @Override
+ protected Deployment desired(HiveCluster hiveCluster,
+ Context context) {
+ HiveClusterSpec spec = hiveCluster.getSpec();
+ DatabaseConfig db = spec.getMetastore().getDatabase();
+ Map selectorLabels =
+ Labels.selectorForComponent(hiveCluster, COMPONENT);
+
+ List envVars = new ArrayList<>();
+ envVars.add(new EnvVar("SERVICE_NAME", "metastore", null));
+ envVars.add(new EnvVar("IS_RESUME", "true", null));
+ envVars.add(new EnvVar("HIVE_CUSTOM_CONF_DIR",
+ CONF_MOUNT_PATH, null));
+ envVars.add(new EnvVar("HADOOP_CLASSPATH",
+ HADOOP_CLASSPATH_VALUE, null));
+ envVars.addAll(buildDbEnvVars(db));
+ envVars.addAll(buildS3CredentialEnvVars(spec.getStorage()));
+
+ // 9083 = Thrift API, 9001 = REST; MetastoreServiceDependent publishes
+ // the same two ports — keep in sync.
+ List ports = List.of(
+ new ContainerPortBuilder()
+ .withName("thrift").withContainerPort(9083).build(),
+ new ContainerPortBuilder()
+ .withName("rest").withContainerPort(9001).build()
+ );
+
+ // Readiness: fast-fail so traffic is withheld until Thrift is up.
+ Probe readinessProbe = new ProbeBuilder()
+ .withNewTcpSocket()
+ .withPort(new IntOrString(9083))
+ .endTcpSocket()
+ .withInitialDelaySeconds(15)
+ .withPeriodSeconds(10)
+ .withFailureThreshold(3)
+ .build();
+
+ // Liveness: much more lenient than readiness, to avoid restart storms
+ // during slow startups (e.g. schema upgrades).
+ Probe livenessProbe = new ProbeBuilder()
+ .withNewTcpSocket()
+ .withPort(new IntOrString(9083))
+ .endTcpSocket()
+ .withInitialDelaySeconds(60)
+ .withPeriodSeconds(30)
+ .withFailureThreshold(5)
+ .build();
+
+ // Output parameters populated by the shared helper.
+ List initContainers = new ArrayList<>();
+ List volumeMounts = new ArrayList<>();
+ List volumes = new ArrayList<>();
+ buildMetastoreVolumesAndInitContainers(
+ hiveCluster, db, spec.getImage(),
+ initContainers, volumeMounts, volumes);
+
+ return new DeploymentBuilder()
+ .withNewMetadata()
+ .withName(resourceName(hiveCluster))
+ .withNamespace(hiveCluster.getMetadata().getNamespace())
+ .withLabels(Labels.forComponent(hiveCluster, COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .withReplicas(spec.getMetastore().getReplicas())
+ .withNewSelector()
+ .withMatchLabels(selectorLabels)
+ .endSelector()
+ .withNewTemplate()
+ .withNewMetadata()
+ .withLabels(Labels.forComponent(hiveCluster, COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .withInitContainers(initContainers)
+ .addNewContainer()
+ .withName("metastore")
+ .withImage(spec.getImage())
+ .withImagePullPolicy(spec.getImagePullPolicy())
+ .withEnv(envVars)
+ .withPorts(ports)
+ .withReadinessProbe(readinessProbe)
+ .withLivenessProbe(livenessProbe)
+ .withResources(buildResources(
+ spec.getMetastore().getResources()))
+ .withVolumeMounts(volumeMounts)
+ .endContainer()
+ .withVolumes(volumes)
+ .endSpec()
+ .endTemplate()
+ .endSpec()
+ .build();
+ }
+
+ /** Returns the Deployment resource name for this HiveCluster. */
+ public static String resourceName(HiveCluster hiveCluster) {
+ return hiveCluster.getMetadata().getName() + "-metastore";
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/MetastoreServiceDependent.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/MetastoreServiceDependent.java
new file mode 100644
index 000000000000..cf0537897512
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/MetastoreServiceDependent.java
@@ -0,0 +1,68 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import io.fabric8.kubernetes.api.model.IntOrString;
+import io.fabric8.kubernetes.api.model.Service;
+import io.fabric8.kubernetes.api.model.ServiceBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.util.Labels;
+
+/** Manages the Kubernetes Service for the Hive Metastore (Thrift + REST ports). */
+@KubernetesDependent(
+ labelSelector = "app.kubernetes.io/component=metastore,"
+ + "app.kubernetes.io/managed-by=hive-kubernetes-operator"
+)
+public class MetastoreServiceDependent
+ extends HiveDependentResource {
+
+ /** Registers Service as the resource type managed by this dependent. */
+ public MetastoreServiceDependent() {
+ super(Service.class);
+ }
+
+ /**
+ * Builds the desired ClusterIP Service fronting the Metastore
+ * Deployment. Port names/numbers (thrift 9083, rest 9001) mirror the
+ * container ports in MetastoreDeploymentDependent — keep in sync.
+ */
+ @Override
+ protected Service desired(HiveCluster hiveCluster,
+ Context context) {
+ return new ServiceBuilder()
+ .withNewMetadata()
+ .withName(hiveCluster.getMetadata().getName() + "-metastore")
+ .withNamespace(hiveCluster.getMetadata().getNamespace())
+ .withLabels(Labels.forComponent(hiveCluster,
+ MetastoreDeploymentDependent.COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .withType("ClusterIP")
+ .withSelector(Labels.selectorForComponent(hiveCluster,
+ MetastoreDeploymentDependent.COMPONENT))
+ .addNewPort()
+ .withName("thrift")
+ .withPort(9083)
+ .withTargetPort(new IntOrString(9083))
+ .endPort()
+ .addNewPort()
+ .withName("rest")
+ .withPort(9001)
+ .withTargetPort(new IntOrString(9001))
+ .endPort()
+ .endSpec()
+ .build();
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/SchemaInitJobDependent.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/SchemaInitJobDependent.java
new file mode 100644
index 000000000000..d96a9a908e35
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/SchemaInitJobDependent.java
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import io.fabric8.kubernetes.api.model.Container;
+import io.fabric8.kubernetes.api.model.EnvVar;
+import io.fabric8.kubernetes.api.model.Volume;
+import io.fabric8.kubernetes.api.model.VolumeMount;
+import io.fabric8.kubernetes.api.model.batch.v1.Job;
+import io.fabric8.kubernetes.api.model.batch.v1.JobBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.model.HiveClusterSpec;
+import org.apache.hive.kubernetes.operator.model.spec.DatabaseConfig;
+import org.apache.hive.kubernetes.operator.model.spec.SecretKeyRef;
+import org.apache.hive.kubernetes.operator.util.Labels;
+
+/**
+ * Manages the Kubernetes Job that initializes or upgrades the Hive Metastore
+ * database schema using schematool.
+ */
+@KubernetesDependent(
+ labelSelector = "app.kubernetes.io/component=schema-init,"
+ + "app.kubernetes.io/managed-by=hive-kubernetes-operator"
+)
+public class SchemaInitJobDependent
+ extends HiveDependentResource {
+
+ /** Component label value; must stay in sync with the label selector above. */
+ public static final String COMPONENT = "schema-init";
+
+ /** Registers Job as the resource type managed by this dependent. */
+ public SchemaInitJobDependent() {
+ super(Job.class);
+ }
+
+ /**
+ * Builds the desired schema-init Job. The container bypasses the image
+ * entrypoint and runs schematool via {@code bash -c}, so the command
+ * string replicates the entrypoint's config setup (see inline comment).
+ * Volumes/init-containers come from the same shared helper as the
+ * Metastore Deployment so both see identical config and driver jars.
+ */
+ @Override
+ protected Job desired(HiveCluster hiveCluster,
+ Context context) {
+ HiveClusterSpec spec = hiveCluster.getSpec();
+ DatabaseConfig db = spec.getMetastore().getDatabase();
+
+ List envVars = new ArrayList<>();
+ envVars.add(new EnvVar("SERVICE_NAME", "metastore", null));
+ envVars.add(new EnvVar("IS_RESUME", "false", null));
+ envVars.add(new EnvVar("HIVE_CUSTOM_CONF_DIR",
+ CONF_MOUNT_PATH, null));
+ envVars.addAll(buildDbEnvVars(db));
+
+ SecretKeyRef passwordRef = db.getPasswordSecretRef();
+ boolean hasDriverJar = db.getDriverJarUrl() != null;
+
+ // This Job runs schematool directly (not via the entrypoint),
+ // so we must replicate the entrypoint's config setup:
+ // 1. Symlink custom config files into HIVE_CONF_DIR
+ // 2. Set HADOOP_CLIENT_OPTS to pass SERVICE_OPTS as JVM args
+ // 3. Copy JDBC driver jar if downloaded by init container
+ // All env references ($HIVE_HOME etc.) are expanded by bash at
+ // runtime inside the container, not by Java.
+ StringBuilder cmd = new StringBuilder();
+ cmd.append("export HIVE_CONF_DIR=$HIVE_HOME/conf && ");
+ cmd.append("if [ -d \"${HIVE_CUSTOM_CONF_DIR:-}\" ]; then ");
+ cmd.append("find \"${HIVE_CUSTOM_CONF_DIR}\" -type f -exec ");
+ cmd.append("ln -sfn {} \"${HIVE_CONF_DIR}\"/ \\; ; ");
+ cmd.append("export HADOOP_CONF_DIR=$HIVE_CONF_DIR; fi && ");
+ cmd.append("export HADOOP_CLIENT_OPTS="
+ + "\"${HADOOP_CLIENT_OPTS:-} -Xmx1G ${SERVICE_OPTS:-}\" && ");
+ if (hasDriverJar) {
+ cmd.append("cp ").append(EXT_JARS_PATH)
+ .append("/*.jar $HIVE_HOME/lib/ && ");
+ }
+ // -initOrUpgradeSchema makes the Job idempotent across re-runs:
+ // it creates the schema if absent, upgrades it otherwise.
+ cmd.append("$HIVE_HOME/bin/schematool -dbType ")
+ .append(db.getType())
+ .append(" -initOrUpgradeSchema");
+ if (passwordRef != null) {
+ // Password comes from the secret-backed DBPASSWORD env var,
+ // expanded by bash — it is never embedded in the manifest.
+ cmd.append(" -passWord \"$DBPASSWORD\"");
+ }
+ String schemaCommand = cmd.toString();
+
+ // Output parameters populated by the shared helper.
+ List initContainers = new ArrayList<>();
+ List volumeMounts = new ArrayList<>();
+ List volumes = new ArrayList<>();
+ buildMetastoreVolumesAndInitContainers(
+ hiveCluster, db, spec.getImage(),
+ initContainers, volumeMounts, volumes);
+
+ return new JobBuilder()
+ .withNewMetadata()
+ .withName(resourceName(hiveCluster))
+ .withNamespace(hiveCluster.getMetadata().getNamespace())
+ .withLabels(Labels.forComponent(hiveCluster, COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .withBackoffLimit(3)
+ .withNewTemplate()
+ .withNewMetadata()
+ .withLabels(Labels.forComponent(
+ hiveCluster, COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .withRestartPolicy("OnFailure")
+ .withInitContainers(initContainers)
+ .addNewContainer()
+ .withName("schema-init")
+ .withImage(spec.getImage())
+ .withImagePullPolicy(spec.getImagePullPolicy())
+ .withCommand("/bin/bash", "-c")
+ .withArgs(schemaCommand)
+ .withEnv(envVars)
+ .withVolumeMounts(volumeMounts)
+ .endContainer()
+ .withVolumes(volumes)
+ .endSpec()
+ .endTemplate()
+ .endSpec()
+ .build();
+ }
+
+ /** Returns the Job resource name for this HiveCluster. */
+ public static String resourceName(HiveCluster hiveCluster) {
+ return hiveCluster.getMetadata().getName() + "-schema-init";
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/ScratchPvcDependent.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/ScratchPvcDependent.java
new file mode 100644
index 000000000000..3591212d39fd
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/ScratchPvcDependent.java
@@ -0,0 +1,89 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import java.util.List;
+
+import io.fabric8.kubernetes.api.model.PersistentVolumeClaim;
+import io.fabric8.kubernetes.api.model.PersistentVolumeClaimBuilder;
+import io.fabric8.kubernetes.api.model.Quantity;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.model.spec.TezAmSpec;
+import org.apache.hive.kubernetes.operator.util.Labels;
+
+/**
+ * Manages the shared scratch PersistentVolumeClaim mounted by both
+ * HiveServer2 and TezAM at /opt/hive/scratch.
+ *
+ * This mirrors the Docker Compose pattern where a named volume
+ * {@code scratch:/opt/hive/scratch} is shared between the hs2 and
+ * tezam containers so that the {@code dummy_path} written by HS2
+ * (for VALUES clause) is accessible by the TezAM.
+ *
+ * The PVC uses ReadWriteMany access mode so both pods can mount it
+ * simultaneously.
+ */
+@KubernetesDependent(
+ labelSelector = "app.kubernetes.io/component=scratch,"
+ + "app.kubernetes.io/managed-by=hive-kubernetes-operator"
+)
+public class ScratchPvcDependent
+ extends HiveDependentResource {
+
+ /** Component label value; must stay in sync with the label selector above. */
+ public static final String COMPONENT = "scratch";
+
+ /** Registers PersistentVolumeClaim as the managed resource type. */
+ public ScratchPvcDependent() {
+ super(PersistentVolumeClaim.class);
+ }
+
+ /**
+ * Builds the desired ReadWriteMany PVC sized/classed from the TezAM
+ * spec. RWX requires a storage class that supports multi-node mounts;
+ * when scratchStorageClassName is null the cluster default class is
+ * used (spec.storageClassName left unset).
+ */
+ @Override
+ protected PersistentVolumeClaim desired(HiveCluster hiveCluster,
+ Context context) {
+ TezAmSpec tezAm = hiveCluster.getSpec().getTezAm();
+
+ PersistentVolumeClaimBuilder builder = new PersistentVolumeClaimBuilder()
+ .withNewMetadata()
+ .withName(resourceName(hiveCluster))
+ .withNamespace(hiveCluster.getMetadata().getNamespace())
+ .withLabels(Labels.forComponent(hiveCluster, COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .withAccessModes(List.of("ReadWriteMany"))
+ .withNewResources()
+ .addToRequests("storage",
+ new Quantity(tezAm.getScratchStorageSize()))
+ .endResources()
+ .endSpec();
+
+ if (tezAm.getScratchStorageClassName() != null) {
+ // editSpec()/endSpec() mutates `builder` in place, so the returned
+ // value is intentionally unused — confirm against fabric8 builder
+ // semantics before refactoring this into a chained call.
+ builder.editSpec()
+ .withStorageClassName(tezAm.getScratchStorageClassName())
+ .endSpec();
+ }
+
+ return builder.build();
+ }
+
+ /** Returns the PVC resource name for this HiveCluster. */
+ public static String resourceName(HiveCluster hiveCluster) {
+ return hiveCluster.getMetadata().getName() + "-scratch";
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/TezAmServiceDependent.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/TezAmServiceDependent.java
new file mode 100644
index 000000000000..adbe40ce4717
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/TezAmServiceDependent.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import io.fabric8.kubernetes.api.model.Service;
+import io.fabric8.kubernetes.api.model.ServiceBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.util.Labels;
+
+/**
+ * Manages the headless Kubernetes Service for Tez Application Master.
+ * Required by the StatefulSet for stable DNS entries so that
+ * HiveServer2 can resolve TezAM pod hostnames for RPC communication.
+ */
+@KubernetesDependent(
+ labelSelector = "app.kubernetes.io/component=tezam,"
+ + "app.kubernetes.io/managed-by=hive-kubernetes-operator"
+)
+public class TezAmServiceDependent
+ extends HiveDependentResource {
+
+ /** Registers Service as the resource type managed by this dependent. */
+ public TezAmServiceDependent() {
+ super(Service.class);
+ }
+
+ /**
+ * Builds the desired headless Service (clusterIP: None) selecting the
+ * TezAM StatefulSet pods. Unlike the LLAP/Metastore services it
+ * declares no ports — presumably only the per-pod DNS records are
+ * needed, since HS2 learns TezAM RPC ports via ZooKeeper; confirm.
+ */
+ @Override
+ protected Service desired(HiveCluster hiveCluster,
+ Context context) {
+ return new ServiceBuilder()
+ .withNewMetadata()
+ .withName(hiveCluster.getMetadata().getName() + "-tezam")
+ .withNamespace(hiveCluster.getMetadata().getNamespace())
+ .withLabels(Labels.forComponent(hiveCluster,
+ TezAmStatefulSetDependent.COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .withClusterIP("None")
+ .withSelector(Labels.selectorForComponent(hiveCluster,
+ TezAmStatefulSetDependent.COMPONENT))
+ .endSpec()
+ .build();
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/TezAmStatefulSetDependent.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/TezAmStatefulSetDependent.java
new file mode 100644
index 000000000000..a82cee953950
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/TezAmStatefulSetDependent.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent;
+
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+
+import io.fabric8.kubernetes.api.model.EnvVar;
+import io.fabric8.kubernetes.api.model.apps.StatefulSet;
+import io.fabric8.kubernetes.api.model.apps.StatefulSetBuilder;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.processing.dependent.kubernetes.KubernetesDependent;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.model.HiveClusterSpec;
+import org.apache.hive.kubernetes.operator.model.spec.TezAmSpec;
+import org.apache.hive.kubernetes.operator.util.Labels;
+
+/**
+ * Manages the Kubernetes StatefulSet for the Tez Application Master.
+ * Uses StatefulSet (with a headless Service) so that each TezAM pod
+ * gets a stable, DNS-resolvable hostname. HiveServer2 discovers
+ * TezAM pods via ZooKeeper and connects over RPC using the hostname,
+ * so the hostname must be resolvable within the cluster.
+ */
+@KubernetesDependent(
+ labelSelector = "app.kubernetes.io/component=tezam,"
+ + "app.kubernetes.io/managed-by=hive-kubernetes-operator"
+)
+public class TezAmStatefulSetDependent
+ extends HiveDependentResource {
+
+ public static final String COMPONENT = "tezam";
+ private static final String SCRATCH_MOUNT_PATH = "/opt/hive/scratch";
+
+ public TezAmStatefulSetDependent() {
+ super(StatefulSet.class);
+ }
+
+ @Override
+ protected StatefulSet desired(HiveCluster hiveCluster,
+ Context context) {
+ HiveClusterSpec spec = hiveCluster.getSpec();
+ TezAmSpec tezAm = spec.getTezAm();
+ Map selectorLabels =
+ Labels.selectorForComponent(hiveCluster, COMPONENT);
+
+ List envVars = new ArrayList<>();
+ envVars.add(new EnvVar("SERVICE_NAME", "tezam", null));
+ envVars.add(new EnvVar("IS_RESUME", "true", null));
+ envVars.add(new EnvVar("HIVE_CUSTOM_CONF_DIR",
+ CONF_MOUNT_PATH, null));
+ envVars.add(new EnvVar("HIVE_ZOOKEEPER_QUORUM",
+ spec.getZookeeper().getQuorum(), null));
+ envVars.add(new EnvVar("TEZ_FRAMEWORK_MODE",
+ "STANDALONE_ZOOKEEPER", null));
+
+ if (spec.getLlap().isEnabled()) {
+ envVars.add(new EnvVar("HIVE_LLAP_DAEMON_SERVICE_HOSTS",
+ spec.getLlap().getServiceHosts(), null));
+ }
+
+ // S3A credentials as env vars.
+ envVars.addAll(buildS3CredentialEnvVars(spec.getStorage()));
+
+ envVars.add(new EnvVar("HADOOP_CLASSPATH",
+ HADOOP_CLASSPATH_VALUE, null));
+
+ String headlessServiceName =
+ hiveCluster.getMetadata().getName() + "-tezam";
+
+ return new StatefulSetBuilder()
+ .withNewMetadata()
+ .withName(resourceName(hiveCluster))
+ .withNamespace(hiveCluster.getMetadata().getNamespace())
+ .withLabels(Labels.forComponent(hiveCluster, COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .withReplicas(tezAm.getReplicas())
+ .withServiceName(headlessServiceName)
+ .withNewSelector()
+ .withMatchLabels(selectorLabels)
+ .endSelector()
+ .withNewTemplate()
+ .withNewMetadata()
+ .withLabels(Labels.forComponent(hiveCluster, COMPONENT))
+ .endMetadata()
+ .withNewSpec()
+ .addNewContainer()
+ .withName("tezam")
+ .withImage(spec.getImage())
+ .withImagePullPolicy(spec.getImagePullPolicy())
+ .withEnv(envVars)
+ .withResources(buildResources(tezAm.getResources()))
+ .addNewVolumeMount()
+ .withName("hive-config")
+ .withMountPath(CONF_MOUNT_PATH)
+ .endVolumeMount()
+ .addNewVolumeMount()
+ .withName("scratch")
+ .withMountPath(SCRATCH_MOUNT_PATH)
+ .endVolumeMount()
+ .endContainer()
+ .withVolumes(List.of(
+ buildProjectedConfigVolume("hive-config",
+ HiveServer2ConfigMapDependent
+ .resourceName(hiveCluster),
+ HadoopConfigMapDependent
+ .resourceName(hiveCluster)),
+ new io.fabric8.kubernetes.api.model.VolumeBuilder()
+ .withName("scratch")
+ .withNewPersistentVolumeClaim()
+ .withClaimName(ScratchPvcDependent
+ .resourceName(hiveCluster))
+ .endPersistentVolumeClaim()
+ .build()))
+ .endSpec()
+ .endTemplate()
+ .endSpec()
+ .build();
+ }
+
+ /** Returns the StatefulSet resource name for this HiveCluster. */
+ public static String resourceName(HiveCluster hiveCluster) {
+ return hiveCluster.getMetadata().getName() + "-tezam";
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/condition/LlapEnabledCondition.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/condition/LlapEnabledCondition.java
new file mode 100644
index 000000000000..81d3ae3316c8
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/condition/LlapEnabledCondition.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent.condition;
+
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+
+/**
+ * Activation condition for LLAP dependent resources.
+ * Returns true only when spec.llap.enabled is true.
+ */
+public class LlapEnabledCondition
+ implements Condition {
+
+ @Override
+ public boolean isMet(
+ DependentResource dependentResource,
+ HiveCluster primary,
+ Context context) {
+ return primary.getSpec().getLlap().isEnabled();
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/condition/MetastoreReadyCondition.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/condition/MetastoreReadyCondition.java
new file mode 100644
index 000000000000..c6b2c67f2f5d
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/condition/MetastoreReadyCondition.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent.condition;
+
+import io.fabric8.kubernetes.api.model.apps.Deployment;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+
+/**
+ * Ready condition that checks whether the Metastore Deployment has the
+ * desired number of ready replicas. Used to gate HiveServer2 Deployment.
+ */
+public class MetastoreReadyCondition
+ implements Condition {
+
+ @Override
+ public boolean isMet(
+ DependentResource dependentResource,
+ HiveCluster primary,
+ Context context) {
+ int desiredReplicas = primary.getSpec().getMetastore().getReplicas();
+ return dependentResource.getSecondaryResource(primary, context)
+ .map(deployment -> deployment.getStatus() != null
+ && deployment.getStatus().getReadyReplicas() != null
+ && deployment.getStatus().getReadyReplicas() >= desiredReplicas)
+ .orElse(false);
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/condition/SchemaJobCompletedCondition.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/condition/SchemaJobCompletedCondition.java
new file mode 100644
index 000000000000..5dd14cdf6873
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/condition/SchemaJobCompletedCondition.java
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent.condition;
+
+import io.fabric8.kubernetes.api.model.batch.v1.Job;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+
+/**
+ * Ready condition that checks whether the schema initialization Job
+ * has completed successfully. Used to gate Metastore Deployment creation.
+ */
+public class SchemaJobCompletedCondition
+ implements Condition {
+
+ @Override
+ public boolean isMet(
+ DependentResource dependentResource,
+ HiveCluster primary,
+ Context context) {
+ return dependentResource.getSecondaryResource(primary, context)
+ .map(job -> job.getStatus() != null
+ && job.getStatus().getSucceeded() != null
+ && job.getStatus().getSucceeded() >= 1)
+ .orElse(false);
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/condition/TezAmEnabledCondition.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/condition/TezAmEnabledCondition.java
new file mode 100644
index 000000000000..6dd6c59ed792
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/dependent/condition/TezAmEnabledCondition.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.dependent.condition;
+
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.DependentResource;
+import io.javaoperatorsdk.operator.processing.dependent.workflow.Condition;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+
+/**
+ * Activation condition for Tez AM dependent resources.
+ * Returns true only when spec.tezAm.enabled is true.
+ */
+public class TezAmEnabledCondition
+ implements Condition {
+
+ @Override
+ public boolean isMet(
+ DependentResource dependentResource,
+ HiveCluster primary,
+ Context context) {
+ return primary.getSpec().getTezAm().isEnabled();
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/HiveCluster.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/HiveCluster.java
new file mode 100644
index 000000000000..6a708e7c8c91
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/HiveCluster.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model;
+
+import io.fabric8.kubernetes.api.model.Namespaced;
+import io.fabric8.kubernetes.client.CustomResource;
+import io.fabric8.kubernetes.model.annotation.Group;
+import io.fabric8.kubernetes.model.annotation.Kind;
+import io.fabric8.kubernetes.model.annotation.ShortNames;
+import io.fabric8.kubernetes.model.annotation.Version;
+
+/**
+ * HiveCluster is the root CRD type representing a complete Apache Hive deployment
+ * on Kubernetes. It manages Metastore, HiveServer2, LLAP daemons, and Tez AM.
+ *
+ * <p>Registered under group {@code hive.apache.org}, version
+ * {@code v1alpha1}, kind {@code HiveCluster}, short name {@code hc}.
+ */
+@Group("hive.apache.org")
+@Version("v1alpha1")
+@Kind("HiveCluster")
+@ShortNames("hc")
+public class HiveCluster
+    extends CustomResource<HiveClusterSpec, HiveClusterStatus>
+    implements Namespaced {
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/HiveClusterSpec.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/HiveClusterSpec.java
new file mode 100644
index 000000000000..e578905e1747
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/HiveClusterSpec.java
@@ -0,0 +1,135 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model;
+
+import com.fasterxml.jackson.annotation.JsonPropertyDescription;
+import org.apache.hive.kubernetes.operator.model.spec.HadoopSpec;
+import org.apache.hive.kubernetes.operator.model.spec.HiveServer2Spec;
+import org.apache.hive.kubernetes.operator.model.spec.LlapSpec;
+import org.apache.hive.kubernetes.operator.model.spec.MetastoreSpec;
+import org.apache.hive.kubernetes.operator.model.spec.StorageSpec;
+import org.apache.hive.kubernetes.operator.model.spec.TezAmSpec;
+import org.apache.hive.kubernetes.operator.model.spec.ZookeeperSpec;
+
+/** Full specification for a HiveCluster custom resource. */
+public class HiveClusterSpec {
+
+ // NOTE(review): the default references a SNAPSHOT tag; confirm a
+ // released image tag is substituted before shipping.
+ @JsonPropertyDescription("Docker image to use for all Hive components")
+ private String image = "apache/hive:4.3.0-SNAPSHOT";
+
+ @JsonPropertyDescription("Image pull policy: Always, Never, or IfNotPresent")
+ private String imagePullPolicy = "IfNotPresent";
+
+ @JsonPropertyDescription("Metastore component configuration")
+ private MetastoreSpec metastore = new MetastoreSpec();
+
+ @JsonPropertyDescription("HiveServer2 component configuration")
+ private HiveServer2Spec hiveServer2 = new HiveServer2Spec();
+
+ @JsonPropertyDescription("LLAP daemon configuration. Disabled by default.")
+ private LlapSpec llap = new LlapSpec();
+
+ @JsonPropertyDescription("Tez Application Master configuration. Disabled by default.")
+ private TezAmSpec tezAm = new TezAmSpec();
+
+ @JsonPropertyDescription(
+ "External ZooKeeper connection details (not managed by this operator)")
+ private ZookeeperSpec zookeeper = new ZookeeperSpec();
+
+ // Unlike the other sections, storage has no default instance, so it
+ // is null when the user does not set it; callers must handle that.
+ @JsonPropertyDescription(
+ "S3-compatible storage backend configuration. Supports any "
+ + "S3-compatible endpoint (Apache Ozone, MinIO, AWS S3, etc.).")
+ private StorageSpec storage;
+
+ @JsonPropertyDescription(
+ "Hadoop/core-site.xml configuration overrides (e.g. S3A settings)")
+ private HadoopSpec hadoop = new HadoopSpec();
+
+ public String getImage() {
+ return image;
+ }
+
+ public void setImage(String image) {
+ this.image = image;
+ }
+
+ public String getImagePullPolicy() {
+ return imagePullPolicy;
+ }
+
+ public void setImagePullPolicy(String imagePullPolicy) {
+ this.imagePullPolicy = imagePullPolicy;
+ }
+
+ public MetastoreSpec getMetastore() {
+ return metastore;
+ }
+
+ public void setMetastore(MetastoreSpec metastore) {
+ this.metastore = metastore;
+ }
+
+ public HiveServer2Spec getHiveServer2() {
+ return hiveServer2;
+ }
+
+ public void setHiveServer2(HiveServer2Spec hiveServer2) {
+ this.hiveServer2 = hiveServer2;
+ }
+
+ public LlapSpec getLlap() {
+ return llap;
+ }
+
+ public void setLlap(LlapSpec llap) {
+ this.llap = llap;
+ }
+
+ public TezAmSpec getTezAm() {
+ return tezAm;
+ }
+
+ public void setTezAm(TezAmSpec tezAm) {
+ this.tezAm = tezAm;
+ }
+
+ public ZookeeperSpec getZookeeper() {
+ return zookeeper;
+ }
+
+ public void setZookeeper(ZookeeperSpec zookeeper) {
+ this.zookeeper = zookeeper;
+ }
+
+ public StorageSpec getStorage() {
+ return storage;
+ }
+
+ public void setStorage(StorageSpec storage) {
+ this.storage = storage;
+ }
+
+ public HadoopSpec getHadoop() {
+ return hadoop;
+ }
+
+ public void setHadoop(HadoopSpec hadoop) {
+ this.hadoop = hadoop;
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/HiveClusterStatus.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/HiveClusterStatus.java
new file mode 100644
index 000000000000..f5c94b7e4d57
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/HiveClusterStatus.java
@@ -0,0 +1,102 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model;
+
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hive.kubernetes.operator.model.status.ComponentStatus;
+import org.apache.hive.kubernetes.operator.model.status.HiveClusterCondition;
+
+/** Status subresource for the HiveCluster custom resource. */
+public class HiveClusterStatus {
+
+  // Conditions list defaults to empty so serialization and equality
+  // never have to deal with null.
+  private List<HiveClusterCondition> conditions = new ArrayList<>();
+  private ComponentStatus metastore;
+  private ComponentStatus hiveServer2;
+  private ComponentStatus llap;
+  private ComponentStatus tezAm;
+  // Last spec generation the operator observed when writing status.
+  private Long observedGeneration;
+
+  public List<HiveClusterCondition> getConditions() {
+    return conditions;
+  }
+
+  public void setConditions(List<HiveClusterCondition> conditions) {
+    this.conditions = conditions;
+  }
+
+  public ComponentStatus getMetastore() {
+    return metastore;
+  }
+
+  public void setMetastore(ComponentStatus metastore) {
+    this.metastore = metastore;
+  }
+
+  public ComponentStatus getHiveServer2() {
+    return hiveServer2;
+  }
+
+  public void setHiveServer2(ComponentStatus hiveServer2) {
+    this.hiveServer2 = hiveServer2;
+  }
+
+  public ComponentStatus getLlap() {
+    return llap;
+  }
+
+  public void setLlap(ComponentStatus llap) {
+    this.llap = llap;
+  }
+
+  public ComponentStatus getTezAm() {
+    return tezAm;
+  }
+
+  public void setTezAm(ComponentStatus tezAm) {
+    this.tezAm = tezAm;
+  }
+
+  public Long getObservedGeneration() {
+    return observedGeneration;
+  }
+
+  public void setObservedGeneration(Long observedGeneration) {
+    this.observedGeneration = observedGeneration;
+  }
+
+  /**
+   * Value-based equality over all fields, so callers can compare the
+   * freshly-computed status with the stored one before patching.
+   */
+  @Override
+  public boolean equals(Object o) {
+    if (this == o) {
+      return true;
+    }
+    if (o == null || getClass() != o.getClass()) {
+      return false;
+    }
+    HiveClusterStatus that = (HiveClusterStatus) o;
+    return java.util.Objects.equals(observedGeneration, that.observedGeneration)
+        && java.util.Objects.equals(conditions, that.conditions)
+        && java.util.Objects.equals(metastore, that.metastore)
+        && java.util.Objects.equals(hiveServer2, that.hiveServer2)
+        && java.util.Objects.equals(llap, that.llap)
+        && java.util.Objects.equals(tezAm, that.tezAm);
+  }
+
+  @Override
+  public int hashCode() {
+    return java.util.Objects.hash(
+        conditions, metastore, hiveServer2, llap, tezAm, observedGeneration);
+  }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/DatabaseConfig.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/DatabaseConfig.java
new file mode 100644
index 000000000000..85871c9888cc
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/DatabaseConfig.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model.spec;
+
+import com.fasterxml.jackson.annotation.JsonPropertyDescription;
+
+/** JDBC database connection configuration for the Hive Metastore backend. */
+public class DatabaseConfig {
+
+ @JsonPropertyDescription(
+ "Database type: derby, mysql, postgres, mssql, or oracle")
+ private String type = "derby";
+
+ @JsonPropertyDescription("JDBC connection URL")
+ private String url;
+
+ @JsonPropertyDescription("JDBC driver class name")
+ private String driver;
+
+ @JsonPropertyDescription("Database username")
+ private String username;
+
+ // The password is only ever referenced through a Kubernetes Secret;
+ // there is intentionally no inline plaintext password field.
+ @JsonPropertyDescription(
+ "Reference to a Kubernetes Secret containing the database password")
+ private SecretKeyRef passwordSecretRef;
+
+ // NOTE(review): presumably the driver jar is downloaded at pod init
+ // time by whichever component consumes this URL — confirm against
+ // the init-container/schema-job logic.
+ @JsonPropertyDescription(
+ "URL to download the JDBC driver jar, e.g. "
+ + "https://repo1.maven.org/maven2/org/postgresql/"
+ + "postgresql/42.7.5/postgresql-42.7.5.jar")
+ private String driverJarUrl;
+
+ public String getType() {
+ return type;
+ }
+
+ public void setType(String type) {
+ this.type = type;
+ }
+
+ public String getUrl() {
+ return url;
+ }
+
+ public void setUrl(String url) {
+ this.url = url;
+ }
+
+ public String getDriver() {
+ return driver;
+ }
+
+ public void setDriver(String driver) {
+ this.driver = driver;
+ }
+
+ public String getUsername() {
+ return username;
+ }
+
+ public void setUsername(String username) {
+ this.username = username;
+ }
+
+ public SecretKeyRef getPasswordSecretRef() {
+ return passwordSecretRef;
+ }
+
+ public void setPasswordSecretRef(SecretKeyRef passwordSecretRef) {
+ this.passwordSecretRef = passwordSecretRef;
+ }
+
+ public String getDriverJarUrl() {
+ return driverJarUrl;
+ }
+
+ public void setDriverJarUrl(String driverJarUrl) {
+ this.driverJarUrl = driverJarUrl;
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/HadoopSpec.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/HadoopSpec.java
new file mode 100644
index 000000000000..e7bd187b9041
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/HadoopSpec.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model.spec;
+
+import java.util.Map;
+
+import com.fasterxml.jackson.annotation.JsonPropertyDescription;
+
+/** Hadoop configuration overrides, primarily for core-site.xml (S3A, filesystem settings). */
+public class HadoopSpec {
+
+  @JsonPropertyDescription("Key-value pairs written into core-site.xml")
+  private Map<String, String> coreSiteOverrides;
+
+  /** Returns the core-site.xml overrides, or null when none were set. */
+  public Map<String, String> getCoreSiteOverrides() {
+    return coreSiteOverrides;
+  }
+
+  public void setCoreSiteOverrides(Map<String, String> coreSiteOverrides) {
+    this.coreSiteOverrides = coreSiteOverrides;
+  }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/HiveServer2Spec.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/HiveServer2Spec.java
new file mode 100644
index 000000000000..62f03407e479
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/HiveServer2Spec.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model.spec;
+
+import java.util.Map;
+
+import com.fasterxml.jackson.annotation.JsonPropertyDescription;
+
+/** Configuration for the HiveServer2 component. */
+public class HiveServer2Spec {
+
+  @JsonPropertyDescription("Number of HiveServer2 replicas")
+  private int replicas = 1;
+
+  @JsonPropertyDescription("Resource requirements for HiveServer2 pods")
+  private ResourceRequirementsSpec resources;
+
+  @JsonPropertyDescription(
+      "Additional hive-site.xml configuration overrides as key-value pairs")
+  private Map<String, String> configOverrides;
+
+  @JsonPropertyDescription(
+      "Kubernetes Service type: ClusterIP, LoadBalancer, or NodePort")
+  private String serviceType = "ClusterIP";
+
+  @JsonPropertyDescription("HiveServer2 Thrift port")
+  private int thriftPort = 10000;
+
+  @JsonPropertyDescription("HiveServer2 Web UI port")
+  private int webUiPort = 10002;
+
+  public int getReplicas() {
+    return replicas;
+  }
+
+  public void setReplicas(int replicas) {
+    this.replicas = replicas;
+  }
+
+  public ResourceRequirementsSpec getResources() {
+    return resources;
+  }
+
+  public void setResources(ResourceRequirementsSpec resources) {
+    this.resources = resources;
+  }
+
+  /** Returns the hive-site.xml overrides, or null when none were set. */
+  public Map<String, String> getConfigOverrides() {
+    return configOverrides;
+  }
+
+  public void setConfigOverrides(Map<String, String> configOverrides) {
+    this.configOverrides = configOverrides;
+  }
+
+  public String getServiceType() {
+    return serviceType;
+  }
+
+  public void setServiceType(String serviceType) {
+    this.serviceType = serviceType;
+  }
+
+  public int getThriftPort() {
+    return thriftPort;
+  }
+
+  public void setThriftPort(int thriftPort) {
+    this.thriftPort = thriftPort;
+  }
+
+  public int getWebUiPort() {
+    return webUiPort;
+  }
+
+  public void setWebUiPort(int webUiPort) {
+    this.webUiPort = webUiPort;
+  }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/LlapSpec.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/LlapSpec.java
new file mode 100644
index 000000000000..30985d836713
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/LlapSpec.java
@@ -0,0 +1,106 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model.spec;
+
+import java.util.Map;
+
+import com.fasterxml.jackson.annotation.JsonPropertyDescription;
+
+/** Configuration for LLAP (Live Long and Process) daemons. */
+public class LlapSpec {
+
+ @JsonPropertyDescription("Whether LLAP is enabled")
+ private boolean enabled = false;
+
+ @JsonPropertyDescription("Number of LLAP daemon replicas")
+ private int replicas = 2;
+
+ @JsonPropertyDescription("Resource requirements for LLAP daemon pods")
+ private ResourceRequirementsSpec resources;
+
+ @JsonPropertyDescription("Number of LLAP executors per daemon")
+ private int executors = 1;
+
+ @JsonPropertyDescription("Memory in MB per LLAP daemon instance")
+ private int memoryMb = 2048;
+
+ @JsonPropertyDescription(
+ "LLAP service hosts identifier for ZooKeeper registration")
+ private String serviceHosts = "@llap0";
+
+ @JsonPropertyDescription(
+ "Additional llap-daemon-site.xml configuration overrides")
+ private Map<String, String> configOverrides;
+
+ public boolean isEnabled() {
+ return enabled;
+ }
+
+ public void setEnabled(boolean enabled) {
+ this.enabled = enabled;
+ }
+
+ public int getReplicas() {
+ return replicas;
+ }
+
+ public void setReplicas(int replicas) {
+ this.replicas = replicas;
+ }
+
+ public ResourceRequirementsSpec getResources() {
+ return resources;
+ }
+
+ public void setResources(ResourceRequirementsSpec resources) {
+ this.resources = resources;
+ }
+
+ public int getExecutors() {
+ return executors;
+ }
+
+ public void setExecutors(int executors) {
+ this.executors = executors;
+ }
+
+ public int getMemoryMb() {
+ return memoryMb;
+ }
+
+ public void setMemoryMb(int memoryMb) {
+ this.memoryMb = memoryMb;
+ }
+
+ public String getServiceHosts() {
+ return serviceHosts;
+ }
+
+ public void setServiceHosts(String serviceHosts) {
+ this.serviceHosts = serviceHosts;
+ }
+
+ public Map<String, String> getConfigOverrides() {
+ return configOverrides;
+ }
+
+ public void setConfigOverrides(Map<String, String> configOverrides) {
+ this.configOverrides = configOverrides;
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/MetastoreSpec.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/MetastoreSpec.java
new file mode 100644
index 000000000000..4e75ff13f502
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/MetastoreSpec.java
@@ -0,0 +1,83 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model.spec;
+
+import java.util.Map;
+
+import com.fasterxml.jackson.annotation.JsonPropertyDescription;
+
+/** Configuration for the Hive Metastore component. */
+public class MetastoreSpec {
+
+ @JsonPropertyDescription("Number of Metastore replicas")
+ private int replicas = 1;
+
+ @JsonPropertyDescription("Resource requirements for Metastore pods")
+ private ResourceRequirementsSpec resources;
+
+ @JsonPropertyDescription("Database connection configuration for the metastore backend")
+ private DatabaseConfig database = new DatabaseConfig();
+
+ @JsonPropertyDescription("Warehouse directory path")
+ private String warehouseDir = "/opt/hive/data/warehouse";
+
+ @JsonPropertyDescription(
+ "Additional metastore-site.xml configuration overrides as key-value pairs")
+ private Map<String, String> configOverrides;
+
+ public int getReplicas() {
+ return replicas;
+ }
+
+ public void setReplicas(int replicas) {
+ this.replicas = replicas;
+ }
+
+ public ResourceRequirementsSpec getResources() {
+ return resources;
+ }
+
+ public void setResources(ResourceRequirementsSpec resources) {
+ this.resources = resources;
+ }
+
+ public DatabaseConfig getDatabase() {
+ return database;
+ }
+
+ public void setDatabase(DatabaseConfig database) {
+ this.database = database;
+ }
+
+ public String getWarehouseDir() {
+ return warehouseDir;
+ }
+
+ public void setWarehouseDir(String warehouseDir) {
+ this.warehouseDir = warehouseDir;
+ }
+
+ public Map<String, String> getConfigOverrides() {
+ return configOverrides;
+ }
+
+ public void setConfigOverrides(Map<String, String> configOverrides) {
+ this.configOverrides = configOverrides;
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/ResourceRequirementsSpec.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/ResourceRequirementsSpec.java
new file mode 100644
index 000000000000..c38cbc8b8e69
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/ResourceRequirementsSpec.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model.spec;
+
+import com.fasterxml.jackson.annotation.JsonPropertyDescription;
+
+/** Kubernetes resource requirements (CPU/memory requests and optional limits) expressed as quantity strings, e.g. "500m", "1Gi". */
+public class ResourceRequirementsSpec {
+
+ @JsonPropertyDescription("CPU request (e.g. 500m, 1)")
+ private String requestsCpu = "500m";
+
+ @JsonPropertyDescription("Memory request (e.g. 1Gi, 512Mi)")
+ private String requestsMemory = "1Gi";
+
+ @JsonPropertyDescription("CPU limit (e.g. 2, 1000m)")
+ private String limitsCpu;
+
+ @JsonPropertyDescription("Memory limit (e.g. 2Gi, 1024Mi)")
+ private String limitsMemory;
+
+ public String getRequestsCpu() {
+ return requestsCpu;
+ }
+
+ public void setRequestsCpu(String requestsCpu) {
+ this.requestsCpu = requestsCpu;
+ }
+
+ public String getRequestsMemory() {
+ return requestsMemory;
+ }
+
+ public void setRequestsMemory(String requestsMemory) {
+ this.requestsMemory = requestsMemory;
+ }
+
+ public String getLimitsCpu() {
+ return limitsCpu;
+ }
+
+ public void setLimitsCpu(String limitsCpu) {
+ this.limitsCpu = limitsCpu;
+ }
+
+ public String getLimitsMemory() {
+ return limitsMemory;
+ }
+
+ public void setLimitsMemory(String limitsMemory) {
+ this.limitsMemory = limitsMemory;
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/SecretKeyRef.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/SecretKeyRef.java
new file mode 100644
index 000000000000..77de12452ad0
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/SecretKeyRef.java
@@ -0,0 +1,47 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model.spec;
+
+import com.fasterxml.jackson.annotation.JsonPropertyDescription;
+
+/** Reference to a single entry in a Kubernetes Secret, identified by the Secret's name and a data key within it. */
+public class SecretKeyRef {
+
+ @JsonPropertyDescription("Name of the Kubernetes Secret")
+ private String name;
+
+ @JsonPropertyDescription("Key within the Secret")
+ private String key;
+
+ public String getName() {
+ return name;
+ }
+
+ public void setName(String name) {
+ this.name = name;
+ }
+
+ public String getKey() {
+ return key;
+ }
+
+ public void setKey(String key) {
+ this.key = key;
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/StorageSpec.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/StorageSpec.java
new file mode 100644
index 000000000000..e17d5caa4a9d
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/StorageSpec.java
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model.spec;
+
+import com.fasterxml.jackson.annotation.JsonPropertyDescription;
+
+/**
+ * Configuration for an S3-compatible storage backend such as Apache Ozone
+ * (deployed via Helm), MinIO, or AWS S3. Credentials can be supplied as
+ * plain text (dev/test only) or via Secret references, which take precedence.
+ */
+public class StorageSpec {
+
+ @JsonPropertyDescription(
+ "S3-compatible endpoint URL. "
+ + "Example: http://ozone-s3g-rest:9878 or https://s3.amazonaws.com")
+ private String endpoint;
+
+ @JsonPropertyDescription(
+ "Bucket name for the Hive scratch directory")
+ private String bucket = "hive";
+
+ @JsonPropertyDescription(
+ "Whether to use path-style access for S3 requests")
+ private boolean pathStyleAccess = true;
+
+ @JsonPropertyDescription(
+ "S3 access key as plain text (dev/test only)")
+ private String accessKey;
+
+ @JsonPropertyDescription(
+ "S3 secret key as plain text (dev/test only)")
+ private String secretKey;
+
+ @JsonPropertyDescription(
+ "Reference to a Kubernetes Secret containing the S3 access key. "
+ + "Takes precedence over accessKey.")
+ private SecretKeyRef accessKeySecretRef;
+
+ @JsonPropertyDescription(
+ "Reference to a Kubernetes Secret containing the S3 secret key. "
+ + "Takes precedence over secretKey.")
+ private SecretKeyRef secretKeySecretRef;
+
+ public String getEndpoint() {
+ return endpoint;
+ }
+
+ public void setEndpoint(String endpoint) {
+ this.endpoint = endpoint;
+ }
+
+ public String getBucket() {
+ return bucket;
+ }
+
+ public void setBucket(String bucket) {
+ this.bucket = bucket;
+ }
+
+ public boolean isPathStyleAccess() {
+ return pathStyleAccess;
+ }
+
+ public void setPathStyleAccess(boolean pathStyleAccess) {
+ this.pathStyleAccess = pathStyleAccess;
+ }
+
+ public String getAccessKey() {
+ return accessKey;
+ }
+
+ public void setAccessKey(String accessKey) {
+ this.accessKey = accessKey;
+ }
+
+ public String getSecretKey() {
+ return secretKey;
+ }
+
+ public void setSecretKey(String secretKey) {
+ this.secretKey = secretKey;
+ }
+
+ public SecretKeyRef getAccessKeySecretRef() {
+ return accessKeySecretRef;
+ }
+
+ public void setAccessKeySecretRef(SecretKeyRef accessKeySecretRef) {
+ this.accessKeySecretRef = accessKeySecretRef;
+ }
+
+ public SecretKeyRef getSecretKeySecretRef() {
+ return secretKeySecretRef;
+ }
+
+ public void setSecretKeySecretRef(SecretKeyRef secretKeySecretRef) {
+ this.secretKeySecretRef = secretKeySecretRef;
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/TezAmSpec.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/TezAmSpec.java
new file mode 100644
index 000000000000..54a2d5477df8
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/TezAmSpec.java
@@ -0,0 +1,95 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model.spec;
+
+import java.util.Map;
+
+import com.fasterxml.jackson.annotation.JsonPropertyDescription;
+
+/** Configuration for the Tez Application Master component. */
+public class TezAmSpec {
+
+ @JsonPropertyDescription("Whether Tez AM is enabled")
+ private boolean enabled = false;
+
+ @JsonPropertyDescription("Number of Tez AM replicas")
+ private int replicas = 1;
+
+ @JsonPropertyDescription("Resource requirements for Tez AM pods")
+ private ResourceRequirementsSpec resources;
+
+ @JsonPropertyDescription("Storage size for the shared scratch PVC "
+ + "(ReadWriteMany) mounted on HS2 and TezAM at /opt/hive/scratch")
+ private String scratchStorageSize = "1Gi";
+
+ @JsonPropertyDescription("StorageClass for the shared scratch PVC. "
+ + "Must support ReadWriteMany access. If null, uses cluster default.")
+ private String scratchStorageClassName;
+
+ @JsonPropertyDescription("Additional tez-site.xml configuration overrides")
+ private Map<String, String> configOverrides;
+
+ public boolean isEnabled() {
+ return enabled;
+ }
+
+ public void setEnabled(boolean enabled) {
+ this.enabled = enabled;
+ }
+
+ public int getReplicas() {
+ return replicas;
+ }
+
+ public void setReplicas(int replicas) {
+ this.replicas = replicas;
+ }
+
+ public ResourceRequirementsSpec getResources() {
+ return resources;
+ }
+
+ public void setResources(ResourceRequirementsSpec resources) {
+ this.resources = resources;
+ }
+
+ public String getScratchStorageSize() {
+ return scratchStorageSize;
+ }
+
+ public void setScratchStorageSize(String scratchStorageSize) {
+ this.scratchStorageSize = scratchStorageSize;
+ }
+
+ public String getScratchStorageClassName() {
+ return scratchStorageClassName;
+ }
+
+ public void setScratchStorageClassName(String scratchStorageClassName) {
+ this.scratchStorageClassName = scratchStorageClassName;
+ }
+
+ public Map<String, String> getConfigOverrides() {
+ return configOverrides;
+ }
+
+ public void setConfigOverrides(Map<String, String> configOverrides) {
+ this.configOverrides = configOverrides;
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/ZookeeperSpec.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/ZookeeperSpec.java
new file mode 100644
index 000000000000..94c4d3fdf6ff
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/spec/ZookeeperSpec.java
@@ -0,0 +1,36 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model.spec;
+
+import com.fasterxml.jackson.annotation.JsonPropertyDescription;
+
+/** Connection settings for an external ZooKeeper ensemble; ZooKeeper itself is not deployed or managed by this operator. */
+public class ZookeeperSpec {
+
+ @JsonPropertyDescription("ZooKeeper quorum connection string")
+ private String quorum = "zookeeper:2181";
+
+ public String getQuorum() {
+ return quorum;
+ }
+
+ public void setQuorum(String quorum) {
+ this.quorum = quorum;
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/status/ComponentStatus.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/status/ComponentStatus.java
new file mode 100644
index 000000000000..b9ce645afcb2
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/status/ComponentStatus.java
@@ -0,0 +1,67 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model.status;
+
+/**
+ * Status of an individual Hive component (Metastore, HS2, LLAP, TezAM).
+ */
+public class ComponentStatus {
+
+ private int readyReplicas;
+ private int desiredReplicas;
+ private String phase;
+
+ public int getReadyReplicas() {
+ return readyReplicas;
+ }
+
+ public void setReadyReplicas(int readyReplicas) {
+ this.readyReplicas = readyReplicas;
+ }
+
+ public int getDesiredReplicas() {
+ return desiredReplicas;
+ }
+
+ public void setDesiredReplicas(int desiredReplicas) {
+ this.desiredReplicas = desiredReplicas;
+ }
+
+ public String getPhase() {
+ return phase;
+ }
+
+ public void setPhase(String phase) {
+ this.phase = phase;
+ }
+
+ @Override public boolean equals(Object o) {
+ if (this == o)
+ return true;
+ if (o == null || getClass() != o.getClass())
+ return false;
+ ComponentStatus that = (ComponentStatus) o;
+ return readyReplicas == that.readyReplicas && desiredReplicas == that.desiredReplicas && java.util.Objects.equals(
+ phase, that.phase);
+ }
+
+ @Override public int hashCode() {
+ return java.util.Objects.hash(readyReplicas, desiredReplicas, phase);
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/status/HiveClusterCondition.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/status/HiveClusterCondition.java
new file mode 100644
index 000000000000..5685fe4a774b
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/model/status/HiveClusterCondition.java
@@ -0,0 +1,87 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.model.status;
+
+/**
+ * Standard Kubernetes-style condition for the HiveCluster status.
+ * Condition types: Ready, MetastoreReady, HiveServer2Ready, SchemaInitialized.
+ */
+public class HiveClusterCondition {
+
+ private String type;
+ private String status;
+ private String reason;
+ private String message;
+ private String lastTransitionTime;
+
+ public String getType() {
+ return type;
+ }
+
+ public void setType(String type) {
+ this.type = type;
+ }
+
+ public String getStatus() {
+ return status;
+ }
+
+ public void setStatus(String status) {
+ this.status = status;
+ }
+
+ public String getReason() {
+ return reason;
+ }
+
+ public void setReason(String reason) {
+ this.reason = reason;
+ }
+
+ public String getMessage() {
+ return message;
+ }
+
+ public void setMessage(String message) {
+ this.message = message;
+ }
+
+ public String getLastTransitionTime() {
+ return lastTransitionTime;
+ }
+
+ public void setLastTransitionTime(String lastTransitionTime) {
+ this.lastTransitionTime = lastTransitionTime;
+ }
+
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ HiveClusterCondition that = (HiveClusterCondition) o;
+ return java.util.Objects.equals(type, that.type) &&
+ java.util.Objects.equals(status, that.status) &&
+ java.util.Objects.equals(reason, that.reason) &&
+ java.util.Objects.equals(message, that.message) &&
+ java.util.Objects.equals(lastTransitionTime, that.lastTransitionTime);
+ }
+
+ public int hashCode() {
+ return java.util.Objects.hash(type, status, reason, message, lastTransitionTime);
+ }
+}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/reconciler/HiveClusterReconciler.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/reconciler/HiveClusterReconciler.java
new file mode 100644
index 000000000000..02895d8bd7d4
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/reconciler/HiveClusterReconciler.java
@@ -0,0 +1,308 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.reconciler;
+
+import java.time.Instant;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+import java.util.Optional;
+import java.util.function.Function;
+
+import io.fabric8.kubernetes.api.model.HasMetadata;
+import io.fabric8.kubernetes.api.model.apps.Deployment;
+import io.fabric8.kubernetes.api.model.apps.StatefulSet;
+import io.fabric8.kubernetes.api.model.batch.v1.Job;
+import io.javaoperatorsdk.operator.api.reconciler.Context;
+import io.javaoperatorsdk.operator.api.reconciler.ControllerConfiguration;
+import io.javaoperatorsdk.operator.api.reconciler.ErrorStatusHandler;
+import io.javaoperatorsdk.operator.api.reconciler.ErrorStatusUpdateControl;
+import io.javaoperatorsdk.operator.api.reconciler.Reconciler;
+import io.javaoperatorsdk.operator.api.reconciler.UpdateControl;
+import io.javaoperatorsdk.operator.api.reconciler.dependent.Dependent;
+import org.apache.hive.kubernetes.operator.dependent.HadoopConfigMapDependent;
+import org.apache.hive.kubernetes.operator.dependent.HiveServer2ConfigMapDependent;
+import org.apache.hive.kubernetes.operator.dependent.HiveServer2DeploymentDependent;
+import org.apache.hive.kubernetes.operator.dependent.HiveServer2ServiceDependent;
+import org.apache.hive.kubernetes.operator.dependent.LlapConfigMapDependent;
+import org.apache.hive.kubernetes.operator.dependent.LlapServiceDependent;
+import org.apache.hive.kubernetes.operator.dependent.LlapStatefulSetDependent;
+import org.apache.hive.kubernetes.operator.dependent.MetastoreConfigMapDependent;
+import org.apache.hive.kubernetes.operator.dependent.MetastoreDeploymentDependent;
+import org.apache.hive.kubernetes.operator.dependent.MetastoreServiceDependent;
+import org.apache.hive.kubernetes.operator.dependent.SchemaInitJobDependent;
+import org.apache.hive.kubernetes.operator.dependent.ScratchPvcDependent;
+import org.apache.hive.kubernetes.operator.dependent.TezAmServiceDependent;
+import org.apache.hive.kubernetes.operator.dependent.TezAmStatefulSetDependent;
+import org.apache.hive.kubernetes.operator.dependent.condition.LlapEnabledCondition;
+import org.apache.hive.kubernetes.operator.dependent.condition.MetastoreReadyCondition;
+import org.apache.hive.kubernetes.operator.dependent.condition.SchemaJobCompletedCondition;
+import org.apache.hive.kubernetes.operator.dependent.condition.TezAmEnabledCondition;
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+import org.apache.hive.kubernetes.operator.model.HiveClusterStatus;
+import org.apache.hive.kubernetes.operator.model.status.ComponentStatus;
+import org.apache.hive.kubernetes.operator.model.status.HiveClusterCondition;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Main reconciler for the HiveCluster custom resource.
+ * Orchestrates all dependent resources with proper dependency ordering.
+ */
+@ControllerConfiguration(
+ dependents = {
+ // --- ConfigMap dependents ---
+ @Dependent(
+ name = "hadoop-configmap",
+ type = HadoopConfigMapDependent.class
+ ),
+ @Dependent(
+ name = "metastore-configmap",
+ type = MetastoreConfigMapDependent.class
+ ),
+ @Dependent(
+ name = "hiveserver2-configmap",
+ type = HiveServer2ConfigMapDependent.class
+ ),
+ // --- Job dependents ---
+ @Dependent(
+ name = "schema-init-job",
+ type = SchemaInitJobDependent.class,
+ dependsOn = {"metastore-configmap", "hadoop-configmap"},
+ readyPostcondition = SchemaJobCompletedCondition.class
+ ),
+ // --- Deployment dependents ---
+ @Dependent(
+ name = "metastore-deployment",
+ type = MetastoreDeploymentDependent.class,
+ dependsOn = {"schema-init-job"},
+ readyPostcondition = MetastoreReadyCondition.class
+ ),
+ // --- Service dependents ---
+ @Dependent(
+ name = "metastore-service",
+ type = MetastoreServiceDependent.class,
+ dependsOn = {"metastore-configmap"}
+ ),
+ @Dependent(
+ name = "hiveserver2-deployment",
+ type = HiveServer2DeploymentDependent.class,
+ dependsOn = {"metastore-deployment", "hiveserver2-configmap",
+ "hadoop-configmap"}
+ ),
+ @Dependent(
+ name = "hiveserver2-service",
+ type = HiveServer2ServiceDependent.class,
+ dependsOn = {"hiveserver2-configmap"}
+ ),
+ // --- LLAP (conditional) ---
+ @Dependent(
+ name = "llap-configmap",
+ type = LlapConfigMapDependent.class,
+ activationCondition = LlapEnabledCondition.class
+ ),
+ @Dependent(
+ name = "llap-statefulset",
+ type = LlapStatefulSetDependent.class,
+ dependsOn = {"llap-configmap", "hadoop-configmap"},
+ activationCondition = LlapEnabledCondition.class
+ ),
+ @Dependent(
+ name = "llap-service",
+ type = LlapServiceDependent.class,
+ activationCondition = LlapEnabledCondition.class
+ ),
+ // --- TezAM (conditional) ---
+ @Dependent(
+ name = "scratch-pvc",
+ type = ScratchPvcDependent.class,
+ activationCondition = TezAmEnabledCondition.class
+ ),
+ @Dependent(
+ name = "tezam-service",
+ type = TezAmServiceDependent.class,
+ activationCondition = TezAmEnabledCondition.class
+ ),
+ @Dependent(
+ name = "tezam-statefulset",
+ type = TezAmStatefulSetDependent.class,
+ dependsOn = {"hiveserver2-configmap", "hadoop-configmap",
+ "tezam-service", "scratch-pvc"},
+ activationCondition = TezAmEnabledCondition.class
+ )
+ }
+)
+public class HiveClusterReconciler
+ implements Reconciler, ErrorStatusHandler {
+
+ private static final Logger LOG =
+ LoggerFactory.getLogger(HiveClusterReconciler.class);
+
+ @Override
+ public UpdateControl<HiveCluster> reconcile(HiveCluster resource,
+ Context<HiveCluster> context) {
+ LOG.info("Reconciling HiveCluster: {}/{}",
+ resource.getMetadata().getNamespace(),
+ resource.getMetadata().getName());
+
+ HiveClusterStatus existingStatus = resource.getStatus();
+ HiveClusterStatus newStatus = buildStatus(resource, context, existingStatus);
+
+ // Relies on HiveClusterStatus.equals() being properly implemented in the POJO
+ if (Objects.equals(existingStatus, newStatus)) {
+ return UpdateControl.noUpdate();
+ }
+
+ resource.setStatus(newStatus);
+ return UpdateControl.patchStatus(resource);
+ }
+
+ @Override
+ public ErrorStatusUpdateControl<HiveCluster> updateErrorStatus(
+ HiveCluster resource, Context<HiveCluster> context, Exception e) {
+ LOG.error("Error reconciling HiveCluster: {}/{}",
+ resource.getMetadata().getNamespace(),
+ resource.getMetadata().getName(), e);
+
+ HiveClusterStatus status = resource.getStatus() != null
+ ? resource.getStatus() : new HiveClusterStatus();
+
+ List<HiveClusterCondition> existingConditions = status.getConditions() != null
+ ? status.getConditions() : Collections.emptyList();
+
+ status.setConditions(List.of(
+ buildCondition("Ready", "False", "ReconciliationError",
+ e.getMessage(), existingConditions)
+ ));
+ status.setObservedGeneration(resource.getMetadata().getGeneration());
+ resource.setStatus(status);
+
+ return ErrorStatusUpdateControl.patchStatus(resource);
+ }
+
+ private HiveClusterStatus buildStatus(HiveCluster resource,
+ Context context, HiveClusterStatus existingStatus) {
+
+ HiveClusterStatus status = new HiveClusterStatus();
+ status.setObservedGeneration(resource.getMetadata().getGeneration());
+
+ List existingConditions = existingStatus != null && existingStatus.getConditions() != null
+ ? existingStatus.getConditions() : Collections.emptyList();
+ List conditions = new ArrayList<>();
+
+ // Schema Init status
+ boolean schemaReady = context.getSecondaryResource(Job.class, "schema-init-job")
+ .map(j -> j.getStatus() != null && j.getStatus().getSucceeded() != null && j.getStatus().getSucceeded() >= 1)
+ .orElse(false);
+
+ conditions.add(buildCondition("SchemaInitialized", schemaReady ? "True" : "False",
+ schemaReady ? "JobCompleted" : "JobPending",
+ schemaReady ? "Schema initialized successfully" : "Schema initialization pending",
+ existingConditions));
+
+ // Metastore status
+ ComponentStatus metastoreStatus = buildComponentStatus(context, Deployment.class, "metastore-deployment",
+ resource.getSpec().getMetastore().getReplicas(),
+ d -> d.getStatus() != null && d.getStatus().getReadyReplicas() != null ? d.getStatus().getReadyReplicas() : 0);
+ status.setMetastore(metastoreStatus);
+
+ boolean metastoreReady = metastoreStatus.getReadyReplicas() >= metastoreStatus.getDesiredReplicas() && metastoreStatus.getDesiredReplicas() > 0;
+ conditions.add(buildCondition("MetastoreReady", metastoreReady ? "True" : "False",
+ metastoreReady ? "DeploymentReady" : "DeploymentNotReady",
+ metastoreReady ? "Metastore is ready" : "Metastore not yet ready",
+ existingConditions));
+
+ // HiveServer2 status
+ ComponentStatus hs2Status = buildComponentStatus(context, Deployment.class, "hiveserver2-deployment",
+ resource.getSpec().getHiveServer2().getReplicas(),
+ d -> d.getStatus() != null && d.getStatus().getReadyReplicas() != null ? d.getStatus().getReadyReplicas() : 0);
+ status.setHiveServer2(hs2Status);
+
+ boolean hs2Ready = hs2Status.getReadyReplicas() >= hs2Status.getDesiredReplicas() && hs2Status.getDesiredReplicas() > 0;
+ conditions.add(buildCondition("HiveServer2Ready", hs2Ready ? "True" : "False",
+ hs2Ready ? "DeploymentReady" : "DeploymentNotReady",
+ hs2Ready ? "HiveServer2 is ready" : "HiveServer2 not yet ready",
+ existingConditions));
+
+ // LLAP status (optional)
+ if (resource.getSpec().getLlap().isEnabled()) {
+ status.setLlap(buildComponentStatus(context, StatefulSet.class, "llap-statefulset",
+ resource.getSpec().getLlap().getReplicas(),
+ s -> s.getStatus() != null && s.getStatus().getReadyReplicas() != null ? s.getStatus().getReadyReplicas() : 0));
+ }
+
+ // TezAM status (optional)
+ if (resource.getSpec().getTezAm().isEnabled()) {
+ status.setTezAm(buildComponentStatus(context, StatefulSet.class, "tezam-statefulset",
+ resource.getSpec().getTezAm().getReplicas(),
+ s -> s.getStatus() != null && s.getStatus().getReadyReplicas() != null ? s.getStatus().getReadyReplicas() : 0));
+ }
+
+ // Overall Ready condition
+ boolean allReady = schemaReady && metastoreReady && hs2Ready;
+ conditions.add(buildCondition("Ready", allReady ? "True" : "False",
+ allReady ? "AllComponentsReady" : "ComponentsNotReady",
+ allReady ? "All Hive components are ready" : "One or more components are not ready",
+ existingConditions));
+
+ status.setConditions(conditions);
+ return status;
+ }
+
+ /**
+ * Unified helper to build status for Deployments, StatefulSets, or any HasMetadata type
+ * that tracks replicas.
+ */
+ private ComponentStatus buildComponentStatus(
+ Context context, Class resourceClass, String dependentName,
+ int desiredReplicas, Function readyExtractor) {
+
+ ComponentStatus cs = new ComponentStatus();
+ cs.setDesiredReplicas(desiredReplicas);
+
+ int ready = context.getSecondaryResource(resourceClass, dependentName)
+ .map(readyExtractor)
+ .orElse(0);
+
+ cs.setReadyReplicas(ready);
+ cs.setPhase(ready >= desiredReplicas && desiredReplicas > 0 ? "Running" : "Pending");
+ return cs;
+ }
+
+ private HiveClusterCondition buildCondition(String type, String status,
+ String reason, String message, List existingConditions) {
+
+ HiveClusterCondition condition = new HiveClusterCondition();
+ condition.setType(type);
+ condition.setStatus(status);
+ condition.setReason(reason);
+ condition.setMessage(message);
+
+ // Preserve lastTransitionTime when the condition status has not changed
+ String preservedTime = existingConditions.stream()
+ .filter(c -> type.equals(c.getType()) && status.equals(c.getStatus()))
+ .map(HiveClusterCondition::getLastTransitionTime)
+ .findFirst()
+ .orElse(null);
+
+ condition.setLastTransitionTime(preservedTime != null ? preservedTime : Instant.now().toString());
+ return condition;
+ }
+}
\ No newline at end of file
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/util/HadoopXmlBuilder.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/util/HadoopXmlBuilder.java
new file mode 100644
index 000000000000..66e5c1b7d4dd
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/util/HadoopXmlBuilder.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.util;
+
+import java.util.Map;
+
+/**
+ * Builds Hadoop-style XML configuration file content from a property map.
+ * The output format matches standard Hadoop configuration files as used by
+ * Hive, HDFS, and Tez.
+ */
/**
 * Builds Hadoop-style XML configuration file content from a property map.
 * The output format matches standard Hadoop configuration files as used by
 * Hive, HDFS, and Tez.
 *
 * <p>NOTE(review): the XML string literals and escape entities below were
 * reconstructed — the original text had all tags/entities stripped by an
 * HTML-unescaping step. The reconstruction follows the standard Hadoop
 * {@code *-site.xml} layout; confirm against a generated file.
 */
public final class HadoopXmlBuilder {

  private HadoopXmlBuilder() {
    // Utility class; not instantiable.
  }

  /**
   * Renders a property map as a Hadoop-style XML configuration string.
   *
   * @param properties key-value pairs to include in the configuration; may be null
   * @return XML string in Hadoop configuration format
   */
  public static String buildXml(Map<String, String> properties) {
    StringBuilder sb = new StringBuilder();
    sb.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
    sb.append("<?xml-stylesheet type=\"text/xsl\" href=\"configuration.xsl\"?>\n");
    sb.append("<configuration>\n");
    if (properties != null) {
      for (Map.Entry<String, String> entry : properties.entrySet()) {
        sb.append("  <property>\n");
        sb.append("    <name>").append(escapeXml(entry.getKey()))
            .append("</name>\n");
        sb.append("    <value>").append(escapeXml(entry.getValue()))
            .append("</value>\n");
        sb.append("  </property>\n");
      }
    }
    sb.append("</configuration>\n");
    return sb.toString();
  }

  /**
   * Escapes the five predefined XML entities so arbitrary property text cannot
   * break the document structure. Ampersand must be replaced first so the
   * later replacements are not double-escaped.
   */
  private static String escapeXml(String value) {
    if (value == null) {
      return "";
    }
    return value
        .replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace(">", "&gt;")
        .replace("\"", "&quot;")
        .replace("'", "&apos;");
  }
}
diff --git a/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/util/Labels.java b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/util/Labels.java
new file mode 100644
index 000000000000..dcf0cc43b3c6
--- /dev/null
+++ b/packaging/src/kubernetes/src/java/org/apache/hive/kubernetes/operator/util/Labels.java
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hive.kubernetes.operator.util;
+
+import java.util.LinkedHashMap;
+import java.util.Map;
+
+import org.apache.hive.kubernetes.operator.model.HiveCluster;
+
+/** Standard Kubernetes label and selector helpers following recommended label conventions. */
+public final class Labels {
+
+ public static final String APP_NAME = "app.kubernetes.io/name";
+ public static final String APP_INSTANCE = "app.kubernetes.io/instance";
+ public static final String APP_COMPONENT = "app.kubernetes.io/component";
+ public static final String MANAGED_BY = "app.kubernetes.io/managed-by";
+ public static final String MANAGED_BY_VALUE = "hive-kubernetes-operator";
+
+ private Labels() {
+ }
+
+ /**
+ * Returns the full set of labels for a component's Kubernetes resource.
+ *
+ * @param hc the HiveCluster resource
+ * @param component component name (metastore, hiveserver2, llap, tezam, schema-init)
+ * @return label map
+ */
+ public static Map forComponent(HiveCluster hc,
+ String component) {
+ Map labels = new LinkedHashMap<>();
+ labels.put(APP_NAME, "apache-hive");
+ labels.put(APP_INSTANCE, hc.getMetadata().getName());
+ labels.put(APP_COMPONENT, component);
+ labels.put(MANAGED_BY, MANAGED_BY_VALUE);
+ return labels;
+ }
+
+ /**
+ * Returns the minimal selector labels for matching pods of a component.
+ *
+ * @param hc the HiveCluster resource
+ * @param component component name
+ * @return selector map
+ */
+ public static Map selectorForComponent(HiveCluster hc,
+ String component) {
+ Map selector = new LinkedHashMap<>();
+ selector.put(APP_INSTANCE, hc.getMetadata().getName());
+ selector.put(APP_COMPONENT, component);
+ return selector;
+ }
+}
diff --git a/packaging/src/kubernetes/src/resources/log4j2.xml b/packaging/src/kubernetes/src/resources/log4j2.xml
new file mode 100644
index 000000000000..f906eb0fdf29
--- /dev/null
+++ b/packaging/src/kubernetes/src/resources/log4j2.xml
@@ -0,0 +1,29 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/pom.xml b/pom.xml
index f80deb53553f..cee7c34feb87 100644
--- a/pom.xml
+++ b/pom.xml
@@ -99,6 +99,8 @@
3.1.0
2.16.0
3.6.0
+ 4.9.6
+ 6.13.4
3.5.3
2.7.10
2.3.0