diff --git a/.gitignore b/.gitignore
index b440d2b597..35096783dd 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,3 +21,6 @@ linklint-*.zip
linklint/
.checkstyle
**/.checkstyle
+
+# detritus produced by kuttl
+kubeconfig*
diff --git a/dev-support/jenkins/Dockerfile b/dev-support/jenkins/Dockerfile
index 5b7aac29a9..17bcf30645 100644
--- a/dev-support/jenkins/Dockerfile
+++ b/dev-support/jenkins/Dockerfile
@@ -20,16 +20,29 @@
FROM hadolint/hadolint:latest-debian as hadolint
FROM maven:3.8-jdk-8
-
-# hadolint ignore=DL3008
-RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
- binutils \
- git \
- rsync \
- shellcheck \
- wget && \
- apt-get clean && \
- rm -rf /var/lib/apt/lists/*
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+# hadolint ignore=SC1091
+RUN install -m 0755 -d /etc/apt/keyrings \
+  && curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg \
+ && chmod a+r /etc/apt/keyrings/docker.gpg \
+ && echo \
+ "deb [arch="$(dpkg --print-architecture)" signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian \
+ "$(. /etc/os-release && echo "$VERSION_CODENAME")" stable" | \
+ tee /etc/apt/sources.list.d/docker.list > /dev/null \
+ && apt-get -q update \
+ && apt-get -q install --no-install-recommends -y \
+ binutils=* \
+ ca-certificates=* \
+ curl=* \
+ docker-buildx-plugin=* \
+ docker-ce-cli=* \
+ git=* \
+ gnupg=* \
+ rsync=* \
+ shellcheck=* \
+ wget=* \
+ && apt-get clean \
+ && rm -rf /var/lib/apt/lists/*
COPY --from=hadolint /bin/hadolint /bin/hadolint
diff --git a/dev-support/jenkins/jenkins_precommit_github_yetus.sh b/dev-support/jenkins/jenkins_precommit_github_yetus.sh
index 2cede2f047..40f3ead1a9 100755
--- a/dev-support/jenkins/jenkins_precommit_github_yetus.sh
+++ b/dev-support/jenkins/jenkins_precommit_github_yetus.sh
@@ -111,6 +111,8 @@ YETUS_ARGS+=("--tests-filter=test4tests")
# Dockerfile since we don't want to use the auto-pulled version.
YETUS_ARGS+=("--docker")
YETUS_ARGS+=("--dockerfile=${DOCKERFILE}")
+# enable docker-in-docker so that we can build container images
+YETUS_ARGS+=("--dockerind=true")
YETUS_ARGS+=("--mvn-custom-repos")
YETUS_ARGS+=("--java-home=${SET_JAVA_HOME}")
# effectively treat dev-support as a custom maven module
@@ -127,4 +129,4 @@ YETUS_ARGS+=("--proclimit=5000")
echo "Launching yetus with command line:"
echo "${TESTPATCHBIN} ${YETUS_ARGS[*]}"
-/usr/bin/env bash "${TESTPATCHBIN}" "${YETUS_ARGS[@]}"
\ No newline at end of file
+/usr/bin/env bash "${TESTPATCHBIN}" "${YETUS_ARGS[@]}"
diff --git a/hbase-kubernetes-deployment/components/hbase/hadoop-metrics2-hbase.properties b/hbase-kubernetes-deployment/components/hbase/hadoop-metrics2-hbase.properties
new file mode 100644
index 0000000000..922822310a
--- /dev/null
+++ b/hbase-kubernetes-deployment/components/hbase/hadoop-metrics2-hbase.properties
@@ -0,0 +1,46 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+ # syntax: [prefix].[source|sink].[instance].[options]
+ # See javadoc of package-info.java for org.apache.hadoop.metrics2 for details
+
+ *.sink.file*.class=org.apache.hadoop.metrics2.sink.FileSink
+ # default sampling period
+ *.period=10
+
+ # Below are some examples of sinks that could be used
+ # to monitor different hbase daemons.
+
+ # hbase.sink.file-all.class=org.apache.hadoop.metrics2.sink.FileSink
+ # hbase.sink.file-all.filename=all.metrics
+
+ # hbase.sink.file0.class=org.apache.hadoop.metrics2.sink.FileSink
+ # hbase.sink.file0.context=hmaster
+ # hbase.sink.file0.filename=master.metrics
+
+ # hbase.sink.file1.class=org.apache.hadoop.metrics2.sink.FileSink
+ # hbase.sink.file1.context=thrift-one
+ # hbase.sink.file1.filename=thrift-one.metrics
+
+ # hbase.sink.file2.class=org.apache.hadoop.metrics2.sink.FileSink
+ # hbase.sink.file2.context=thrift-two
+ # hbase.sink.file2.filename=thrift-two.metrics
+
+ # hbase.sink.file3.class=org.apache.hadoop.metrics2.sink.FileSink
+ # hbase.sink.file3.context=rest
+ # hbase.sink.file3.filename=rest.metrics
+
+ # Read from src rather than copy over like this.
diff --git a/hbase-kubernetes-deployment/components/hbase/hbase-site.xml b/hbase-kubernetes-deployment/components/hbase/hbase-site.xml
new file mode 100644
index 0000000000..e6917fadff
--- /dev/null
+++ b/hbase-kubernetes-deployment/components/hbase/hbase-site.xml
@@ -0,0 +1,75 @@
+
+
+
+
+
+ hbase.cluster.distributed
+ true
+
+
+ hbase.rootdir
+ hdfs://${env.HADOOP_SERVICE}:8020/hbase
+
+
+ hbase.zookeeper.quorum
+ ${env.HBASE_ZOOKEEPER_QUORUM}
+
+
+
+ hbase.unsafe.regionserver.hostname.disable.master.reversedns
+ true
+
+
+ dfs.client.use.datanode.hostname
+ true
+
+
+ hbase.ssl.enabled
+
+ ${env.HBASE_SSL_ENABLED}
+
+
+ hbase.master.cleaner.interval
+
+ 10000
+
+
+ hbase.regionserver.handler.count
+ 45
+
+
+ hbase.netty.eventloop.rpcserver.thread.count
+ 1
+ See the end of https://issues.apache.org/jira/browse/HBASE-27112. Default
+ is 2xCPU_COUNT which seems way too much. 1 thread seems fine for this workload at least.
+
+
+ hbase.netty.worker.count
+ 1
+ See the end of https://issues.apache.org/jira/browse/HBASE-27112. Default
+ is 2xCPU_COUNT which seems way too much. 1 thread seems fine for this workload at least.
+
+
+ hbase.regionserver.throughput.controller
+ org.apache.hadoop.hbase.regionserver.throttle.NoLimitThroughputController
+
+
diff --git a/hbase-kubernetes-deployment/components/hbase/kustomization.yaml b/hbase-kubernetes-deployment/components/hbase/kustomization.yaml
new file mode 100644
index 0000000000..98ef68a0ec
--- /dev/null
+++ b/hbase-kubernetes-deployment/components/hbase/kustomization.yaml
@@ -0,0 +1,40 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1alpha1
+kind: Component
+
+commonLabels:
+ app: hadoop
+
+configMapGenerator:
+- name: hbase-configuration
+ files:
+ - hbase-site.xml
+ - hadoop-metrics2-hbase.properties
+ - log4j2.properties=log4j2.properties.hbase
+- name: hbck2-configuration
+ # Add an hbck2-configuration. Uses log4j1 vs log4j2.
+ files:
+ - hbase-site.xml
+ - hadoop-metrics2-hbase.properties
+ - log4j.properties=log4j.properties.hbase
+
+resources:
+- m-service.yaml
+- m-statefulset.yaml
+- rs-service.yaml
+- rs-statefulset.yaml
diff --git a/hbase-kubernetes-deployment/components/hbase/log4j.properties.hbase b/hbase-kubernetes-deployment/components/hbase/log4j.properties.hbase
new file mode 100644
index 0000000000..c322699ced
--- /dev/null
+++ b/hbase-kubernetes-deployment/components/hbase/log4j.properties.hbase
@@ -0,0 +1,68 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hbase.root.logger=INFO,console
+hbase.log.dir=.
+hbase.log.file=hbase.log
+
+# Define the root logger to the system property "hbase.root.logger".
+log4j.rootLogger=${hbase.root.logger}
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hbase.log.dir}/${hbase.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+# Debugging Pattern format
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this
+#
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %C{2}(%L): %m%n
+
+# Custom Logging levels
+
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+
+log4j.logger.org.apache.hadoop=WARN
+log4j.logger.org.apache.zookeeper=ERROR
+log4j.logger.org.apache.hadoop.hbase=DEBUG
+
+#These settings are workarounds against spurious logs from the minicluster.
+#See HBASE-4709
+log4j.logger.org.apache.hadoop.metrics2.impl.MetricsConfig=WARN
+log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSinkAdapter=WARN
+log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=WARN
+log4j.logger.org.apache.hadoop.metrics2.util.MBeans=WARN
+# Enable this to get detailed connection error/retry logging.
+# log4j.logger.org.apache.hadoop.hbase.client.ConnectionImplementation=TRACE
diff --git a/hbase-kubernetes-deployment/components/hbase/log4j2.properties.hbase b/hbase-kubernetes-deployment/components/hbase/log4j2.properties.hbase
new file mode 100644
index 0000000000..b06a8a6371
--- /dev/null
+++ b/hbase-kubernetes-deployment/components/hbase/log4j2.properties.hbase
@@ -0,0 +1,167 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+status = warn
+dest = err
+name = PropertiesConfig
+# refresh configuration from this file every 5 minutes
+monitorInterval = 300
+
+# Console appender
+appender.console.type = Console
+appender.console.target = SYSTEM_ERR
+appender.console.name = console
+appender.console.layout.type = PatternLayout
+appender.console.layout.pattern = %d{ISO8601} %-5p [%t] %c{2}: %.-20000m%n
+
+# Daily Rolling File Appender
+appender.DRFA.type = RollingFile
+appender.DRFA.name = DRFA
+appender.DRFA.fileName = ${sys:hbase.log.dir:-.}/${sys:hbase.log.file:-hbase.log}
+appender.DRFA.filePattern = ${sys:hbase.log.dir:-.}/${sys:hbase.log.file:-hbase.log}.%d{yyyy-MM-dd}
+appender.DRFA.createOnDemand = true
+appender.DRFA.layout.type = PatternLayout
+appender.DRFA.layout.pattern = %d{ISO8601} %-5p [%t] %c{2}: %.-20000m%n
+appender.DRFA.policies.type = Policies
+appender.DRFA.policies.time.type = TimeBasedTriggeringPolicy
+appender.DRFA.policies.time.interval = 1
+appender.DRFA.policies.time.modulate = true
+appender.DRFA.policies.size.type = SizeBasedTriggeringPolicy
+appender.DRFA.policies.size.size = ${sys:hbase.log.maxfilesize:-256MB}
+appender.DRFA.strategy.type = DefaultRolloverStrategy
+appender.DRFA.strategy.max = ${sys:hbase.log.maxbackupindex:-20}
+
+# Rolling File Appender
+appender.RFA.type = RollingFile
+appender.RFA.name = RFA
+appender.RFA.fileName = ${sys:hbase.log.dir:-.}/${sys:hbase.log.file:-hbase.log}
+appender.RFA.filePattern = ${sys:hbase.log.dir:-.}/${sys:hbase.log.file:-hbase.log}.%i
+appender.RFA.createOnDemand = true
+appender.RFA.layout.type = PatternLayout
+appender.RFA.layout.pattern = %d{ISO8601} %-5p [%t] %c{2}: %.-20000m%n
+appender.RFA.policies.type = Policies
+appender.RFA.policies.size.type = SizeBasedTriggeringPolicy
+appender.RFA.policies.size.size = ${sys:hbase.log.maxfilesize:-256MB}
+appender.RFA.strategy.type = DefaultRolloverStrategy
+appender.RFA.strategy.max = ${sys:hbase.log.maxbackupindex:-20}
+
+# Security Audit Appender
+appender.RFAS.type = RollingFile
+appender.RFAS.name = RFAS
+appender.RFAS.fileName = ${sys:hbase.log.dir:-.}/${sys:hbase.security.log.file:-SecurityAuth.audit}
+appender.RFAS.filePattern = ${sys:hbase.log.dir:-.}/${sys:hbase.security.log.file:-SecurityAuth.audit}.%i
+appender.RFAS.createOnDemand = true
+appender.RFAS.layout.type = PatternLayout
+appender.RFAS.layout.pattern = %d{ISO8601} %-5p [%t] %c{2}: %.-20000m%n
+appender.RFAS.policies.type = Policies
+appender.RFAS.policies.size.type = SizeBasedTriggeringPolicy
+appender.RFAS.policies.size.size = ${sys:hbase.security.log.maxfilesize:-256MB}
+appender.RFAS.strategy.type = DefaultRolloverStrategy
+appender.RFAS.strategy.max = ${sys:hbase.security.log.maxbackupindex:-20}
+
+# Http Access Log RFA, uncomment this if you want an http access.log
+# appender.AccessRFA.type = RollingFile
+# appender.AccessRFA.name = AccessRFA
+# appender.AccessRFA.fileName = /var/log/hbase/access.log
+# appender.AccessRFA.filePattern = /var/log/hbase/access.log.%i
+# appender.AccessRFA.createOnDemand = true
+# appender.AccessRFA.layout.type = PatternLayout
+# appender.AccessRFA.layout.pattern = %m%n
+# appender.AccessRFA.policies.type = Policies
+# appender.AccessRFA.policies.size.type = SizeBasedTriggeringPolicy
+# appender.AccessRFA.policies.size.size = 200MB
+# appender.AccessRFA.strategy.type = DefaultRolloverStrategy
+# appender.AccessRFA.strategy.max = 10
+
+# Null Appender
+appender.NullAppender.type = Null
+appender.NullAppender.name = NullAppender
+
+rootLogger = ${sys:hbase.root.logger:-INFO,console}
+
+logger.SecurityLogger.name = SecurityLogger
+logger.SecurityLogger = ${sys:hbase.security.logger:-INFO,console}
+logger.SecurityLogger.additivity = false
+
+# Custom Logging levels
+# logger.zookeeper.name = org.apache.zookeeper
+# logger.zookeeper.level = ERROR
+
+# logger.FSNamesystem.name = org.apache.hadoop.fs.FSNamesystem
+# logger.FSNamesystem.level = DEBUG
+
+# logger.hbase.name = org.apache.hadoop.hbase
+# logger.hbase.level = DEBUG
+
+# logger.META.name = org.apache.hadoop.hbase.META
+# logger.META.level = DEBUG
+
+# Make these two classes below DEBUG to see more zk debug.
+# logger.ZKUtil.name = org.apache.hadoop.hbase.zookeeper.ZKUtil
+# logger.ZKUtil.level = DEBUG
+
+# logger.ZKWatcher.name = org.apache.hadoop.hbase.zookeeper.ZKWatcher
+# logger.ZKWatcher.level = DEBUG
+
+logger.dfs.name = org.apache.hadoop.hdfs
+logger.dfs.level = DEBUG
+
+# Prevent metrics subsystem start/stop messages (HBASE-17722)
+logger.MetricsConfig.name = org.apache.hadoop.metrics2.impl.MetricsConfig
+logger.MetricsConfig.level = WARN
+
+logger.MetricsSinkAdapter.name = org.apache.hadoop.metrics2.impl.MetricsSinkAdapter
+logger.MetricsSinkAdapter.level = WARN
+
+logger.MetricsSystemImpl.name = org.apache.hadoop.metrics2.impl.MetricsSystemImpl
+logger.MetricsSystemImpl.level = WARN
+
+# Disable request log by default, you can enable this by changing the appender
+logger.http.name = http.requests
+logger.http.additivity = false
+logger.http = INFO,NullAppender
+# Replace the above with this configuration if you want an http access.log
+# logger.http = INFO,AccessRFA
+#
+#
+
+# Turn off performanceadvisory... it logs when a block is remote
+# complaining short-circuit read is UNUSABLE when the block is
+# remote (duh!).
+logger.PerformanceAdvisory.name = org.apache.hadoop.util.PerformanceAdvisory
+logger.PerformanceAdvisory.level = INFO
+
+# Turn down CodecPool log level so we avoid messages like the below
+# 2022-04-05 23:34:34,208 DEBUG [RpcServer.default.FPBQ.Fifo.handler=25,queue=0,port=16020] compress.CodecPool: Got recycled decompressor
+#
+logger.CodecPool.name = org.apache.hadoop.io.compress.CodecPool
+logger.CodecPool.level = INFO
+
+# Turn off noise.
+logger.hadoopipc.name = org.apache.hadoop.ipc
+logger.hadoopipc.level = WARN
+logger.jetty.name = org.eclipse.jetty
+logger.jetty.level = INFO
+logger.jetty3rdparty.name = org.apache.hbase.thirdparty.org.eclipse.jetty
+logger.jetty3rdparty.level = INFO
+logger.hdfs.name = org.apache.hadoop.hdfs
+logger.hdfs.level = INFO
+logger.metrics2.name = org.apache.hadoop.metrics2
+logger.metrics2.level = INFO
+logger.jsonbean.name = org.apache.hadoop.hbase.util.JSONBean
+logger.jsonbean.level = INFO
+logger.scheduledchore.name = org.apache.hadoop.hbase.ScheduledChore
+logger.scheduledchore.level = INFO
diff --git a/hbase-kubernetes-deployment/components/hbase/m-service.yaml b/hbase-kubernetes-deployment/components/hbase/m-service.yaml
new file mode 100644
index 0000000000..5146fcf862
--- /dev/null
+++ b/hbase-kubernetes-deployment/components/hbase/m-service.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: master
+ labels:
+ jmxexporter: enabled
+spec:
+ selector:
+ role: master
+ clusterIP: None
+ ports:
+ - name: jmxexporter
+ port: 7000
diff --git a/hbase-kubernetes-deployment/components/hbase/m-statefulset.yaml b/hbase-kubernetes-deployment/components/hbase/m-statefulset.yaml
new file mode 100644
index 0000000000..97e282f621
--- /dev/null
+++ b/hbase-kubernetes-deployment/components/hbase/m-statefulset.yaml
@@ -0,0 +1,259 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: master
+ labels:
+ role: master
+spec:
+ podManagementPolicy: Parallel
+ replicas: 1
+ selector:
+ matchLabels:
+ role: master
+  serviceName: hadoop
+  template:
+    metadata:
+      labels:
+        role: master
+    spec:
+      serviceAccountName: hadoop
+      affinity:
+        podAntiAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+          - labelSelector:
+              matchExpressions:
+              - key: role
+                operator: In
+                values:
+                - master
+            topologyKey: kubernetes.io/hostname
+ containers:
+ - image: hbase
+ name: hbase
+ command:
+ - /bin/bash
+ - -c
+ - |-
+ # Shell context so we can pull in the environment variables set in the container and
+ # via the env and envFrom.
+ # See https://stackoverflow.com/questions/57885828/netty-cannot-access-class-jdk-internal-misc-unsafe
+ HBASE_CLASSPATH=/etc/hadoop \
+ HBASE_OPTS=" \
+ -XX:MaxRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT} \
+ -XX:InitialRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT} \
+ -javaagent:${JMX_PROMETHEUS_JAR}=7000:/tmp/scratch/jmxexporter.yaml \
+ -Djava.security.properties=/tmp/scratch/java.security \
+ -Djava.library.path=${HADOOP_HOME}/lib/native --add-opens java.base/jdk.internal.misc=ALL-UNNAMED \
+ -Djava.util.logging.config.file=${HBASE_CONF_DIR}/logging.properties \
+ -Dio.netty.tryReflectionSetAccessible=true \
+ -Xlog:gc:${HBASE_LOG_DIR}/gc.log:time,uptime:filecount=10,filesize=100M" \
+ hbase-daemon.sh --config /etc/hbase foreground_start master \
+ ${FAILOVER_PROXY_PROVIDER}
+ # For now, just fetch local /jmx
+ # Says kubelet only exposes failures, not success: https://stackoverflow.com/questions/34455040/kubernetes-liveness-probe-logging
+ # TODO: Use https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/HealthChecker.html
+ livenessProbe:
+ httpGet:
+ path: /jmx?qry=java.lang:type=OperatingSystem
+ port: 16010
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /jmx?qry=java.lang:type=OperatingSystem
+ port: 16010
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /jmx?qry=java.lang:type=OperatingSystem
+ port: 16010
+ initialDelaySeconds: 10
+ failureThreshold: 30
+ periodSeconds: 10
+ resources:
+ requests:
+ memory: "1.5Gi"
+ cpu: "0.1"
+ limits:
+ memory: "2Gi"
+ cpu: "1.0"
+ envFrom:
+ - configMapRef:
+ name: environment
+ - configMapRef:
+ name: zookeeper-quorum
+ - configMapRef:
+ name: jaegertracing
+ optional: true
+ - configMapRef:
+ name: xraytracing
+ optional: true
+ env:
+# not honored by opentelemetry-java/v1.0.1
+# - name: OTEL_EXPERIMENTAL_SDK_ENABLED
+# # flip this switch to disable tracing
+# value: "true"
+# not honored by opentelemetry-java/v1.0.1
+# - name: OTEL_SERVICE_NAME
+# value: "hbase-master"
+# - name: OTEL_RESOURCE_ATTRIBUTES
+# value: "service.name=hbase-master"
+# - name: HBASE_TRACE_OPTS
+# value: "true"
+# - name: HBASE_SHELL_OPTS
+# # it appears that system properties override environment variables, so naming the shell
+# # can via system property will override the master name provided by environment variable
+# value: "-Dotel.resource.attributes=service.name=hbase-shell"
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ ports:
+ - containerPort: 16010
+ name: http
+ - containerPort: 16000
+ name: rpc
+ volumeMounts:
+ - name: hbase-configuration
+ mountPath: /etc/hbase
+ readOnly: true
+ - name: hbck2-configuration
+ mountPath: /etc/hbck2
+ readOnly: true
+ - mountPath: /etc/hadoop
+ name: hadoop-configuration
+ - mountPath: /var/log/hbase
+ name: hbase-logs
+ - mountPath: /tmp/scratch
+ name: scratch
+ - mountPath: /tmp/scripts
+ name: scripts
+ initContainers:
+ - image: hbase
+ name: bootstrapper
+ command:
+ - /bin/bash
+ - -c
+ - |-
+ set -ex
+ env | sort
+ # Currently, waits till NNs and all DNs are up. Waiting on all DNs is extreme
+ # but will do for now; could just wait on 5 or 10 or so.
+ /tmp/scripts/jmxping.sh namenode ${HADOOP_SERVICE}
+ # TODO: Should we check if ha and if so, if a NN active... get a report on health?
+ /tmp/scripts/jmxping.sh datanode ${HADOOP_SERVICE} 3
+ cp /tmp/global-files/* /tmp/scratch/
+ # Write the USER hbase is running as into temporary file for use by next init container
+ echo ${USER} > /tmp/scratch/hbaseuser.txt
+ # Ditto for the location of the hbase dir.
+ hbase org.apache.hadoop.hbase.util.HBaseConfTool hbase.rootdir > /tmp/scratch/hbaserootdir.txt
+ cat /tmp/scratch/hbaserootdir.txt
+ securityContext:
+ # Run bootstrapper as root so can set ${USER} owner on data volume
+ allowPrivilegeEscalation: false
+ runAsUser: 0
+ resources:
+ requests:
+ cpu: '0.2'
+ memory: 256Mi
+ limits:
+ cpu: '0.5'
+ memory: 512Mi
+ envFrom:
+ - configMapRef:
+ name: environment
+ volumeMounts:
+ - mountPath: /tmp/scripts
+ name: scripts
+ # Scratch dir is a location where init containers place items for later use
+ # by the main containers when they run.
+ - mountPath: /tmp/scratch
+ name: scratch
+ - mountPath: /tmp/global-files
+ name: global-files
+ - name: hbase-configuration
+ mountPath: /etc/hbase
+ readOnly: true
+ - image: hadoop
+ name: prepare-hdfs-for-hbase
+ command:
+ - /bin/bash
+ - -c
+ - |-
+ set -ex
+ # This container runs as the HDFS ${USER}/super-user.
+ # Make sure that this image is the same as that of the cluster hbase is to
+ # run on (else below will fail w/ permissions issues).
+ HBASE_USER=$(cat /tmp/scratch/hbaseuser.txt)
+ HBASE_ROOTDIR=$(cat /tmp/scratch/hbaserootdir.txt)
+ hdfs --config /etc/hadoop dfs -mkdir ${HBASE_ROOTDIR} || echo $?
+ hdfs --config /etc/hadoop dfs -chown -R ${HBASE_USER} ${HBASE_ROOTDIR}
+ resources:
+ requests:
+ cpu: '0.2'
+ memory: 256Mi
+ limits:
+ cpu: '0.5'
+ memory: 512Mi
+ envFrom:
+ - configMapRef:
+ name: environment
+ env:
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ # Scratch dir is a location where init containers place items for later use
+ # by the main containers when they run.
+ - mountPath: /tmp/scratch
+ name: scratch
+ - mountPath: /etc/hadoop
+ name: hadoop-configuration
+ - mountPath: /var/log/hadoop
+ name: hadoop-logs
+ restartPolicy: Always
+ volumes:
+ - name: hbase-configuration
+ configMap:
+ name: hbase-configuration
+ - name: hbck2-configuration
+ configMap:
+ name: hbck2-configuration
+ - configMap:
+ name: hadoop-configuration
+ name: hadoop-configuration
+ - configMap:
+ name: global-files
+ name: global-files
+ - emptyDir: {}
+ name: hbase-logs
+ - emptyDir: {}
+ name: hadoop-logs
+ - configMap:
+ name: scripts
+ defaultMode: 0555
+ name: scripts
+ # Scratch dir is a location where init containers place items for later use
+ # by the main containers when they run.
+ - emptyDir: {}
+ name: scratch
diff --git a/hbase-kubernetes-deployment/components/hbase/rs-service.yaml b/hbase-kubernetes-deployment/components/hbase/rs-service.yaml
new file mode 100644
index 0000000000..762a677e7d
--- /dev/null
+++ b/hbase-kubernetes-deployment/components/hbase/rs-service.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: regionserver
+ labels:
+ jmxexporter: enabled
+spec:
+ selector:
+ role: regionserver
+ clusterIP: None
+ ports:
+ - name: jmxexporter
+ port: 7000
diff --git a/hbase-kubernetes-deployment/components/hbase/rs-statefulset.yaml b/hbase-kubernetes-deployment/components/hbase/rs-statefulset.yaml
new file mode 100644
index 0000000000..1b6ece2abf
--- /dev/null
+++ b/hbase-kubernetes-deployment/components/hbase/rs-statefulset.yaml
@@ -0,0 +1,237 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: regionserver
+ labels:
+ role: regionserver
+spec:
+ podManagementPolicy: Parallel
+ replicas: 1
+ selector:
+ matchLabels:
+ role: regionserver
+ serviceName: hadoop
+ template:
+ metadata:
+ labels:
+ role: regionserver
+ spec:
+ serviceAccountName: hadoop
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ role: regionserver
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - image: hbase
+ name: hbase
+ command:
+ - /bin/bash
+ - -c
+ - |-
+ # Shell context so we can pull in the environment variables set in the container and
+ # via the env and envFrom.
+ #
+ # See https://stackoverflow.com/questions/57885828/netty-cannot-access-class-jdk-internal-misc-unsafe
+ HBASE_CLASSPATH=/etc/hadoop \
+ HBASE_OPTS=" \
+ -XX:MaxRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT} \
+ -XX:InitialRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT} \
+ -javaagent:${JMX_PROMETHEUS_JAR}=7000:/tmp/scratch/jmxexporter.yaml \
+ -Djava.security.properties=/tmp/scratch/java.security \
+ -Djava.library.path=/var/lib/hadoop-native-lib/native \
+ --add-opens java.base/jdk.internal.misc=ALL-UNNAMED \
+ -Djava.util.logging.config.file=${HBASE_CONF_DIR}/logging.properties \
+ -Dio.netty.tryReflectionSetAccessible=true \
+ -Xlog:gc:${HBASE_LOG_DIR}/gc.log:time,uptime:filecount=10,filesize=100M" \
+ hbase-daemon.sh --config /etc/hbase foreground_start regionserver \
+ ${FAILOVER_PROXY_PROVIDER} # &> ${HBASE_LOG_DIR}/regionserver.$(date -u +"%Y-%m-%dT%H%M%SZ").out
+ # For now, just fetch local /jmx
+ # Says kubelet only exposes failures, not success: https://stackoverflow.com/questions/34455040/kubernetes-liveness-probe-logging
+ # TODO: Use https://hbase.apache.org/devapidocs/org/apache/hadoop/hbase/HealthChecker.html
+ livenessProbe:
+ httpGet:
+ path: /jmx?qry=java.lang:type=OperatingSystem
+ port: 16030
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /jmx?qry=java.lang:type=OperatingSystem
+ port: 16030
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /jmx?qry=java.lang:type=OperatingSystem
+ port: 16030
+ initialDelaySeconds: 10
+ failureThreshold: 30
+ periodSeconds: 10
+ resources:
+ requests:
+ memory: "2Gi"
+ cpu: "0.2"
+ limits:
+ memory: "3Gi"
+ cpu: "1.0"
+ envFrom:
+ - configMapRef:
+ name: environment
+ - configMapRef:
+ name: zookeeper-quorum
+ - configMapRef:
+ name: jaegertracing
+ optional: true
+ - configMapRef:
+ name: xraytracing
+ optional: true
+ env:
+# not honored by opentelemetry-java/v1.0.1
+# - name: OTEL_EXPERIMENTAL_SDK_ENABLED
+# # flip this switch to disable tracing
+# value: "true"
+# not honored by opentelemetry-java/v1.0.1
+# - name: OTEL_SERVICE_NAME
+# value: "hbase-regionserver"
+# - name: HBASE_TRACE_OPTS
+# value: "true"
+# - name: OTEL_RESOURCE_ATTRIBUTES
+# value: "service.name=hbase-regionserver"
+# - name: HBASE_SHELL_OPTS
+#   # it appears that system properties override environment variables, so naming the shell
+#   # via a system property will override the master name provided by environment variable
+# value: "-Dotel.service.name=hbase-shell"
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ ports:
+ - containerPort: 16030
+ name: http
+ - containerPort: 16020
+ name: rpc
+ volumeMounts:
+ - name: hbase-configuration
+ mountPath: /etc/hbase
+ readOnly: true
+ - name: hbck2-configuration
+ mountPath: /etc/hbck2
+ readOnly: true
+ - mountPath: /etc/hadoop
+ name: hadoop-configuration
+ - mountPath: /var/log/hbase
+ name: hbase-logs
+ - mountPath: /tmp/scratch
+ name: scratch
+ - mountPath: /tmp/scripts
+ name: scripts
+ - mountPath: /var/lib/hadoop-native-lib
+ name: hadoop-native-lib
+ initContainers:
+ - image: hbase
+ name: bootstrapper
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/bash
+ - -c
+ - |-
+ set -ex
+ # Wait on a single master to be up...
+ /tmp/scripts/jmxping.sh master ${HADOOP_SERVICE} 1
+ cp /tmp/global-files/* /tmp/scratch/
+ securityContext:
+          # Run bootstrapper as root so it can set ${USER} owner on the data volume
+ allowPrivilegeEscalation: false
+ runAsUser: 0
+ resources:
+ requests:
+ cpu: '0.2'
+ memory: 256Mi
+ limits:
+ cpu: '0.5'
+ memory: 512Mi
+ envFrom:
+ - configMapRef:
+ name: environment
+ volumeMounts:
+ - mountPath: /tmp/scripts
+ name: scripts
+ # Scratch dir is a location where init containers place items for later use
+ # by the main containers when they run.
+ - mountPath: /tmp/scratch
+ name: scratch
+ - mountPath: /tmp/global-files
+ name: global-files
+ - image: hadoop
+ name: copy-hadoop-native-lib
+ command:
+ # Runs as the image/hdfs user.
+ - /bin/bash
+ - -c
+ - |-
+ set -xe
+ if [ -d ~/hadoop/lib/native ]; then
+ cp -r ~/hadoop/lib/native /var/lib/hadoop-native-lib
+ else
+ echo "Native dir not found at ~/hadoop/lib/native"
+ fi
+ resources:
+ requests:
+ cpu: '0.2'
+ memory: 256Mi
+ limits:
+ cpu: '0.5'
+ memory: 512Mi
+ envFrom:
+ - configMapRef:
+ name: environment
+ volumeMounts:
+ - mountPath: /var/lib/hadoop-native-lib
+ name: hadoop-native-lib
+ restartPolicy: Always
+ volumes:
+ - name: hbase-configuration
+ configMap:
+ name: hbase-configuration
+ - name: hbck2-configuration
+ configMap:
+ name: hbck2-configuration
+ - configMap:
+ name: hadoop-configuration
+ name: hadoop-configuration
+ - configMap:
+ name: global-files
+ name: global-files
+ - emptyDir: {}
+ name: hbase-logs
+ - configMap:
+ name: scripts
+ defaultMode: 0555
+ name: scripts
+ # Scratch dir is a location where init containers place items for later use
+ # by the main containers when they run.
+ - emptyDir: {}
+ name: scratch
+ - emptyDir: {}
+ name: hadoop-native-lib
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/README.md b/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/README.md
new file mode 100644
index 0000000000..3a1b093832
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/README.md
@@ -0,0 +1,56 @@
+
+
+# hbase-kubernetes-hadoop-image
+
+There is a contract that must be maintained between the container image run in the pod and the
+infrastructure that launched the pod. Details like paths, users/groups, permissions, and
+environment variables must align so that the deployment layer can pass runtime concerns down to
+the container.
+
+Start with the official hadoop image and extend it from there. Note that `apache/hadoop:3` is only
+published for `linux/amd64` at this time.
+
+## Build
+
+Input arguments are managed by [docker-bake.override.hcl](./docker-bake.override.hcl).
+
+Start by creating a buildx context that supports at least `linux/amd64` images. If you've created
+this context previously, it's enough to ensure that it's active via `docker buildx ls`.
+
+```shell
+$ docker buildx create \
+ --driver docker-container \
+ --platform linux/amd64,linux/arm64 \
+ --use \
+ --bootstrap
+```
+
+Finally, build the image using `mvn package`, or manually, using,
+
+```shell
+$ docker buildx bake \
+ --file src/main/docker/docker-bake.hcl \
+ --file src/main/docker/docker-bake.override.hcl \
+ --set '*.platform=linux/amd64' \
+ --pull \
+ --load
+```
+
+This exports an image to your local repository that is tagged as
+`${USER}/hbase/operator-tools/hadoop:latest`.
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/pom.xml b/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/pom.xml
new file mode 100644
index 0000000000..b4890a519e
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/pom.xml
@@ -0,0 +1,104 @@
+
+
+
+ 4.0.0
+
+ hbase-kubernetes-deployment
+ org.apache.hbase.operator.tools
+ ${revision}
+ ..
+
+
+ hbase-kubernetes-hadoop-image
+ Apache HBase - Kubernetes Hadoop Image
+  A container image for running Hadoop in Kubernetes.
+ pom
+
+
+
+ linux/amd64
+
+
+
+
+
+
+
+ org.codehaus.mojo
+ exec-maven-plugin
+
+ true
+
+
+
+ docker-buildx-bake-print
+
+ exec
+
+ package
+
+ /usr/bin/env
+
+ sh
+ -c
+
+ 2>&1 \
+ docker buildx bake \
+ --print \
+ --file src/main/docker/docker-bake.hcl \
+ --file src/main/docker/docker-bake.override.hcl
+
+
+
+
+
+ docker-buildx-bake
+
+ exec
+
+ package
+
+ /usr/bin/env
+
+ sh
+ -c
+
+ 2>&1 \
+ docker buildx bake \
+ --progress plain \
+ --pull \
+ --load \
+ --set *.platform=${container_image.platforms} \
+ --file src/main/docker/docker-bake.hcl \
+ --file src/main/docker/docker-bake.override.hcl
+
+
+
+
+
+
+
+
+
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/src/main/docker/Dockerfile b/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/src/main/docker/Dockerfile
new file mode 100644
index 0000000000..8c514fa96d
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/src/main/docker/Dockerfile
@@ -0,0 +1,57 @@
+# syntax=docker/dockerfile:1.4
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# hadolint global ignore=DL3020
+
+ARG BASE_IMG='apache/hadoop'
+ARG BASE_TAG='3'
+ARG BASE_IMG_ALPINE='alpine'
+ARG BASE_IMG_TAG_ALPINE='latest'
+
+FROM ${BASE_IMG_ALPINE}:${BASE_IMG_TAG_ALPINE} as alpine
+
+## -- Stage installed_jmx_exporter --
+# A stage that prepares the JMX Exporter installation directory. The jar must be in well-known
+# location so can refer to it in command yaml value.
+FROM alpine AS installed_jmx_exporter
+ARG JMX_PROMETHEUS_JAR_URL
+ADD --link ${JMX_PROMETHEUS_JAR_URL} /tmp/jmx_prometheus_javaagent.jar
+
+FROM ${BASE_IMG}:${BASE_TAG} as final
+ARG CORRETTO_KEY_URL
+ARG CORRETTO_KEY
+ARG CORRETTO_REPO_URL
+ARG CORRETTO_REPO
+USER root
+ADD --link ${CORRETTO_KEY_URL} /tmp/
+ADD --link ${CORRETTO_REPO_URL} /etc/yum.repos.d/
+
+RUN rpm --import "/tmp/${CORRETTO_KEY}" \
+ && yum -y remove java-1.8.0-* \
+ && yum -y update \
+ && yum -y install java-17-amazon-corretto-devel \
+ && yum -y clean all \
+ && rm -rf /var/cache
+
+COPY --from=installed_jmx_exporter \
+ --chown=hadoop:users \
+ /tmp/jmx_prometheus_javaagent.jar /opt/hadoop/jmx_prometheus_javaagent.jar
+
+ENV JMX_PROMETHEUS_JAR /opt/hadoop/jmx_prometheus_javaagent.jar
+ENV USER='hadoop'
+USER ${USER}
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/src/main/docker/docker-bake.hcl b/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/src/main/docker/docker-bake.hcl
new file mode 100644
index 0000000000..0040cfad29
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/src/main/docker/docker-bake.hcl
@@ -0,0 +1,65 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# A convenience script for building the hadoop image.
+# See hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/README.md
+#
+
+variable BASE_IMG {
+ default = "apache/hadoop"
+}
+variable BASE_TAG {
+ default = "3"
+}
+variable USER {
+ default = "apache"
+}
+variable IMAGE_TAG {
+ default = "latest"
+}
+variable IMAGE_NAME {
+ default = "${USER}/hbase/operator-tools/hadoop"
+}
+variable CORRETTO_KEY_URL {}
+variable CORRETTO_KEY {}
+variable CORRETTO_REPO_URL {}
+variable CORRETTO_REPO {}
+variable JMX_PROMETHEUS_JAR_URL {}
+variable JMX_PROMETHEUS_JAR {}
+
+group default {
+ targets = [ "hadoop" ]
+}
+
+target hadoop {
+ dockerfile = "src/main/docker/Dockerfile"
+ args = {
+ BASE_IMG = BASE_IMG
+ BASE_TAG = BASE_TAG
+ CORRETTO_KEY_URL = CORRETTO_KEY_URL
+ CORRETTO_KEY = CORRETTO_KEY
+ CORRETTO_REPO_URL = CORRETTO_REPO_URL
+ CORRETTO_REPO = CORRETTO_REPO
+ JMX_PROMETHEUS_JAR_URL = JMX_PROMETHEUS_JAR_URL
+ JMX_PROMETHEUS_JAR = JMX_PROMETHEUS_JAR
+ }
+ target = "final"
+ platforms = [
+ # upstream image only provides linux/amd64
+ "linux/amd64"
+ ]
+ tags = [ "${IMAGE_NAME}:${IMAGE_TAG}" ]
+}
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/src/main/docker/docker-bake.override.hcl b/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/src/main/docker/docker-bake.override.hcl
new file mode 100644
index 0000000000..cbe27ddc65
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-hadoop-image/src/main/docker/docker-bake.override.hcl
@@ -0,0 +1,47 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Externalize default values of build parameters and document how to retrieve them.
+#
+
+function "basename" {
+ params = [a]
+ result = split("/", a)[length(split("/", a)) - 1]
+}
+
+variable CORRETTO_KEY_URL {
+ default = "https://yum.corretto.aws/corretto.key"
+}
+
+variable CORRETTO_KEY {
+ default = "${basename(CORRETTO_KEY_URL)}"
+}
+
+variable CORRETTO_REPO_URL {
+ default = "https://yum.corretto.aws/corretto.repo"
+}
+
+variable CORRETTO_REPO {
+ default = "${basename(CORRETTO_REPO_URL)}"
+}
+
+variable JMX_PROMETHEUS_JAR_URL {
+ default = "https://repo1.maven.org/maven2/io/prometheus/jmx/jmx_prometheus_javaagent/0.16.1/jmx_prometheus_javaagent-0.16.1.jar"
+}
+
+variable JMX_PROMETHEUS_JAR {
+ default = "${basename(JMX_PROMETHEUS_JAR_URL)}"
+}
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/pom.xml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/pom.xml
new file mode 100644
index 0000000000..c33c53cf00
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/pom.xml
@@ -0,0 +1,100 @@
+
+
+
+ 4.0.0
+
+ hbase-kubernetes-deployment
+ org.apache.hbase.operator.tools
+ ${revision}
+ ..
+
+
+ hbase-kubernetes-kustomize
+ Apache HBase - Kubernetes Kustomize
+ Kustomize deployment descriptors.
+ pom
+
+
+
+ org.apache.hbase.operator.tools
+ hbase-kubernetes-testing-image
+ pom
+ test
+
+
+
+
+
+
+ org.codehaus.mojo
+ exec-maven-plugin
+
+ true
+ ${skipTests}
+
+
+
+ kuttl-unit-tests
+ test
+
+ exec
+
+
+ /usr/bin/env
+
+ sh
+ -c
+
+ 2>&1 \
+ src/test/resources/mvn_exec_run_kuttl.sh \
+ --config src/test/resources/kuttl-test-unit.yaml \
+ --artifacts-dir target/kuttl-reports
+
+
+
+
+
+ kuttl-integration-tests
+ verify
+
+ exec
+
+
+ /usr/bin/env
+
+ sh
+ -c
+
+ 2>&1 \
+ src/test/resources/mvn_exec_run_kuttl.sh \
+ --config src/test/resources/kuttl-test-integration.yaml \
+ --artifacts-dir target/kuttl-reports
+
+
+
+
+
+
+
+
+
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/README.md b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/README.md
new file mode 100644
index 0000000000..da3c6cd211
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/README.md
@@ -0,0 +1,39 @@
+
+
+# Base
+
+Some values such as SERVICE name, SERVICEACCOUNT name,
+and RBAC role are hard-coded in the environment-configmap.yaml
+and supplied into the pods as environment variables. Other
+hardcodings include the service name ('hadoop') and the
+namespace we run in (also 'hadoop').
+
+The hadoop Configuration system can interpolate environment variables
+into '\*.xml' file values ONLY. See
+[Configuration Javadoc](http://hadoop.apache.org/docs/current/api/org/apache/hadoop/conf/Configuration.html)
+
+...but we can not do interpolation of SERVICE name into '\*.xml' file key names
+as is needed when doing HA in hdfs-site.xml... so for now, we have
+hard-codings in 'hdfs-site.xml' key names. For example, the property key name
+`dfs.ha.namenodes.hadoop` has the SERVICE name ('hadoop') in it or the key
+`dfs.namenode.http-address.hadoop` (TODO: Fix/Workaround).
+
+Edit of pod resources or jvm args for a process are
+done in place in the yaml files or in kustomization
+replacements in overlays.
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/delete-format-hdfs-configmap-job.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/delete-format-hdfs-configmap-job.yaml
new file mode 100644
index 0000000000..cc52f4d36e
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/delete-format-hdfs-configmap-job.yaml
@@ -0,0 +1,89 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Job to delete the 'format-hdfs' configmap after hdfs has come up
+# successfully. The 'format-hdfs' configmap is added by running
+# 'kubectl -n hadoop apply -k tools/format-hdfs' (You need the
+# '-n hadoop' to apply the configmap to the 'hadoop' namespace).
+# Add the configmap if you want hdfs to format the filesystem.
+# Do this on initial install only or if you want to clean out
+# the current HDFS data.
+#
+# If the 'format-hdfs' configmap is NOT present, this Job exits/completes.
+# Otherwise, it keeps probing until HDFS is up and healthy, and then
+# this job removes the 'format-hdfs' configmap. The presence of the
+# 'format-hdfs' configmap is checked by all hdfs pods on startup. If
+# the configmap is present, they clean out their data directories and run
+# format/recreate of their data directories. To install the 'format-hdfs'
+# configmap, do it before launch of hdfs. See tools/format-hdfs.
+---
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: delete-format-hdfs-configmap
+spec:
+ ttlSecondsAfterFinished: 300
+ template:
+ spec:
+ containers:
+ - image: hadoop
+ name: delete-format-hdfs-configmap
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/bash
+ - -c
+ - |-
+ set -xe
+ # See if 'format-hdfs' configmap is present.
+ # If not, then there is nothing for this job to do, complete, exit 0.
+ /tmp/scripts/exists_configmap.sh format-hdfs || {
+ echo "No 'format-hdfs' configmap found so no work to do; exiting"
+ exit 0
+ }
+              # The `format-hdfs` configmap is present. Remove it after HDFS is fully up.
+ /tmp/scripts/jmxping.sh namenode ${HADOOP_SERVICE}
+ /tmp/scripts/jmxping.sh datanode ${HADOOP_SERVICE}
+ # TODO: Should we check if ha and if so, if a NN active... get a report on health?
+ # HDFS is up. Delete the format-hdfs flag.
+ /tmp/scripts/delete_configmap.sh format-hdfs
+ resources:
+ requests:
+ cpu: '0.2'
+ memory: 256Mi
+ limits:
+ cpu: '0.5'
+ memory: 512Mi
+ envFrom:
+ - configMapRef:
+ name: environment
+ volumeMounts:
+ - mountPath: /tmp/scripts
+ name: scripts
+ # Scratch dir is a location where init containers place items for later use
+ # by the main containers when they run.
+ - mountPath: /tmp/scratch
+ name: scratch
+ serviceAccountName: hadoop
+ restartPolicy: Never
+ volumes:
+ - configMap:
+ name: scripts
+ defaultMode: 0555
+ name: scripts
+ # Scratch dir is location where init containers place items for later use
+ # by the main containers when they run.
+ - emptyDir: {}
+ name: scratch
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/environment-configmap.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/environment-configmap.yaml
new file mode 100644
index 0000000000..d018c22c82
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/environment-configmap.yaml
@@ -0,0 +1,70 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Common environment variables shared across pods.
+# Include w/ the 'envFrom:' directive.
+# We have to be pedantic in here. We cannot have a value
+# refer to a definition made earlier; the interpolation
+# doesn't work.
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: environment
+data:
+ DOMAIN: svc.cluster.local
+ # HADOOP_HOME, HADOOP_HDFS_HOME, etc., and HBASE_HOME are provided by the images.
+ #
+  # The headless service that pods in our statefulsets come up in.
+ # See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id
+ # The headless-service is defined in the adjacent rbac.yaml.
+ # Matches the serviceName we have on our statefulsets.
+ # Required that we create it according to https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#limitations
+ HADOOP_SERVICE: hadoop
+ # dfs.http.policy
+ # If HTTPS_ONLY or HTTPS_OR_HTTP then we'll depend on https in UI and jmx'ing
+ # and will adjust schema and ports accordingly. If https, we need to get certificates
+  # so cert-manager, etc., needs to be installed.
+ HTTP_POLICY: HTTP_ONLY
+ DFS_HTTPS_ENABLE: "false"
+ HBASE_SSL_ENABLED: "false"
+ HTTP_AUTH: kerberos
+ # The insecure port for now.
+ DATANODE_DATA_DIR: /data00/dn
+ JOURNALNODE_DATA_DIR: /data00/jn
+ NAMENODE_DATA_DIR: /data00/nn
+ HDFS_AUDIT_LOGGER: INFO,RFAAUDIT
+ HADOOP_DAEMON_ROOT_LOGGER: INFO,RFA,CONSOLE
+ HADOOP_ROOT_LOGGER: INFO,RFA,CONSOLE
+ HADOOP_SECURITY_LOGGER: INFO,RFAS
+ HADOOP_CONF_DIR: /etc/hadoop
+ HADOOP_LOG_DIR: /var/log/hadoop
+ HADOOP_SECURE_LOG: /var/log/hadoop
+ HBASE_ROOT_LOGGER: DEBUG,RFA,console
+ HBASE_LOG_DIR: /var/log/hbase
+ HBASE_CONF_DIR: /etc/hbase
+ # if [ "$HBASE_NO_REDIRECT_LOG" != "" ]; then ... so we are asking for NO redirect of logs.
+ HBASE_NO_REDIRECT_LOG: "true"
+ HBASE_MANAGES_ZK: "false"
+ DFS_REPLICATION: "1"
+ # What percentage of the container memory to give over to the JVM.
+ # Be aware that we look at the container resource limit, NOT request: e.g. if
+ # the resource request memory is set to 8G and the limit is 16G and the
+ # JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT is 50 as in 50%,
+ # the heap will be set to 8G: i.e. 1/2 of the 16G limit.
+ # ip-172-18-132-227.us-west-2.compute.internal
+ # See https://dzone.com/articles/best-practices-java-memory-arguments-for-container
+ JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT: "45"
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/java.security b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/java.security
new file mode 100644
index 0000000000..c5c4f0403d
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/java.security
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+networkaddress.cache.ttl=1
+networkaddress.cache.negative.ttl=0
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/jmxexporter.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/jmxexporter.yaml
new file mode 100644
index 0000000000..4dd20fa0e4
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/jmxexporter.yaml
@@ -0,0 +1,32 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# We run the jmxexporter on almost all processes to convert jmx metrics to prometheus.
+# This is the config file it uses.
+#
+# Don't lowercase. Leave the metrics in camelcase. Do this because while
+# jmxexport can lowercase metrics names, telegraf can't.
+#
+#lowercaseOutputName: false
+#lowercaseOutputLabelNames: false
+# From https://godatadriven.com/blog/monitoring-hbase-with-prometheus/
+#rules:
+# - pattern: HadoopNamespace_([^\W_]+)_table_([^\W_]+)_region_([^\W_]+)_metric_(\w+)
+# name: HBase_metric_$4
+# labels:
+# namespace: "$1"
+# table: "$2"
+# region: "$3"
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/kustomization.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/kustomization.yaml
new file mode 100644
index 0000000000..43dd57c930
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/kustomization.yaml
@@ -0,0 +1,71 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+configMapGenerator:
+- name: hadoop-configuration
+ # Base set of hadoop configurations. Overlays will add to the set here.
+ files:
+ - log4j.properties=log4j.properties.hadoop
+- name: scripts
+ # Useful scripts
+ files:
+ - scripts/jmxping.sh
+ - scripts/apiserver_access.sh
+ - scripts/get_statefulset_replica_count.sh
+ - scripts/get_statefulset.sh
+ - scripts/exists_configmap.sh
+ - scripts/delete_configmap.sh
+ - scripts/topology.sh
+ - scripts/describe_node.sh
+ - scripts/get_node_name_from_pod_IP.sh
+ - scripts/get_node_labels.sh
+ - scripts/get_node_labels_from_pod_IP.sh
+ - scripts/log.sh
+ options:
+ disableNameSuffixHash: true
+- name: global-files
+ # Add files used by most/all processes into a global configuration configmap
+  # accessible to all processes. The environment-configmap defines env variables used by
+ # all processes and pods. This configmap loads files used by each process.
+ files:
+ - jmxexporter.yaml
+ - java.security
+ - ssl-client.xml
+ - ssl-server.xml
+ options:
+ disableNameSuffixHash: true
+
+secretGenerator:
+- name: keystore-password
+ type: Opaque
+ options:
+ disableNameSuffixHash: true
+ literals:
+ - password=changeit
+
+resources:
+- namespace.yaml
+# Global environment variables read in by pods
+- environment-configmap.yaml
+- rbac.yaml
+- delete-format-hdfs-configmap-job.yaml
+# These depend on cert-manager being installed.
+# See https://cert-manager.io/docs/installation/
+#- clusterissuer.yaml
+#- certificate.yaml
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/log4j.properties.hadoop b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/log4j.properties.hadoop
new file mode 100644
index 0000000000..df7cf7b6b8
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/log4j.properties.hadoop
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+hadoop.console.threshold=LOG
+hadoop.log.maxbackupindex=20
+hadoop.log.maxfilesize=256MB
+hadoop.root.logger=TRACE,CONSOLE
+hadoop.security.log.file=SecurityAuth-${user.name}.audit
+hadoop.security.log.maxbackupindex=20
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.logger=INFO,RFAS
+hdfs.audit.log.maxbackupindex=20
+hdfs.audit.log.maxfilesize=256MB
+hdfs.audit.logger=INFO,RFAAUDIT
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+log4j.appender.CONSOLE.Threshold=${hadoop.console.threshold}
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+log4j.appender.RFA.MaxBackupIndex=${hadoop.log.maxbackupindex}
+log4j.appender.RFA.MaxFileSize=${hadoop.log.maxfilesize}
+log4j.appender.RFAAUDIT=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.RFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAAUDIT.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
+log4j.appender.RFAAUDIT.MaxFileSize=${hdfs.audit.log.maxfilesize}
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.category.SecurityLogger=${hadoop.security.logger}
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.logger.org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy=DEBUG
+log4j.logger.org.apache.hadoop.net.NetworkTopology=DEBUG
+log4j.rootLogger=${hadoop.root.logger}
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/namespace.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/namespace.yaml
new file mode 100644
index 0000000000..f3e73a6eea
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/namespace.yaml
@@ -0,0 +1,23 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Allow the namespace of the user's Kustomization to be the destination of the deployment.
+# How to manage the namespace with Kustomize -- https://stackoverflow.com/a/71150557
+---
+apiVersion: v1
+kind: Namespace
+metadata:
+ name: ~~illegal_value_to_be_overridden_in_Kustomization~~
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/rbac.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/rbac.yaml
new file mode 100644
index 0000000000..29e9c89e5a
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/rbac.yaml
@@ -0,0 +1,103 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Service and ServiceAccount names are hard-coded as 'hadoop'.
+# RBAC Role name is also hard-coded as 'hadoop-role'. Service selects on
+# an app named 'hadoop', another hard-coding.
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: Role
+metadata:
+ name: hadoop-role
+rules:
+- resources:
+ - configmaps
+ verbs:
+ - get
+ - delete
+ - list
+ apiGroups:
+ - ''
+- resources:
+ - namespaces
+ verbs:
+ - get
+ - list
+ apiGroups:
+ - ''
+- resources:
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ apiGroups:
+ - 'apps'
+ - 'api'
+- resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - delete
+ - watch
+ apiGroups:
+ - ''
+- resources:
+ - leases
+ verbs:
+ - get
+ - list
+ - watch
+ - create
+ - update
+ - patch
+ - delete
+ apiGroups:
+ - coordination.k8s.io
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: RoleBinding
+metadata:
+ name: hadoop-role-binding
+subjects:
+- kind: ServiceAccount
+ name: hadoop
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: hadoop-role
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: hadoop
+---
+# Headless-service to cluster all our pods under
+# Matches the ServiceAccount above referenced by statefulsets
+# in their serviceName.
+# See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id
+# This is required for statefulsets. See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#limitations
+apiVersion: v1
+kind: Service
+metadata:
+ name: hadoop
+spec:
+ clusterIP: None
+ publishNotReadyAddresses: true
+ selector:
+ app: hadoop
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/apiserver_access.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/apiserver_access.sh
new file mode 100755
index 0000000000..4a2929f2e8
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/apiserver_access.sh
@@ -0,0 +1,26 @@
+#! /usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Defines used accessing the apiserver.
+NAMESPACE=$(cat /var/run/secrets/kubernetes.io/serviceaccount/namespace)
+export NAMESPACE
+APISERVER=https://$KUBERNETES_SERVICE_HOST:$KUBERNETES_SERVICE_PORT
+export APISERVER
+CACERT=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+export CACERT
+TOKEN=$(cat /var/run/secrets/kubernetes.io/serviceaccount/token)
+export TOKEN
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/delete_configmap.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/delete_configmap.sh
new file mode 100755
index 0000000000..58a3107580
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/delete_configmap.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Get the description of the named statefulset
+set -x
+configmap_name="${1}"
+outfile="$(mktemp "/tmp/$(basename "$0").XXXX")"
+trap '{ rm -f -- "$outfile"; }' EXIT
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+# shellcheck source=/dev/null
+source "${script_dir}/apiserver_access.sh"
+# Following model described here: https://chengdol.github.io/2019/11/06/k8s-api/
+# http_code is the return status code
+# From https://docs.okd.io/3.7/rest_api/api/v1.ConfigMap.html#Delete-api-v1-namespaces-namespace-configmaps-name
+http_code=$(curl -w "%{http_code}" -sS -X DELETE --cacert "$CACERT" -H "Content-Type: application/json" -H "Accept: application/json, */*" -H "Authorization: Bearer $TOKEN" "$APISERVER/api/v1/namespaces/$NAMESPACE/configmaps/$configmap_name" -o "$outfile")
+if [[ $http_code -ne 200 ]]; then
+ echo "{\"Result\": \"Failure\", \"httpReturnCode\":$http_code}" | jq '.'
+ exit 1
+fi
+cat "$outfile"
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/describe_node.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/describe_node.sh
new file mode 100644
index 0000000000..df1ffa1e03
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/describe_node.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Get the description of the named node
+set -x
+node="${1}"
+outfile="$(mktemp "/tmp/$(basename "$0")".XXXX)"
+trap '{ rm -f -- "$outfile"; }' EXIT
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+# shellcheck source=/dev/null
+source "${script_dir}/apiserver_access.sh"
+# Following model described here: https://chengdol.github.io/2019/11/06/k8s-api/
+# http_code is the return status code
+http_code=$(curl -w "%{http_code}" -sS --cacert "$CACERT" -H "Content-Type: application/json" -H "Accept: application/json, */*" -H "Authorization: Bearer $TOKEN" "$APISERVER/api/v1/nodes/$node" -o "$outfile")
+if [[ $http_code -ne 200 ]]; then
+ echo "{\"Result\": \"Failure\", \"httpReturnCode\":$http_code}" | jq '.'
+ exit 1
+fi
+cat "$outfile"
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/exists_configmap.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/exists_configmap.sh
new file mode 100755
index 0000000000..7ed9a8fb27
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/exists_configmap.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Check passed in configmap exists.
+# Also checks if configmap with the POD_NAME exists too.
+# Returns zero if found.
+set -x
+configmap_name="${1}"
+outfile="$(mktemp "/tmp/$(basename "$0").XXXX")"
+trap 'rm -f -- "$outfile"' EXIT
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+# shellcheck source=/dev/null
+source "${script_dir}/apiserver_access.sh"
+# Following model described here: https://chengdol.github.io/2019/11/06/k8s-api/
+# http_code is the return status code
+# From https://docs.okd.io/3.7/rest_api/api/v1.ConfigMap.html#Delete-api-v1-namespaces-namespace-configmaps-name
+http_code=$(curl -w "%{http_code}" -sS --cacert "$CACERT" -H "Content-Type: application/json" -H "Accept: application/json, */*" -H "Authorization: Bearer $TOKEN" "$APISERVER/api/v1/namespaces/$NAMESPACE/configmaps/$configmap_name" -o "$outfile")
+[[ $http_code -eq 200 ]] || (
+ # The configmap does not exist. Look for a configmap with this POD_NAME as a suffix too.
+ http_code=$(curl -w "%{http_code}" -sS --cacert "$CACERT" -H "Content-Type: application/json" -H "Accept: application/json, */*" -H "Authorization: Bearer $TOKEN" "$APISERVER/api/v1/namespaces/$NAMESPACE/configmaps/$configmap_name.${POD_NAME}" -o "$outfile")
+ [[ $http_code -eq 200 ]]
+)
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_node_labels.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_node_labels.sh
new file mode 100644
index 0000000000..bb50a6560e
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_node_labels.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Fetch the labels json object for named node
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+"${script_dir}/describe_node.sh" "${1}" | jq -r '.metadata.labels'
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_node_labels_from_pod_IP.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_node_labels_from_pod_IP.sh
new file mode 100644
index 0000000000..7458de36b3
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_node_labels_from_pod_IP.sh
@@ -0,0 +1,32 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Get the labels json object of the node upon which the pod with the provided pod IP is running
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+# shellcheck source=/dev/null
+source "${script_dir}/log.sh" "$TOPOLOGY_LOG" # source log function; the $TOPOLOGY_LOG variable is set in topology.sh
+nodeName=$("${script_dir}/get_node_name_from_pod_IP.sh" "${1}") # requesting node name based on pod IP
+if [[ "$nodeName" == "null" ]] # if no node is found when querying with this pod IP
+then
+ log -w "Unhandled case: Kubernetes instance not found for this pod IP"
+ echo "null" # null will get passed back to the topology caller; then when looking for the pertinent labels topology.sh will label this DN with the default rack
+else
+ log "nodeName found in pod description: $nodeName"
+ nodeLabels="$("${script_dir}/get_node_labels.sh" "$nodeName")" # getting the labels of the Kube node the pod is running on
+ log "node metadata labels: $nodeLabels"
+ echo "$nodeLabels"
+fi
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_node_name_from_pod_IP.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_node_name_from_pod_IP.sh
new file mode 100644
index 0000000000..a0cb279671
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_node_name_from_pod_IP.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Get the name of the Kubernetes node with the provided hadoop pod IP
+set -x
+podIP="${1}" # this will be the IP of a datanode
+outfile="$(mktemp "/tmp/$(basename "$0").XXXX")"
+trap '{ rm -f -- "$outfile"; }' EXIT
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+# shellcheck source=/dev/null
+source "${script_dir}/apiserver_access.sh"
+# Following model described here: https://chengdol.github.io/2019/11/06/k8s-api/
+# http_code is the return status code
+http_code=$(curl -w "%{http_code}" -sS --cacert "$CACERT" -H "Content-Type: application/json" -H "Accept: application/json, */*" -H "Authorization: Bearer $TOKEN" "$APISERVER/api/v1/namespaces/hadoop/pods?fieldSelector=status.podIP%3D$podIP" -o "$outfile")
+if [[ $http_code -ne 200 ]]; then
+ echo "{\"Result\": \"Failure\", \"httpReturnCode\":$http_code}" | jq '.'
+ exit 1
+fi
+
+# using jq, only return the name of the node containing this pod; jq will return null if no node is found
+jq -r .items[0].spec.nodeName "$outfile"
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_statefulset.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_statefulset.sh
new file mode 100755
index 0000000000..b6a9162015
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_statefulset.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Get the description of the named statefulset
+set -x
+statefulset="${1}"
+outfile="$(mktemp "/tmp/$(basename "$0").XXXX")"
+trap '{ rm -f -- "$outfile"; }' EXIT
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+# shellcheck source=/dev/null
+source "${script_dir}/apiserver_access.sh"
+# Following model described here: https://chengdol.github.io/2019/11/06/k8s-api/
+# http_code is the return status code
+http_code=$(curl -w "%{http_code}" -sS --cacert "$CACERT" -H "Content-Type: application/json" -H "Accept: application/json, */*" -H "Authorization: Bearer $TOKEN" "$APISERVER/apis/apps/v1/namespaces/$NAMESPACE/statefulsets/$statefulset" -o "$outfile")
+if [[ $http_code -ne 200 ]]; then
+ echo "{\"Result\": \"Failure\", \"httpReturnCode\":$http_code}" | jq '.'
+ exit 1
+fi
+cat "$outfile"
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_statefulset_replica_count.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_statefulset_replica_count.sh
new file mode 100755
index 0000000000..ad1149a375
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/get_statefulset_replica_count.sh
@@ -0,0 +1,20 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Fetch the replica count for named statefulset
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+"${script_dir}/get_statefulset.sh" "${1}" | jq '.spec.replicas'
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/jmxping.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/jmxping.sh
new file mode 100755
index 0000000000..2cf7aabda1
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/jmxping.sh
@@ -0,0 +1,96 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Usage: jmxping.sh <role> <service> [<count>]
+# JMX ping that there are at least '<count>' instances of '<role>'
+# running in the sub-domain specified by '<service>'
+# (See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#stable-network-id).
+# If no '<count>' supplied, we read the replica count from passed
+# in '<role>' statefulset from apiserver.
+set -x
+role="${1}"
+service="${2}"
+count_param="${3}"
+# Schema
+schema=http
+if [[ ${HTTP_POLICY} == HTTPS_* ]]; then
+ schema=https
+fi
+# Jmxport to use
+case "${role}" in
+ datanode)
+ jmxport=9864
+ if [[ ${HTTP_POLICY} == HTTPS_* ]]; then
+ # If HTTP policy is https, use https jmx port.
+ jmxport=9865
+ fi
+ ;;
+ namenode)
+ jmxport=9870
+ if [[ ${HTTP_POLICY} == HTTPS_* ]]; then
+ # If HTTP policy is https, use https jmx port.
+ jmxport=9871
+ fi
+ ;;
+ journalnode)
+ jmxport=8480
+ if [[ ${HTTP_POLICY} == HTTPS_* ]]; then
+ # If HTTP policy is https, use https jmx port.
+ jmxport=8481
+ fi
+ ;;
+ master)
+ jmxport=16010
+ ;;
+ regionserver)
+ jmxport=16030
+ ;;
+ *)
+ exit 1
+ ;;
+esac
+
+interval=5
+timeout=$((60 * 60))
+while ((timeout > 0))
+do
+ # The statefulset we depend on may not have deployed yet... so the first
+ # attempts at getting replicas may fail.
+ # https://stackoverflow.com/questions/3601515/how-to-check-if-a-variable-is-set-in-bash
+ replicas="$(/tmp/scripts/get_statefulset_replica_count.sh "$role")"
+ count=${count_param}
+ if [ "x" = "${count_param}x" ]; then
+ count=${replicas}
+ else
+ count=$((replicas < count_param? replicas : count_param ))
+ fi
+ seq_end=$(( count - 1 ))
+ total=0
+ for i in $( seq 0 $seq_end ); do
+ # Url is http://journalnode-1:8480/jmx?qry=java.lang:type=OperatingSystem
+ url="${schema}://${role}-${i}.${service}:${jmxport}/jmx?qry=java.lang:type=OperatingSystem"
+ # Returns 1 if success, zero otherwise.
+ result=$(curl --cacert /tmp/scratch/ca.crt -v "$url" | grep -c SystemLoadAverage)
+ ((total+=result))
+ ((total != count)) || exit 0
+ done
+ timeout=$((timeout - interval))
+ echo "Failed; sleeping $interval, then retrying for $timeout more seconds"
+ sleep $interval
+done
+echo "Timedout!"
+exit 1
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/log.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/log.sh
new file mode 100644
index 0000000000..f1b9b6a522
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/log.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# when sourcing log, first argument should be the file within $HADOOP_LOG_DIR that will be written to
+
+filename=${1}
+LOG_FILEPATH="$HADOOP_LOG_DIR/$filename"
+
+# logs provided message to whichever filepath is provided when sourcing log.sh
+# Use -e for error logging, -w for warning logs
+# log [-ew] MESSAGE
+log(){
+ prefix="" # No prefix with default INFO-level logging
+ while getopts ":ew" arg; do
+ case $arg in
+ e) # change prefix to ERROR: in logs
+ prefix="ERROR:"
+ shift
+ ;;
+ w) # change prefix to WARNING: in logs
+ prefix="WARNING:"
+ shift
+ ;;
+ *) # what is this?
+ ;;
+ esac
+ done
+ message=${1}
+ echo "$(date +"%F %T") $prefix $message" >> "$LOG_FILEPATH"
+}
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/topology.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/topology.sh
new file mode 100755
index 0000000000..d405136aff
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/scripts/topology.sh
@@ -0,0 +1,66 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Using topology script notion for HDFS rack awareness: https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-common/RackAwareness.html
+
+# This script takes in one or more datanode IPs as args and passes out rack name(s) for the pod(s) based on the EKS instance(s) they're running in.
+# It will look for information about the EKS instance's partition placement group: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/placement-groups.html#placement-groups-partition
+# As well as information about the EKS instance's availability zone according to AWS: https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html#concepts-availability-zones
+
+# if partition placement group information is found (in the form of the $partition_group_label variable defined below),
+# then the rack passed out will be "partition-group-<partition group number>".
+
+# Otherwise, the script will take in availability zone information, pass out a
+# rack label like "availability-zone-<zone name>".
+
+# Supposition here is that when datanodes crash, the namenodes will provide the same rack when the pod comes back up.
+# This is the behavior that's been observed when terminating datanodes manually and watching topology logs as they re-initialize.
+
+script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )"
+
+TOPOLOGY_LOG="topology.log" # filepath within $HADOOP_LOG_DIR wherein topology logs will be placed
+export TOPOLOGY_LOG
+
+# shellcheck source=/dev/null
+source "${script_dir}/log.sh" "$TOPOLOGY_LOG"
+partition_group_label="partition_number" # this is an assumption made based on the Siri cluster at the moment; modify this variable if the Kube node label signifying placement groups is named differently
+
+log "argument(s) input to script: $*"
+for dn_IP in "$@"
+do
+ log "datanode IP: $dn_IP"
+ nodeLabels="$("${script_dir}/get_node_labels_from_pod_IP.sh" "$dn_IP")"
+ nodePartitionGroup="$(echo "$nodeLabels" | jq -r ".$partition_group_label")"
+ if [[ "$nodePartitionGroup" == "null" ]];
+ then
+ nodeAZ="$(echo "$nodeLabels" | jq -r '."topology.kubernetes.io/zone"')"
+ if [[ "$nodeAZ" == "null" ]];
+ then
+ rack="/default-rack" # when no partition group or availability zone info is found for the datanode
+ log "No partition groups or availability zones found; output default rack $rack for $dn_IP"
+ echo "$rack"
+ else
+ rack="/availability-zone-$nodeAZ"
+ log "output rack $rack for $dn_IP"
+ echo "$rack"
+ fi
+ else
+ rack="/partition-group-$nodePartitionGroup"
+ log "output rack $rack for $dn_IP"
+ echo "$rack"
+ fi
+done
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/ssl-client.xml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/ssl-client.xml
new file mode 100644
index 0000000000..3a8fffffb9
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/ssl-client.xml
@@ -0,0 +1,52 @@
+
+
+
+
+ ssl.client.keystore.keypassword
+ changeit
+
+
+ ssl.client.keystore.location
+ /tmp/scratch/keystore.jks
+
+
+ ssl.client.keystore.password
+ changeit
+
+
+ ssl.client.keystore.type
+ jks
+
+
+ ssl.client.truststore.location
+ /tmp/scratch/keystore.jks
+
+
+ ssl.client.truststore.password
+ changeit
+
+
+ ssl.client.truststore.reload.interval
+ 10000
+
+
+ ssl.client.truststore.type
+ jks
+
+
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/ssl-server.xml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/ssl-server.xml
new file mode 100644
index 0000000000..25e26ddfd8
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/base/ssl-server.xml
@@ -0,0 +1,56 @@
+
+
+
+
+ ssl.server.keystore.keypassword
+
+
+
+ ssl.server.keystore.password
+ changeit
+
+
+ ssl.server.keystore.location
+ /tmp/scratch/keystore.jks
+
+
+ ssl.server.keystore.type
+ jks
+
+
+ ssl.server.truststore.location
+ /tmp/scratch/truststore.jks
+
+
+ ssl.server.truststore.keypassword
+
+
+
+ ssl.server.truststore.password
+ changeit
+
+
+ ssl.server.truststore.reload.interval
+ 10000
+
+
+ ssl.server.truststore.type
+ jks
+
+
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/components/zookeeper/README.md b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/components/zookeeper/README.md
new file mode 100644
index 0000000000..b6af185a87
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/components/zookeeper/README.md
@@ -0,0 +1,23 @@
+
+
+Uses latest image from the apache zookeeper project.
+There is then a start script in 'single-instance/start.sh'
+which sets parameters for the zookeeper image and process
+on startup. Currently only logs to STDOUT/STDERR; there
+are no files in /var/log/zookeeper.
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/components/zookeeper/single-instance/kustomization.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/components/zookeeper/single-instance/kustomization.yaml
new file mode 100644
index 0000000000..1e87468cd7
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/components/zookeeper/single-instance/kustomization.yaml
@@ -0,0 +1,35 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1alpha1
+kind: Component
+
+configMapGenerator:
+- name: zookeeper-scripts
+ files:
+ - start.sh
+# Publish where the zk ensemble can be found.
+- name: zookeeper-quorum
+ literals:
+ # Hard-coded. Default we expect a simple standalone zk at this location.
+ # One define is for hbase, the other for hadoop.
+ - HBASE_ZOOKEEPER_QUORUM="zookeeper-0.zookeeper-headless"
+ - HA_ZOOKEEPER_QUORUM="zookeeper-0.zookeeper-headless:2181"
+ options:
+ disableNameSuffixHash: true
+
+resources:
+- zookeeper.yaml
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/components/zookeeper/single-instance/start.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/components/zookeeper/single-instance/start.sh
new file mode 100755
index 0000000000..48b3d91543
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/components/zookeeper/single-instance/start.sh
@@ -0,0 +1,358 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Derived from work Copyright 2017 The Kubernetes Authors.
+# See https://github.com/kow3ns/kubernetes-zookeeper/blob/master/docker/scripts/start-zookeeper for more details
+# and then https://github.com/cloudurable/kube-zookeeper-statefulsets/
+# In the below we change the '--heap' argument to '--percentage' so
+# could set the server heap as a percentage of the container resource
+# limit rather than hard-code it.
+# Currently zookeeper.root.logger is CONSOLE only. We do not write
+# logs to files. Fix config. if you need it.
+#
+#
+# Usage: start-zookeeper [OPTIONS]
+# Starts a ZooKeeper server based on the supplied options.
+# --servers The number of servers in the ensemble. The default
+# value is 1.
+# --data_dir The directory where the ZooKeeper process will store its
+# snapshots. The default is /var/lib/zookeeper/data.
+# --data_log_dir The directory where the ZooKeeper process will store its
+# write ahead log. The default is
+# /var/lib/zookeeper/data/log.
+# --conf_dir The directory where the ZooKeeper process will store its
+# configuration. The default is /opt/zookeeper/conf.
+# --client_port The port on which the ZooKeeper process will listen for
+# client requests. The default is 2181.
+
+# --election_port The port on which the ZooKeeper process will perform
+# leader election. The default is 3888.
+
+# --server_port The port on which the ZooKeeper process will listen for
+# requests from other servers in the ensemble. The
+# default is 2888.
+
+# --tick_time The length of a ZooKeeper tick in ms. The default is
+# 2000.
+
+# --init_limit The number of Ticks that an ensemble member is allowed
+# to perform leader election. The default is 10.
+
+# --sync_limit The maximum session timeout that the ensemble will
+# allows a client to request. The default is 5.
+
+# --percentage The percentage of container memory to give to the JVM.
+
+# --max_client_cnxns The maximum number of client connections that the
+# ZooKeeper process will accept simultaneously. The
+# default is 1000.
+
+# --snap_retain_count The maximum number of snapshots the ZooKeeper process
+# will retain if purge_interval is greater than 0. The
+# default is 3.
+
+# --purge_interval The number of hours the ZooKeeper process will wait
+# between purging its old snapshots. If set to 0 old
+# snapshots will never be purged. The default is 0.
+
+# --max_session_timeout The maximum time in milliseconds for a client session
+# timeout. The default value is 2 * tick time.
+
+# --min_session_timeout The minimum time in milliseconds for a client session
+# timeout. The default value is 20 * tick time.
+
+# --log_level The log level for the zookeeper server. Either FATAL,
+# ERROR, WARN, INFO, DEBUG. The default is INFO.
+
+# --quorum_listen_on_all_ips
+# When set to true the ZooKeeper server will listen for
+# connections from its peers on all available IP addresses,
+# and not only the address configured in the server list of
+# the configuration file. It affects the connections handling
+# the ZAB protocol and the Fast Leader Election protocol.
+# Default value is false.
+set -x
+
+ZOOKEEPER_HOME="$( ls -d /apache-zookeeper* )"
+USER="$(whoami)"
+HOST="$(hostname -s)"
+DOMAIN="$(hostname -d)"
+LOG_LEVEL=INFO
+DATA_DIR="/var/lib/zookeeper/data"
+DATA_LOG_DIR="/var/lib/zookeeper/log"
+LOG_DIR="/var/log/zookeeper"
+CONF_DIR="/opt/zookeeper/conf"
+CLIENT_PORT=2181
+SERVER_PORT=2888
+ELECTION_PORT=3888
+PROM_PORT=7001
+TICK_TIME=2000
+INIT_LIMIT=10
+SYNC_LIMIT=5
+JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT=50
+MAX_CLIENT_CNXNS=1000
+SNAP_RETAIN_COUNT=3
+PURGE_INTERVAL=0
+SERVERS=1
+QUORUM_LISTEN_ON_ALL_IPS=false
+
+function print_usage() {
+echo "\
+Usage: start-zookeeper [OPTIONS]
+Starts a ZooKeeper server based on the supplied options.
+ --servers The number of servers in the ensemble. The default
+ value is 1.
+
+ --data_dir The directory where the ZooKeeper process will store its
+ snapshots. The default is /var/lib/zookeeper/data.
+
+ --data_log_dir The directory where the ZooKeeper process will store its
+ write ahead log. The default is
+ /var/lib/zookeeper/data/log.
+
+ --conf_dir The directory where the ZooKeeper process will store its
+ configuration. The default is /opt/zookeeper/conf.
+
+ --client_port The port on which the ZooKeeper process will listen for
+ client requests. The default is 2181.
+
+ --election_port The port on which the ZooKeeper process will perform
+ leader election. The default is 3888.
+
+ --server_port The port on which the ZooKeeper process will listen for
+ requests from other servers in the ensemble. The
+ default is 2888.
+
+ --tick_time The length of a ZooKeeper tick in ms. The default is
+ 2000.
+
+ --init_limit The number of Ticks that an ensemble member is allowed
+ to perform leader election. The default is 10.
+
+ --sync_limit The maximum session timeout that the ensemble will
+ allows a client to request. The default is 5.
+
+ --percentage The percentage of container memory to give to the JVM.
+
+ --max_client_cnxns The maximum number of client connections that the
+ ZooKeeper process will accept simultaneously. The
+ default is 1000.
+
+ --snap_retain_count The maximum number of snapshots the ZooKeeper process
+ will retain if purge_interval is greater than 0. The
+ default is 3.
+
+ --purge_interval The number of hours the ZooKeeper process will wait
+ between purging its old snapshots. If set to 0 old
+ snapshots will never be purged. The default is 0.
+
+ --max_session_timeout The maximum time in milliseconds for a client session
+ timeout. The default value is 2 * tick time.
+
+ --min_session_timeout The minimum time in milliseconds for a client session
+ timeout. The default value is 20 * tick time.
+
+ --log_level The log level for the zookeeper server. Either FATAL,
+ ERROR, WARN, INFO, DEBUG. The default is INFO.
+"
+}
+
+function create_data_dirs() {
+ if [ ! -d "$DATA_DIR" ]; then
+ mkdir -p "$DATA_DIR"
+ chown -R "$USER":"$USER" "$DATA_DIR"
+ fi
+
+ if [ ! -d "$DATA_LOG_DIR" ]; then
+ mkdir -p "$DATA_LOG_DIR"
+ chown -R "$USER":"$USER" "$DATA_LOG_DIR"
+ fi
+
+ if [ ! -d "$LOG_DIR" ]; then
+ mkdir -p "$LOG_DIR"
+ chown -R "$USER":"$USER" "$LOG_DIR"
+ fi
+ if [ ! -f "$ID_FILE" ] && [ "$SERVERS" -gt 1 ]; then
+ echo "$MY_ID" >> "$ID_FILE"
+ fi
+}
+
+function print_servers() {
+ for (( i=1; i<=SERVERS; i++ ))
+ do
+ echo "server.$i=$NAME-$((i-1)).$DOMAIN:$SERVER_PORT:$ELECTION_PORT"
+ done
+}
+
+function create_config() {
+ rm -f "$CONFIG_FILE"
+ {
+ echo "#This file was autogenerated DO NOT EDIT"
+ echo "clientPort=$CLIENT_PORT"
+ echo "dataDir=$DATA_DIR"
+ echo "dataLogDir=$DATA_LOG_DIR"
+ echo "tickTime=$TICK_TIME"
+ echo "initLimit=$INIT_LIMIT"
+ echo "syncLimit=$SYNC_LIMIT"
+ echo "maxClientCnxns=$MAX_CLIENT_CNXNS"
+ echo "minSessionTimeout=$MIN_SESSION_TIMEOUT"
+ echo "maxSessionTimeout=$MAX_SESSION_TIMEOUT"
+ echo "autopurge.snapRetainCount=$SNAP_RETAIN_COUNT"
+ echo "autopurge.purgeInterval=$PURGE_INTERVAL"
+ echo "quorumListenOnAllIPs=$QUORUM_LISTEN_ON_ALL_IPS"
+ # Allow running all zk commands.
+ echo "4lw.commands.whitelist=*"
+ if [ "$SERVERS" -gt 1 ]; then
+ print_servers
+ fi
+ echo "metricsProvider.className=org.apache.zookeeper.metrics.prometheus.PrometheusMetricsProvider"
+ echo "metricsProvider.httpPort=$PROM_PORT"
+ } >> "$CONFIG_FILE"
+ cat "$CONFIG_FILE" >&2
+}
+
+function create_jvm_props() {
+ rm -f "$JAVA_ENV_FILE"
+ {
+ echo "SERVER_JVMFLAGS=\"-XX:MaxRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT} \
+ -XX:InitialRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT}\""
+ echo "ZOO_LOG_DIR=$LOG_DIR"
+ echo "JVMFLAGS="
+ } >> "$JAVA_ENV_FILE"
+}
+
+function create_log_props() {
+ rm -f "$LOGGER_PROPS_FILE"
+ echo "Creating ZooKeeper log4j configuration"
+ {
+ echo "zookeeper.root.logger=CONSOLE"
+ echo "zookeeper.console.threshold=$LOG_LEVEL"
+ echo "log4j.rootLogger=\${zookeeper.root.logger}"
+ echo "log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender"
+ echo "log4j.appender.CONSOLE.Threshold=\${zookeeper.console.threshold}"
+ echo "log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout"
+ echo "log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n"
+ } >> "$LOGGER_PROPS_FILE"
+}
+
+optspec=":hv-:"
+while getopts "$optspec" optchar; do
+
+ case "${optchar}" in
+ -)
+ case "${OPTARG}" in
+ servers=*)
+ SERVERS=${OPTARG##*=}
+ ;;
+ data_dir=*)
+ DATA_DIR=${OPTARG##*=}
+ ;;
+ data_log_dir=*)
+ DATA_LOG_DIR=${OPTARG##*=}
+ ;;
+ log_dir=*)
+ LOG_DIR=${OPTARG##*=}
+ ;;
+ conf_dir=*)
+ CONF_DIR=${OPTARG##*=}
+ ;;
+ client_port=*)
+ CLIENT_PORT=${OPTARG##*=}
+ ;;
+ election_port=*)
+ ELECTION_PORT=${OPTARG##*=}
+ ;;
+ server_port=*)
+ SERVER_PORT=${OPTARG##*=}
+ ;;
+ tick_time=*)
+ TICK_TIME=${OPTARG##*=}
+ ;;
+ init_limit=*)
+ INIT_LIMIT=${OPTARG##*=}
+ ;;
+ sync_limit=*)
+ SYNC_LIMIT=${OPTARG##*=}
+ ;;
+ percentage=*)
+ JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT=${OPTARG##*=}
+ ;;
+ max_client_cnxns=*)
+ MAX_CLIENT_CNXNS=${OPTARG##*=}
+ ;;
+ snap_retain_count=*)
+ SNAP_RETAIN_COUNT=${OPTARG##*=}
+ ;;
+ purge_interval=*)
+ PURGE_INTERVAL=${OPTARG##*=}
+ ;;
+ max_session_timeout=*)
+ MAX_SESSION_TIMEOUT=${OPTARG##*=}
+ ;;
+ min_session_timeout=*)
+ MIN_SESSION_TIMEOUT=${OPTARG##*=}
+ ;;
+ quorum_listen_on_all_ips=*)
+ QUORUM_LISTEN_ON_ALL_IPS=${OPTARG##*=}
+ ;;
+ log_level=*)
+ LOG_LEVEL=${OPTARG##*=}
+ ;;
+ *)
+ echo "Unknown option --${OPTARG}" >&2
+ exit 1
+ ;;
+ esac;;
+ h)
+ print_usage
+ exit
+ ;;
+ v)
+ echo "Parsing option: '-${optchar}'" >&2
+ ;;
+ *)
+ if [ "$OPTERR" != 1 ] || [ "${optspec:0:1}" = ":" ]; then
+ echo "Non-option argument: '-${OPTARG}'" >&2
+ fi
+ ;;
+ esac
+done
+
+MIN_SESSION_TIMEOUT=${MIN_SESSION_TIMEOUT:- $((TICK_TIME*2))}
+MAX_SESSION_TIMEOUT=${MAX_SESSION_TIMEOUT:- $((TICK_TIME*20))}
+ID_FILE="$DATA_DIR/myid"
+if [ ! -d "$CONF_DIR" ]; then
+ mkdir -p "$CONF_DIR"
+ chown -R "$USER":"$USER" "$CONF_DIR"
+fi
+CONFIG_FILE="$CONF_DIR/zoo.cfg"
+LOGGER_PROPS_FILE="$CONF_DIR/log4j.properties"
+JAVA_ENV_FILE="$CONF_DIR/java.env"
+
+if [[ $HOST =~ (.*)-([0-9]+)$ ]]; then
+ NAME=${BASH_REMATCH[1]}
+ ORD=${BASH_REMATCH[2]}
+else
+ echo "Failed to parse name and ordinal of Pod"
+ exit 1
+fi
+
+MY_ID=$((ORD+1))
+
+export ZOOCFGDIR=${CONF_DIR}
+create_config && create_jvm_props && create_log_props && create_data_dirs && exec "${ZOOKEEPER_HOME}/bin/zkServer.sh" start-foreground
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/components/zookeeper/single-instance/zookeeper.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/components/zookeeper/single-instance/zookeeper.yaml
new file mode 100644
index 0000000000..b01022972a
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/components/zookeeper/single-instance/zookeeper.yaml
@@ -0,0 +1,142 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: zookeeper
+spec:
+ replicas: 1
+ serviceName: zookeeper-headless
+ selector:
+ matchLabels:
+ cluster: zookeeper # has to match .spec.template.metadata.labels
+ role: zookeeper
+ updateStrategy:
+ type: RollingUpdate
+ podManagementPolicy: Parallel
+ template:
+ metadata:
+ labels:
+ cluster: zookeeper
+ role: zookeeper
+ spec:
+ containers:
+ - image: zookeeper
+ name: zookeeper
+ imagePullPolicy: IfNotPresent
+ resources:
+ requests:
+ cpu: '0.1'
+ memory: 768Mi
+ limits:
+ cpu: '1.0'
+ memory: 1Gi
+ env:
+ - name: JAVA_HOME
+ value: /usr/local/openjdk-11
+ - name: LANG
+ value: C.UTF-8
+ - name: PATH
+ value: /usr/local/openjdk-11/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
+ - name: ZK_LOG_DIR
+ value: /var/log/zookeeper
+ ports:
+ - containerPort: 2181
+ name: client
+ - containerPort: 2888
+ name: server
+ - containerPort: 3888
+ name: leader-election
+ - containerPort: 7001
+ name: metrics
+ command:
+ - sh
+ - -c
+ - |-
+ export LOG_DIR="${ZK_LOG_DIR}"
+ /usr/bin/zookeeper/start.sh --servers=1 --percentage=50
+ readinessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -c
+ - |-
+ [ "$(echo ruok | nc 127.0.0.1 2181)" == "imok" ]
+ initialDelaySeconds: 10
+ timeoutSeconds: 5
+ livenessProbe:
+ exec:
+ command:
+ - /bin/bash
+ - -c
+ - |-
+ [ "$(echo ruok | nc 127.0.0.1 2181)" == "imok" ]
+ initialDelaySeconds: 10
+ timeoutSeconds: 5
+ volumeMounts:
+ - name: datadir
+ mountPath: /var/lib/zookeeper
+ - mountPath: /usr/bin/zookeeper
+ name: zookeeper-scripts
+ - mountPath: /var/log/zookeeper
+ name: zk-logs
+ volumes:
+ - emptyDir: {}
+ name: zk-logs
+ - configMap:
+ name: zookeeper-scripts
+ defaultMode: 0555
+ name: zookeeper-scripts
+ volumeClaimTemplates:
+ - metadata:
+ name: datadir
+ spec:
+ accessModes: [ "ReadWriteOnce" ]
+ resources:
+ requests:
+ storage: 2Gi
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: zookeeper-pdb
+spec:
+ selector:
+ matchLabels:
+ cluster: zookeeper
+ maxUnavailable: 1
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: zookeeper-headless
+spec:
+ ports:
+ - port: 2888
+ name: server
+ - port: 3888
+ name: leader-election
+ - port: 2181
+ name: client
+ - port: 8080
+ name: http
+ clusterIP: None
+ publishNotReadyAddresses: true
+ # Select our zookeeper app. This is what gets us dns entries
+ # https://kubernetes.io/docs/concepts/services-networking/service/#with-selectors
+ selector:
+ cluster: zookeeper
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/core-site.xml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/core-site.xml
new file mode 100644
index 0000000000..782c61f0e9
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/core-site.xml
@@ -0,0 +1,96 @@
+
+
+
+
+ fs.defaultFS
+ hdfs://${env.HADOOP_SERVICE}
+
+
+ fs.trash.interval
+ 10080
+
+
+ fs.trash.checkpoint.interval
+ 10080
+
+
+ ha.zookeeper.acl
+ world:anyone:rwcda
+
+
+ ha.zookeeper.auth
+
+
+
+ ha.zookeeper.quorum
+ ${env.HA_ZOOKEEPER_QUORUM}
+
+
+ ha.zookeeper.parent-znode
+ /
+
+
+ hadoop.proxyuser.hdfs.hosts
+ *
+
+
+ hadoop.proxyuser.hdfs.users
+ *
+
+
+ hadoop.user.group.static.mapping.overrides
+ hdfs=supergroup;nobody=;
+
+
+ net.topology.script.file.name
+ /tmp/scripts/topology.sh
+
+
+ net.topology.script.number.args
+ 1
+
+
+ hadoop.rpc.protection
+ authentication
+
+
+ hadoop.security.authorization
+ false
+
+
+ hadoop.ssl.client.conf
+ ssl-client.xml
+
+
+ hadoop.ssl.enabled
+ false
+
+
+ hadoop.ssl.keystores.factory.class
+ org.apache.hadoop.security.ssl.FileBasedKeyStoresFactory
+
+
+ hadoop.ssl.require.client.cert
+ false
+
+
+ hadoop.ssl.server.conf
+ ssl-server.xml
+
+
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/dn-service.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/dn-service.yaml
new file mode 100644
index 0000000000..41515276a2
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/dn-service.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: datanode
+ labels:
+ jmxexporter: enabled
+spec:
+ selector:
+ role: datanode
+ clusterIP: None
+ ports:
+ - name: jmxexporter
+ port: 8000
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/dn-statefulset.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/dn-statefulset.yaml
new file mode 100644
index 0000000000..772b87de93
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/dn-statefulset.yaml
@@ -0,0 +1,223 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: datanode
+spec:
+ podManagementPolicy: Parallel
+ replicas: 1
+ selector:
+ matchLabels:
+ role: datanode
+ serviceName: hadoop
+ template:
+ metadata:
+ labels:
+ role: datanode
+ spec:
+ affinity:
+ podAntiAffinity:
+ requiredDuringSchedulingIgnoredDuringExecution:
+ - labelSelector:
+ matchLabels:
+ role: datanode
+ topologyKey: kubernetes.io/hostname
+ containers:
+ - image: hadoop
+ name: datanode
+ command:
+ - /bin/bash
+ - -c
+ - |-
+ # Shell context so we can pull in the environment variables set in the container and
+ # via the env and envFrom.
+ # See https://stackoverflow.com/questions/57885828/netty-cannot-access-class-jdk-internal-misc-unsafe
+ HADOOP_LOGFILE="hdfs-${HOSTNAME}.log" \
+ HDFS_DATANODE_OPTS=" \
+ -XX:MaxRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT} \
+ -XX:InitialRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT} \
+ -Djava.security.properties=/tmp/scratch/java.security \
+ -javaagent:${JMX_PROMETHEUS_JAR}=8000:/tmp/scratch/jmxexporter.yaml \
+ -Djava.library.path=${HADOOP_HOME}/lib/native \
+ --add-opens java.base/jdk.internal.misc=ALL-UNNAMED \
+ -Dio.netty.tryReflectionSetAccessible=true \
+ -Xlog:gc:/var/log/hadoop/gc.log:time,uptime:filecount=10,filesize=100M" \
+ hdfs datanode
+ # For now, just fetch local /jmx
+ # Says kubelet only exposes failures, not success: https://stackoverflow.com/questions/34455040/kubernetes-liveness-probe-logging
+ # Do better. Check this DN successfully registered w/ NN. TODO.
+ livenessProbe:
+ httpGet:
+ path: /jmx?qry=java.lang:type=OperatingSystem
+ # 9865 if HTTPS
+ port: 9864
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ failureThreshold: 3
+ readinessProbe:
+ httpGet:
+ path: /jmx?qry=java.lang:type=OperatingSystem
+ # 9865 if HTTPS
+ port: 9864
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ failureThreshold: 3
+ startupProbe:
+ httpGet:
+ path: /jmx?qry=java.lang:type=OperatingSystem
+ # 9865 if HTTPS
+ port: 9864
+ initialDelaySeconds: 10
+ failureThreshold: 30
+ periodSeconds: 10
+ resources:
+ requests:
+ cpu: '0.2'
+ memory: 1Gi
+ limits:
+ cpu: '1.0'
+ memory: 1.5Gi
+ envFrom:
+ - configMapRef:
+ name: environment
+ env:
+ # The 'node' this container is running on, not hdfs namenode.
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ ports:
+ - name: http
+ containerPort: 9864
+ - name: https
+ containerPort: 9865
+ - name: data
+ containerPort: 9866
+ - name: ipc
+ containerPort: 9867
+ - name: jmx
+ containerPort: 9864
+ volumeMounts:
+ - mountPath: /etc/hadoop
+ name: hadoop-configuration
+ - mountPath: /var/log/hadoop
+ name: hadoop-logs
+ - mountPath: /tmp/scratch
+ name: scratch
+ - mountPath: /tmp/scripts
+ name: scripts
+ - mountPath: /data00
+ name: data00
+ initContainers:
+ - image: hadoop
+ name: bootstrapper
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/bash
+ - -c
+ - |-
+ set -xe
+ mkdir -p ${HADOOP_LOG_DIR} || echo $?
+ chown -R ${USER} ${HADOOP_LOG_DIR}
+ # If format-hdfs configmap present, format.
+ ! /tmp/scripts/exists_configmap.sh format-hdfs || (
+ for dir in $( echo "${DATANODE_DATA_DIR}" | tr ',' '\n')
+ do
+ rm -rf ${dir}
+ done
+ )
+ for dir in $( echo "${DATANODE_DATA_DIR}" | tr ',' '\n')
+ do
+ mkdir -p ${dir} || :
+ chown -R ${USER} ${dir}
+ done
+ df -h
+ cp /tmp/global-files/* /tmp/scratch/
+ # Wait for the nns to come up.
+ /tmp/scripts/jmxping.sh namenode ${HADOOP_SERVICE}
+ securityContext:
+ # Run bootstrapper as root so can set ${USER} owner on data volume
+ allowPrivilegeEscalation: false
+ runAsUser: 0
+ resources:
+ requests:
+ cpu: '0.2'
+ memory: 256Mi
+ limits:
+ cpu: '0.5'
+ memory: 512Mi
+ envFrom:
+ - configMapRef:
+ name: environment
+ env:
+ # Used by scripts that run during bootstrap
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - mountPath: /data00
+ name: data00
+ - mountPath: /tmp/scripts
+ name: scripts
+ # Scratch dir is a location where init containers place items for later use
+ # by the main containers when they run.
+ - mountPath: /tmp/scratch
+ name: scratch
+ - mountPath: /tmp/global-files
+ name: global-files
+ serviceAccountName: hadoop
+ volumes:
+ - configMap:
+ name: hadoop-configuration
+ name: hadoop-configuration
+ - configMap:
+ name: scripts
+ defaultMode: 0555
+ name: scripts
+ - configMap:
+ name: global-files
+ name: global-files
+ - emptyDir: {}
+ name: hadoop-logs
+ # Scratch dir is a location where init containers place items for later use
+ # by the main containers when they run.
+ - emptyDir: {}
+ name: scratch
+ updateStrategy:
+ type: RollingUpdate
+ volumeClaimTemplates:
+ - metadata:
+ name: data00
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 10Gi
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/hdfs-site.xml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/hdfs-site.xml
new file mode 100644
index 0000000000..2ae424942e
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/hdfs-site.xml
@@ -0,0 +1,274 @@
+
+
+
+
+ dfs.block.replicator.classname
+ org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant
+
+
+ dfs.blocksize
+ 64m
+
+
+ dfs.datanode.address
+ 0.0.0.0:9866
+
+
+ dfs.datanode.balance.bandwidthPerSec
+ 20m
+
+
+ dfs.datanode.balance.max.concurrent.moves
+ 100
+
+
+ dfs.datanode.data.dir
+ ${env.DATANODE_DATA_DIR}
+
+
+ dfs.datanode.failed.volumes.tolerated
+ 0
+
+
+ dfs.datanode.du.reserved
+ 1073741824
+
+
+ dfs.datanode.fileio.profiling.sampling.percentage
+ 10
+
+
+ dfs.datanode.http.address
+ 0.0.0.0:9864
+
+
+ dfs.datanode.https.address
+ 0.0.0.0:9865
+
+
+ dfs.datanode.ipc.address
+ 0.0.0.0:9867
+
+
+ dfs.datanode.max.locked.memory
+ 0
+
+
+ dfs.datanode.peer.stats.enabled
+ true
+
+
+ dfs.encrypt.data.transfer
+ false
+
+
+ dfs.encrypt.data.transfer.algorithm
+ rc4
+
+
+ dfs.ha.automatic-failover.enabled
+ true
+
+
+ dfs.ha.fencing.methods
+ shell(/usr/bin/true)
+
+
+ dfs.journalnode.edits.dir
+ ${env.JOURNALNODE_DATA_DIR}
+
+
+ dfs.journalnode.http-address
+ 0.0.0.0:8480
+
+
+ dfs.journalnode.https-address
+ 0.0.0.0:8481
+
+
+ dfs.journalnode.rpc-address
+ 0.0.0.0:8485
+
+
+ dfs.namenode.handler.count
+ 64
+
+
+
+
+ dfs.namenode.http-bind-host
+ 0.0.0.0
+
+
+ dfs.namenode.https-bind-host
+ 0.0.0.0
+
+
+ dfs.namenode.name.dir
+ ${env.NAMENODE_DATA_DIR}
+
+
+ dfs.namenode.replication.max-streams
+ 20
+
+
+ dfs.namenode.replication.max-streams-hard-limit
+ 40
+
+
+ dfs.namenode.replication.min
+ ${env.DFS_REPLICATION}
+
+
+ dfs.namenode.replication.work.multiplier.per.iteration
+ 10
+
+
+ dfs.namenode.safemode.threshold-pct
+ 0.9
+
+
+ dfs.namenode.service.handler.count
+ 64
+
+
+ dfs.nameservices
+ ${env.HADOOP_SERVICE}
+
+
+ dfs.reformat.disabled
+ false
+
+
+ dfs.replication
+ ${env.DFS_REPLICATION}
+
+
+ dfs.replication.max
+ 512
+
+
+ ipc.8020.callqueue.impl
+ org.apache.hadoop.ipc.FairCallQueue
+
+
+ ipc.8020.scheduler.impl
+ org.apache.hadoop.ipc.DecayRpcScheduler
+
+
+ zk-dt-secret-manager.zkAuthType
+ digest
+
+
+ zk-dt-secret-manager.digest.auth
+ @/etc/hadoop/zookeeper/auth/zk-auth.txt
+
+
+ zk-dt-secret-manager.zkConnectionString
+ TODO
+
+
+ zk-dt-secret-manager.znodeWorkingPath
+ TODO
+
+
+ dfs.client.failover.proxy.provider.hadoop
+ org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider
+
+
+
+ dfs.ha.namenodes.hadoop
+ namenode-0
+
+
+ dfs.namenode.http-address.hadoop.namenode-0
+ namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:9870
+
+
+ dfs.namenode.https-address.hadoop.namenode-0
+ namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:9871
+
+
+ dfs.namenode.rpc-address.hadoop.namenode-0
+ namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8020
+
+
+ dfs.namenode.servicerpc-address.hadoop.namenode-0
+ namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8022
+
+
+ dfs.namenode.lifeline.rpc-address.hadoop.namenode-0
+ namenode-0.${env.HADOOP_SERVICE}.${env.POD_NAMESPACE}.${env.DOMAIN}:8050
+
+
+ dfs.client.https.keystore.resource
+ ssl-client.xml
+
+
+ dfs.client.https.need-auth
+ false
+
+
+ dfs.http.policy
+ ${env.HTTP_POLICY}
+
+
+ dfs.https.enable
+ ${env.DFS_HTTPS_ENABLE}
+
+
+ dfs.https.server.keystore.resource
+ ssl-server.xml
+
+
+ dfs.namenode.acls.enabled
+ true
+
+
+ dfs.datanode.use.datanode.hostname
+ true
+
+
+ dfs.client.use.datanode.hostname
+ true
+
+
+
+ dfs.namenode.datanode.registration.ip-hostname-check
+ false
+
+
+ dfs.blockreport.intervalMsec
+ 900000
+ Determines block reporting interval in milliseconds.
+ Report frequently else around recovery storms, the NN gets convinced
+ there is no block space left because of 'scheduled space' reserved.
+
+
+
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/kustomization.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/kustomization.yaml
new file mode 100644
index 0000000000..6bd3abea34
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/kustomization.yaml
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+commonLabels:
+ app: hadoop
+
+configMapGenerator:
+- name: hadoop-configuration
+ # Add in single-instance namenode and datanode hdfs-site and core-site.
+ behavior: merge
+ files:
+ - hdfs-site.xml
+ - core-site.xml
+
+resources:
+- nn-statefulset.yaml
+- nn-service.yaml
+- dn-statefulset.yaml
+- dn-service.yaml
+- ../../base
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/nn-service.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/nn-service.yaml
new file mode 100644
index 0000000000..66ac266cb4
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/nn-service.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+kind: Service
+apiVersion: v1
+metadata:
+ name: namenode
+ labels:
+ jmxexporter: enabled
+spec:
+ selector:
+ role: namenode
+ clusterIP: None
+ ports:
+ - name: jmxexporter
+ port: 8000
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/nn-statefulset.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/nn-statefulset.yaml
new file mode 100644
index 0000000000..d36a61f69b
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/main/kustomize/overlays/hdfs/nn-statefulset.yaml
@@ -0,0 +1,325 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: namenode
+spec:
+ minAvailable: 1
+ selector:
+ matchLabels:
+ role: namenode
+---
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: namenode
+spec:
+ podManagementPolicy: Parallel
+ replicas: 1
+ selector:
+ matchLabels:
+ role: namenode
+ serviceName: hadoop
+ template:
+ metadata:
+ labels:
+ role: namenode
+ spec:
+ affinity:
+ podAntiAffinity:
+ preferredDuringSchedulingIgnoredDuringExecution:
+ - podAffinityTerm:
+ labelSelector:
+ matchLabels:
+ role: namenode
+ topologyKey: kubernetes.io/hostname
+ weight: 30
+ containers:
+ - image: hadoop
+ name: namenode
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/bash
+ - -c
+ - |-
+ # Shell context so we can pull in the environment variables set in the container and
+ # via the env and envFrom.
+ # See https://stackoverflow.com/questions/57885828/netty-cannot-access-class-jdk-internal-misc-unsafe
+ HADOOP_LOGFILE="hdfs-${HOSTNAME}.log" \
+ HDFS_NAMENODE_OPTS=" \
+ -XX:MaxRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT} \
+ -XX:InitialRAMPercentage=${JVM_HEAP_PERCENTAGE_OF_RESOURCE_LIMIT} \
+ -Djava.security.properties=/tmp/scratch/java.security \
+ -javaagent:${JMX_PROMETHEUS_JAR}=8000:/tmp/scratch/jmxexporter.yaml \
+ -Djava.library.path=${HADOOP_HOME}/lib/native \
+ --add-opens java.base/jdk.internal.misc=ALL-UNNAMED \
+ -Dio.netty.tryReflectionSetAccessible=true \
+ -Xlog:gc:/var/log/hadoop/gc.log:time,uptime:filecount=10,filesize=100M" \
+ hdfs namenode
+ # For now, just fetch local /jmx
+ # Says kubelet only exposes failures, not success: https://stackoverflow.com/questions/34455040/kubernetes-liveness-probe-logging
+ livenessProbe:
+ httpGet:
+ path: /jmx?qry=java.lang:type=OperatingSystem
+ # 9871 if HTTPS
+ port: 9870
+ initialDelaySeconds: 1
+ failureThreshold: 6
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /jmx?qry=java.lang:type=OperatingSystem
+ # 9871 if HTTPS
+ port: 9870
+ initialDelaySeconds: 10
+ failureThreshold: 3
+ periodSeconds: 10
+ startupProbe:
+ httpGet:
+ path: /jmx?qry=java.lang:type=OperatingSystem
+ # 9871 if HTTPS
+ port: 9870
+ initialDelaySeconds: 10
+ failureThreshold: 30
+ periodSeconds: 10
+ resources:
+ requests:
+ cpu: '0.4'
+ memory: 2Gi
+ limits:
+ cpu: '1'
+ memory: 3Gi
+ envFrom:
+ - configMapRef:
+ name: environment
+ env:
+ # The 'node' this container is running on, not hdfs namenode.
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ ports:
+ - name: http
+ containerPort: 9870
+ - name: https
+ containerPort: 9871
+ - name: jmx
+ containerPort: 9870
+ - name: rpc
+ containerPort: 8020
+ - name: servicerpc
+ containerPort: 8022
+ - name: lifelinerpc
+ containerPort: 8050
+ volumeMounts:
+ - mountPath: /etc/hadoop
+ name: hadoop-configuration
+ - mountPath: /var/log/hadoop
+ name: hadoop-logs
+ - mountPath: /tmp/scratch
+ name: scratch
+ - mountPath: /tmp/scripts
+ name: scripts
+ - mountPath: /data00
+ name: data00
+ initContainers:
+ - image: hadoop
+ name: bootstrapper
+ imagePullPolicy: IfNotPresent
+ command:
+ # This container is running as root so can set permissions.
+ - /bin/bash
+ - -c
+ - |-
+ set -xe
+ if [ -n "${QJOURNAL}" ]; then
+ # If QJOURNAL, then HA and journalnodes are in the mix. Wait on them to come up.
+ /tmp/scripts/jmxping.sh journalnode ${HADOOP_SERVICE}
+ fi
+ # Copy over the files under global-files so in place for the runtime container.
+ cp /tmp/global-files/* /tmp/scratch/
+ # Set perms
+ chown -R ${USER} ${HADOOP_LOG_DIR}
+ # If format-hdfs configmap present, format.
+ find ${NAMENODE_DATA_DIR} || :
+ ! /tmp/scripts/exists_configmap.sh format-hdfs || (
+ rm -rf ${NAMENODE_DATA_DIR}
+ )
+ chmod 777 /data00
+ securityContext:
+ # Run bootstrapper as root so can set ${USER} owner on data volume
+ allowPrivilegeEscalation: false
+ runAsUser: 0
+ resources:
+ requests:
+ cpu: '0.2'
+ memory: 256Mi
+ limits:
+ cpu: '0.5'
+ memory: 512Mi
+ envFrom:
+ - configMapRef:
+ name: environment
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - mountPath: /etc/hadoop
+ name: hadoop-configuration
+ - mountPath: /var/log/hadoop
+ name: hadoop-logs
+ - mountPath: /data00
+ name: data00
+ - mountPath: /etc/hadoop/zookeeper/auth
+ name: zookeeper-credentials
+ readOnly: true
+ - mountPath: /tmp/scripts
+ name: scripts
+ # Scratch dir is a location where init containers place items for later use
+ # by the main containers when they run.
+ - mountPath: /tmp/scratch
+ name: scratch
+ - mountPath: /tmp/global-files
+ name: global-files
+ - image: hadoop
+ name: format-hdfs
+ imagePullPolicy: IfNotPresent
+ command:
+ # Runs as the image/hdfs user.
+ - /bin/bash
+ - -c
+ - |-
+ set -xe
+ find /data00 || echo $?
+ # Run format if no nn dir.
+ if [ ! -d "${NAMENODE_DATA_DIR}" ]; then
+ ordinal=$(echo $POD_NAME | sed -e 's/^[^-]*-\(.*\)/\1/')
+ case $ordinal in
+ 0)
+ hdfs namenode -format -nonInteractive || (
+ # Perhaps another nn is active? If so, we should do bootstrap here instead.
+ hdfs namenode -bootstrapStandby -nonInteractive
+ )
+ ;;
+ *)
+ hdfs namenode -bootstrapStandby -nonInteractive
+ ;;
+ esac
+ fi
+ resources:
+ requests:
+ cpu: '0.2'
+ memory: 256Mi
+ limits:
+ cpu: '0.5'
+ memory: 512Mi
+ envFrom:
+ - configMapRef:
+ name: environment
+ env:
+ - name: NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ - name: POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ - name: POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ - name: POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ volumeMounts:
+ - mountPath: /etc/hadoop
+ name: hadoop-configuration
+ - mountPath: /var/log/hadoop
+ name: hadoop-logs
+ - mountPath: /data00
+ name: data00
+ - mountPath: /etc/hadoop/zookeeper/auth
+ name: zookeeper-credentials
+ readOnly: true
+ - mountPath: /tmp/scripts
+ name: scripts
+ # Scratch dir is a location where init containers place items for later use
+ # by the main containers when they run.
+ - mountPath: /tmp/scratch
+ name: scratch
+ serviceAccountName: hadoop
+ volumes:
+ - configMap:
+ name: hadoop-configuration
+ name: hadoop-configuration
+ - configMap:
+ name: scripts
+ defaultMode: 0555
+ name: scripts
+ - configMap:
+ name: global-files
+ name: global-files
+ - emptyDir: {}
+ name: hadoop-logs
+ # Scratch dir is a location where init containers place items for later use
+ # by the main containers when they run.
+ - emptyDir: {}
+ name: scratch
+ - secret:
+ secretName: zookeeper-credentials
+          defaultMode: 0400
+ optional: true
+ name: zookeeper-credentials
+ updateStrategy:
+ type: RollingUpdate
+ volumeClaimTemplates:
+ - metadata:
+ name: data00
+ spec:
+ accessModes: ["ReadWriteOnce"]
+ resources:
+ requests:
+ storage: 2Gi
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/README.md b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/README.md
new file mode 100644
index 0000000000..69a91c23ee
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/README.md
@@ -0,0 +1,159 @@
+
+
+# Kubernetes Deployment Testing Using
+
+Defines a set of tests that are suitable for running against a target cluster -- they are not too
+resource intensive and do not require any vendor-specific extensions. It should be possible to run
+these tests against a multi-node KinD cluster, below are some notes to help a developer to run
+them locally.
+
+## Run the tests locally
+
+Assumes a Docker Desktop or some other docker-in-docker type of environment. First, prepare your
+cluster connection details such that they can be passed into the container context. Next, launch
+the test runner in a container:
+
+```shell
+$ docker container run \
+ --env KUBECONFIG=/workspace/your-kubeconfig \
+    --mount type=bind,source=$(pwd),target=/workspace \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ --workdir /workspace \
+ ${USER}/hbase/operator-tools/kuttl:latest \
+ --config tests/kuttl-test-integration.yaml \
+ --parallel 1
+```
+
+## Run the tests in AWS EKS
+
+It is possible to run these tests in AWS EKS. This requires configuring an RBAC on your target
+cluster that maps to an IAM profile. Next, define a profile in AWS configuration. When you launch
+the container, pass configuration and profile selection through to the running container.
+
+Building on the previous example,
+
+```shell
+$ docker container run \
+ --env AWS_PROFILE="your-profile" \
+ --env KUBECONFIG=/workspace/your-kubeconfig \
+    --mount type=bind,source=$(pwd),target=/workspace \
+ -v /var/run/docker.sock:/var/run/docker.sock \
+ -v ~/.aws:/root/.aws \
+ --workdir /workspace \
+ ${USER}/hbase/operator-tools/kuttl:latest \
+ --config tests/kuttl-test-integration.yaml
+```
+
+## Prepare a KinD cluster
+
+Ask KinD to create a cluster (and docker network), and export the configuration oriented as from
+inside the cluster. Start by creating a kind-config.yaml and configuring it for multiple nodes.
+See https://kind.sigs.k8s.io/docs/user/quick-start/#configuring-your-kind-cluster
+
+```shell
+$ kind create cluster --config kind-config.yaml
+...
+You can now use your cluster with:
+
+kubectl cluster-info --context kind --kubeconfig kubeconfig
+$ kind export kubeconfig --name kind --internal --kubeconfig kubeconfig-internal
+```
+
+## Local KinD Hacks
+
+Preparing and staging the large container images into the kind nodes is slow. Speed up the process
+a bit by creating a single-node KinD cluster and letting `kuttl` populate the images you need.
+
+First, find all the images used in your tests,
+
+```shell
+$ find tests/kind -type f -iname '*kustomization.yaml' \
+ -exec yq '.images[] | .newName + ":" + .newTag' {} + \
+ | sort -u
+hadoop:...
+hbase:...
+zookeeper:...
+```
+
+Pull those images locally.
+
+```shell
+$ docker image pull hadoop:...
+$ docker image pull hbase:...
+$ docker image pull zookeeper:...
+```
+
+Now make sure kuttl is using a docker volume for the containerd directory on each container, and
+populate those images into your kuttl configuration using this config snippet:
+
+```yaml
+kindNodeCache:
+ # Have kuttl create and mount volumes for a container image cache to each kind pod. Kuttl will
+  # reuse these mounts across runs, so we can save time the next time the tests run.
+ true
+kindContainers:
+ # pre-populate the kind containers with these images pulled from the host registry. They'll be
+ # cached via `kindNodeCache`.
+- hadoop:...
+- hbase:...
+- zookeeper:...
+```
+
+When you run `kuttl` with this config, you'll see that it has mounted a volume for each container.
+It'll take a while, but `kuttl` will report its progress copying these container images.
+
+```
+== RUN kuttl
+...
+ harness.go:202: node mount point /var/lib/docker/volumes/kind-0/_data
+...
+ harness.go:155: Starting KIND cluster
+ kind.go:66: Adding Containers to KIND...
+ kind.go:75: Add image zookeeper:... to node control-plane
+...
+```
+
+Once copied into one volume, create all the additional volumes you'll need and clone the original.
+Repeat this for every worker node you'd like in your cluster.
+
+```shell
+$ docker volume create --name kind-1
+$ docker container run --rm -it \
+ -v kind-0:/from \
+ -v kind-1:/to \
+ alpine ash -c "cd /from ; cp -a . /to"
+```
+
+In `kind-config.yaml`, specify the mount points for each of your KinD processes.
+
+```yaml
+nodes:
+- role: control-plane
+ extraMounts:
+ - &extra-mounts
+ hostPath: /var/lib/docker/volumes/kind-0/_data
+ containerPath: /var/lib/containerd
+ readOnly: false
+ propagation: HostToContainer
+- role: worker
+ extraMounts:
+ - <<: *extra-mounts
+ hostPath: /var/lib/docker/volumes/kind-1/_data
+...
+```
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/components_zookeeper_single/00-assert-zookeeper.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/components_zookeeper_single/00-assert-zookeeper.yaml
new file mode 100644
index 0000000000..4e109f0846
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/components_zookeeper_single/00-assert-zookeeper.yaml
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Asserts on the ZooKeeper portion of the deployment.
+#
+---
+# assert that there is a `StatefulSet` named "zookeeper" that has one live instance.
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: zookeeper
+status:
+ availableReplicas: 1
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/components_zookeeper_single/00-kustomize.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/components_zookeeper_single/00-kustomize.yaml
new file mode 100644
index 0000000000..cb2588cf03
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/components_zookeeper_single/00-kustomize.yaml
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+
+commands:
+- script: ../../../resources/kustomize_into_tmpdir.sh
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/components_zookeeper_single/kustomization.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/components_zookeeper_single/kustomization.yaml
new file mode 100644
index 0000000000..4f27d9b394
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/components_zookeeper_single/kustomization.yaml
@@ -0,0 +1,26 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+images:
+- name: zookeeper
+ newName: zookeeper
+ newTag: 3.8.0
+
+components:
+ - ../../../../main/kustomize/components/zookeeper/single-instance
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/overlays_hdfs/00-assert.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/overlays_hdfs/00-assert.yaml
new file mode 100644
index 0000000000..899c485303
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/overlays_hdfs/00-assert.yaml
@@ -0,0 +1,31 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# assert that there is a `StatefulSet` named "namenode" that has one live instance
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: namenode
+status:
+ availableReplicas: 1
+---
+# assert that there is a `StatefulSet` named "datanode" that has one live instance
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: datanode
+status:
+ availableReplicas: 1
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/overlays_hdfs/00-kustomize.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/overlays_hdfs/00-kustomize.yaml
new file mode 100644
index 0000000000..3947f48c77
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/overlays_hdfs/00-kustomize.yaml
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: ../../bin/kustomize_into_tmpdir.sh
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/overlays_hdfs/kustomization.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/overlays_hdfs/kustomization.yaml
new file mode 100644
index 0000000000..ba9a7bf866
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/overlays_hdfs/kustomization.yaml
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../test_base
+- ../../../overlays/hdfs
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/test_base/kustomization.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/test_base/kustomization.yaml
new file mode 100644
index 0000000000..387cf31eeb
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/test_base/kustomization.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+commonLabels:
+ # Must repeat common labels and images in each overlay; can't inherit to keep each overlay independent
+ # https://github.com/kubernetes-sigs/kustomize/issues/915
+ # This label is used to open up calico network acls
+ app: hadoop
+
+resources:
+# When inter-pod networking is limited, apply this policy to open communications between pods that
+# bear the "hadoop" label.
+- networkpolicy.yaml
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/test_base/networkpolicy.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/test_base/networkpolicy.yaml
new file mode 100644
index 0000000000..8da0c53f70
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/integration/test_base/networkpolicy.yaml
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# Explicitly permit all traffic between Hadoop-related pods in our namespace
+kind: NetworkPolicy
+apiVersion: networking.k8s.io/v1
+metadata:
+ name: allow-all
+spec:
+ podSelector:
+ matchLabels:
+ app: hadoop
+ ingress:
+ - {}
+ egress:
+ - {}
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/base/00-assert.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/base/00-assert.yaml
new file mode 100644
index 0000000000..1e24de4eb8
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/base/00-assert.yaml
@@ -0,0 +1,72 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# assert that there is a `ConfigMap` named "environment"
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: environment
+---
+# assert that there is a `ConfigMap` named "global-files"
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: global-files
+# TODO: kuttl has no means to express `any` value, so cannot assert on data keys.
+#data:
+# java.security: ...
+# jmxexporter.yaml: ...
+# ssl-client.xml: ...
+# ssl-server.xml: ...
+---
+# assert that there is a `ConfigMap` named "hadoop-configuration-XXX"
+# TODO: kuttl does not support generated names
+#apiVersion: v1
+#kind: ConfigMap
+#metadata:
+# name: hadoop-configuration-c94h8k249d
+# TODO: kuttl has no means to express `any` value, so cannot assert on data keys.
+#data:
+# log4j.properties: ...
+---
+# assert that there is a `ConfigMap` named "scripts"
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: scripts
+# TODO: kuttl has no means to express `any` value, so cannot assert on data keys.
+#data:
+# apiserver_access.sh: ...
+# ...
+---
+# assert that there is a `Secret` named "keystore-password"
+apiVersion: v1
+kind: Secret
+metadata:
+ name: keystore-password
+type: Opaque
+---
+# assert that there is a `Service` named "hadoop"
+apiVersion: v1
+kind: Service
+metadata:
+ name: hadoop
+---
+# assert that there is a `Job` named "delete-format-hdfs-configmap"
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: delete-format-hdfs-configmap
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/base/00-kustomize.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/base/00-kustomize.yaml
new file mode 100644
index 0000000000..c42afed213
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/base/00-kustomize.yaml
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: ../../../resources/kustomize_into_tmpdir.sh
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/base/README.md b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/base/README.md
new file mode 100644
index 0000000000..e6a4aef607
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/base/README.md
@@ -0,0 +1,24 @@
+
+
+# tests/unit/base
+
+A collection of asserts on the resources allocated by `hbase-kubernetes-deployment/base` that are
+not explicitly covered by a more specific test case.
+
+Hopefully the scope of this test case shrinks over time.
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/base/kustomization.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/base/kustomization.yaml
new file mode 100644
index 0000000000..82e3b2c241
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/base/kustomization.yaml
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+resources:
+ - ../../../../main/kustomize/base
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/components_zookeeper_single/00-assert-zookeeper.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/components_zookeeper_single/00-assert-zookeeper.yaml
new file mode 100644
index 0000000000..41fc2d8581
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/components_zookeeper_single/00-assert-zookeeper.yaml
@@ -0,0 +1,49 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Asserts on the ZooKeeper portion of the deployment.
+#
+---
+# assert that there is a `ConfigMap` named "zookeeper-scripts-XXX"
+# TODO: kuttl does not support generated names
+#apiVersion: v1
+#kind: ConfigMap
+#metadata:
+# name: zookeeper-scripts-c94h8k249d
+---
+# assert that there is a `ConfigMap` named "zookeeper-quorum"
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: zookeeper-quorum
+---
+# assert that there is a `PodDisruptionBudget` named "zookeeper-pdb"
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: zookeeper-pdb
+---
+# assert that there is a `StatefulSet` named "zookeeper" that:
+# - provides pods labeled role:zookeeper
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: zookeeper
+spec:
+ template:
+ metadata:
+ labels:
+ role: zookeeper
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/components_zookeeper_single/00-kustomize.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/components_zookeeper_single/00-kustomize.yaml
new file mode 100644
index 0000000000..cb2588cf03
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/components_zookeeper_single/00-kustomize.yaml
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+
+commands:
+- script: ../../../resources/kustomize_into_tmpdir.sh
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/components_zookeeper_single/kustomization.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/components_zookeeper_single/kustomization.yaml
new file mode 100644
index 0000000000..ad0d9e9015
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/components_zookeeper_single/kustomization.yaml
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+components:
+ - ../../../../main/kustomize/components/zookeeper/single-instance
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/overlays_hdfs/00-assert.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/overlays_hdfs/00-assert.yaml
new file mode 100644
index 0000000000..cf367108c6
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/overlays_hdfs/00-assert.yaml
@@ -0,0 +1,73 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+# assert that there is a `ConfigMap` named "hadoop-configuration-XXX"
+# TODO: kuttl does not support generated names
+#apiVersion: v1
+#kind: ConfigMap
+#metadata:
+# name: hadoop-configuration-c94h8k249d
+# TODO: kuttl has no means to express `any` value, so cannot assert on data keys.
+#data:
+# log4j.properties: ...
+# hdfs-site.xml: ...
+# core-site.xml: ...
+---
+# assert that there is a `PodDisruptionBudget` named "namenode"
+apiVersion: policy/v1
+kind: PodDisruptionBudget
+metadata:
+ name: namenode
+---
+# assert that there is a `StatefulSet` named "namenode" that provides pods labeled role:namenode
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: namenode
+spec:
+ template:
+ metadata:
+ labels:
+ role: namenode
+---
+# assert that there is a `Service` named "namenode" pointing to pods labeled role:namenode
+apiVersion: v1
+kind: Service
+metadata:
+ name: namenode
+spec:
+ selector:
+ role: namenode
+---
+# assert that there is a `StatefulSet` named "datanode" that provides pods labeled role:datanode
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: datanode
+spec:
+ template:
+ metadata:
+ labels:
+ role: datanode
+---
+# assert that there is a `Service` named "datanode" pointing to pods labeled role:datanode
+apiVersion: v1
+kind: Service
+metadata:
+ name: datanode
+spec:
+ selector:
+ role: datanode
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/overlays_hdfs/00-kustomize.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/overlays_hdfs/00-kustomize.yaml
new file mode 100644
index 0000000000..c42afed213
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/overlays_hdfs/00-kustomize.yaml
@@ -0,0 +1,20 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: ../../../resources/kustomize_into_tmpdir.sh
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/overlays_hdfs/kustomization.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/overlays_hdfs/kustomization.yaml
new file mode 100644
index 0000000000..a557e53e95
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/kustomize/unit/overlays_hdfs/kustomization.yaml
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../../../../main/kustomize/overlays/hdfs
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/resources/kustomize_into_tmpdir.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/resources/kustomize_into_tmpdir.sh
new file mode 100755
index 0000000000..7e5cef1048
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/resources/kustomize_into_tmpdir.sh
@@ -0,0 +1,90 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Test runner using KUTTL against a target cluster.
+# https://kuttl.dev
+# https://kind.sigs.k8s.io
+#
+# Materialize a kustomize directory for a kuttl test.
+#
+# Kustomize is clunky for automated testing. It's pretty opinionated in that it will only evaluate
+# a directory off of disk -- you cannot generate a kustomization and pass it in via stdin.
+# In order to use kuttl generated namespaces within the kustomization, we have to modify the
+# kustomization.yaml before applying it. If we modify that file in the source tree, we end up with
+# the test namespace appended to the file under source control. So, this script creates a temp
+# directory, copies all the resources into that directory, and modifies the kustomization.yaml as
+# necessary. It then runs `kubectl apply -k` against that temporary directory.
+#
+
+declare DEBUG="${DEBUG:-false}"
+if [ "${DEBUG}" = 'true' ] ; then
+ set -x
+fi
+
+set -eou pipefail
+
+declare NAMESPACE
+declare NEW_RESOURCES='[]'
+declare NEW_COMPONENTS='[]'
+declare kustomize_dir
+declare -a rewritten_resources=()
+declare -a rewritten_components=()
+
+kustomize_dir="$(mktemp -d -p /tmp "${NAMESPACE}.XXXXXXXXXX")"
+trap '[ -d "${kustomize_dir}" ] && rm -rf "${kustomize_dir}"' EXIT
+
+cp -r ./* "${kustomize_dir}/"
+
+for r in $(yq '.resources[]' kustomization.yaml) ; do
+ if [[ "${r}" =~ ^\.\./.* ]] ; then
+ # resolve the new relative location for any resource path that is not in the local directory
+ canonized="$(cd "${r}" ; pwd)"
+ r="../..${canonized}"
+ fi
+ rewritten_resources+=("'${r}'")
+done
+if [ "${#rewritten_resources[@]}" -gt 0 ] ; then
+ NEW_RESOURCES="[ $(printf '%s,' "${rewritten_resources[@]}") ]"
+fi
+
+for r in $(yq '.components[]' kustomization.yaml) ; do
+ if [[ "${r}" =~ ^\.\./.* ]] ; then
+ # resolve the new relative location for any resource path that is not in the local directory
+ canonized="$(cd "${r}" ; pwd)"
+ r="../..${canonized}"
+ fi
+ rewritten_components+=("'${r}'")
+done
+if [ "${#rewritten_components[@]}" -gt 0 ] ; then
+ NEW_COMPONENTS="[ $(printf '%s,' "${rewritten_components[@]}") ]"
+fi
+
+env NAMESPACE="${NAMESPACE}" \
+ NEW_RESOURCES="${NEW_RESOURCES}" \
+ NEW_COMPONENTS="${NEW_COMPONENTS}" \
+ yq -i '
+ .namespace = strenv(NAMESPACE) |
+ .resources = env(NEW_RESOURCES) |
+ .components = env(NEW_COMPONENTS)
+' "${kustomize_dir}/kustomization.yaml"
+
+if [ "${DEBUG}" = 'true' ] ; then
+ cat "${kustomize_dir}/kustomization.yaml"
+fi
+
+kubectl apply -k "${kustomize_dir}"
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/resources/kuttl-test-integration.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/resources/kuttl-test-integration.yaml
new file mode 100644
index 0000000000..3329cdf10d
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/resources/kuttl-test-integration.yaml
@@ -0,0 +1,37 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Test runner using KUTTL against a target cluster.
+# https://kuttl.dev
+# https://kind.sigs.k8s.io
+#
+# Configures the kuttl TestSuite for the "integration" tests, which apply
+# resources to a real, externally-provisioned cluster.
+# See also kuttl-test-unit.yaml for the mocked-control-plane suite.
+---
+# Does not use Kuttl's built-in KIND support -- it doesn't quite work correctly with a VM-based
+# (Docker Desktop) style of runtime. Instead, assumes the cluster is established outside of kuttl
+# and configuration is provided via `--env`.
+apiVersion: kuttl.dev/v1beta1
+kind: TestSuite
+testDirs:
+- ./src/test/kustomize/integration
+timeout:
+ # these tests allocate several pods with dependencies between them, allow some time for
+ # everything to launch and settle.
+ 300
+reportName: TEST-kuttl-report-integration
+reportFormat: xml
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/resources/kuttl-test-unit.yaml b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/resources/kuttl-test-unit.yaml
new file mode 100644
index 0000000000..d87b2d6691
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/resources/kuttl-test-unit.yaml
@@ -0,0 +1,28 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Test runner using KUTTL against a target cluster.
+# https://kuttl.dev
+# https://kind.sigs.k8s.io
+# Configures the kuttl TestSuite for the "unit" tests (mocked control plane).
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestSuite
+startControlPlane: true
+testDirs:
+- ./src/test/kustomize/unit
+reportName: TEST-kuttl-report-unit
+reportFormat: xml
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/resources/mvn_exec_run_kuttl.sh b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/resources/mvn_exec_run_kuttl.sh
new file mode 100755
index 0000000000..e544d65fac
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-kustomize/src/test/resources/mvn_exec_run_kuttl.sh
@@ -0,0 +1,37 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Wrap up the complexities of launching `kubectl kuttl test` via docker container.
+
+set -euox pipefail
+
+declare default_run_args
+default_run_args="--rm --mount type=bind,source=$(pwd),target=/workspace --workdir /workspace"
+
+# from the calling environment
+declare DOCKER_EXE="${DOCKER_EXE:-"$(command -v docker 2>/dev/null)"}"
+declare DOCKER_CONTAINER_RUN_ADDITIONAL_ARGS="${DOCKER_CONTAINER_RUN_ADDITIONAL_ARGS:-"${default_run_args}"}"
+declare USER="${USER:-apache}"
+declare KUTTL_IMAGE="${KUTTL_IMAGE:-"${USER}/hbase/operator-tools/kuttl:latest"}"
+
+declare run_args
+read -r -a run_args <<< "$DOCKER_CONTAINER_RUN_ADDITIONAL_ARGS"
+
+exec "${DOCKER_EXE}" container run \
+ "${run_args[@]}" \
+ "${KUTTL_IMAGE}" \
+ "$@"
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/README.md b/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/README.md
new file mode 100644
index 0000000000..1dffc2a591
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/README.md
@@ -0,0 +1,70 @@
+
+
+# hbase-kubernetes-testing-image
+
+This directory builds a docker image containing everything required to run `kubectl-kuttl` in
+"mocked control plane" mode. This image is used as the basis for both dev and test environments.
+
+## Build
+
+Building the docker image locally is a little picky because there's lots of input arguments. These
+are managed via the [docker-bake.override.hcl](./src/main/docker/docker-bake.override.hcl).
+
+Start by creating a buildx context that supports (optionally) multi-platform images. If you've
+created this context previously, it's enough to ensure that it's active via `docker buildx ls`.
+
+```shell
+$ docker buildx create \
+ --driver docker-container \
+ --platform linux/amd64,linux/arm64 \
+ --use \
+ --bootstrap
+```
+
+Finally, build the image using `maven package`, or manually, using,
+
+```shell
+$ docker buildx bake \
+ --file src/main/docker/docker-bake.hcl \
+ --file src/main/docker/docker-bake.override.hcl \
+ --pull \
+ --load
+```
+
+This exports an image to your local repository that is tagged as `${USER}/hbase/operator-tools/kuttl:latest`.
+
+## Usage
+
+The image is configured with `kuttl` as the entrypoint.
+
+```shell
+$ docker container run --rm -it ${USER}/hbase/operator-tools/kuttl:latest --help
+
+```
+
+Running tests in the image requires mounting the workspace into the container image and passing
+appropriate parameters to `kuttl`. For example, run the "unit" tests like this:
+
+```shell
+$ docker container run \
+ --mount type=bind,source=$(pwd),target=/workspace \
+ --workdir /workspace \
+ ${USER}/hbase/operator-tools/kuttl:latest \
+ --config tests/kuttl-test-unit.yaml
+```
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/pom.xml b/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/pom.xml
new file mode 100644
index 0000000000..f6b7c0e65a
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/pom.xml
@@ -0,0 +1,126 @@
+
+
+
+ 4.0.0
+
+ hbase-kubernetes-deployment
+ org.apache.hbase.operator.tools
+ ${revision}
+ ..
+
+
+ hbase-kubernetes-testing-image
+ Apache HBase - Kubernetes Testing Image
+ A container image to facilitate testing of Kubernetes Deployment.
+ pom
+
+
+ linux/amd64
+
+
+
+
+
+
+
+ org.codehaus.mojo
+ exec-maven-plugin
+
+ true
+
+
+
+ docker-buildx-bake-print
+
+ exec
+
+ package
+
+ /usr/bin/env
+
+ sh
+ -c
+
+ 2>&1 \
+ docker buildx bake \
+ --print \
+ --file src/main/docker/docker-bake.hcl \
+ --file src/main/docker/docker-bake.override.hcl
+
+
+
+
+
+ docker-buildx-bake
+
+ exec
+
+ package
+
+ /usr/bin/env
+
+ sh
+ -c
+
+ 2>&1 \
+ docker buildx bake \
+ --progress plain \
+ --pull \
+ --load \
+ --set *.platform=${container_image.platforms} \
+ --file src/main/docker/docker-bake.hcl \
+ --file src/main/docker/docker-bake.override.hcl
+
+
+
+
+
+
+
+
+
+
+
+ amd64
+
+
+ x86_64
+
+
+
+ linux/amd64
+
+
+
+ arm64
+
+
+ aarch64
+
+
+
+ linux/arm64
+
+
+
+
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/src/main/docker/Dockerfile b/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/src/main/docker/Dockerfile
new file mode 100644
index 0000000000..847724c09e
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/src/main/docker/Dockerfile
@@ -0,0 +1,161 @@
+# syntax=docker/dockerfile:1.4
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# hadolint global ignore=DL3020
+
+ARG IMG_BASE='golang'
+ARG IMG_BASE_TAG='1.19-alpine'
+ARG BASE_IMG_ALPINE='alpine'
+ARG BASE_IMG_TAG_ALPINE='latest'
+ARG YQ_IMG='mikefarah/yq'
+ARG YQ_IMG_TAG='latest'
+
+FROM ${BASE_IMG_ALPINE}:${BASE_IMG_TAG_ALPINE} as alpine
+RUN apk add --no-cache bash~=5
+
+FROM ${YQ_IMG}:${YQ_IMG_TAG} as yq
+
+## -- Stages kubectl_${TARGETARCH} --
+# Define stages that facilitate bringing in platform-specific binaries.
+FROM alpine as kubectl_amd64
+ARG KUBECTL_SHA_AMD64_URL
+ARG KUBECTL_SHA_AMD64
+ARG KUBECTL_BIN_AMD64_URL
+ARG KUBECTL_BIN_AMD64
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+WORKDIR /tmp
+ADD --link ${KUBECTL_SHA_AMD64_URL} /tmp/
+ADD --link ${KUBECTL_BIN_AMD64_URL} /tmp/
+RUN echo "$(cat "${KUBECTL_SHA_AMD64}") ${KUBECTL_BIN_AMD64}" | sha512sum -c
+ENV KUBECTL_BIN="${KUBECTL_BIN_AMD64}"
+
+FROM alpine as kubectl_arm64
+ARG KUBECTL_SHA_ARM64_URL
+ARG KUBECTL_SHA_ARM64
+ARG KUBECTL_BIN_ARM64_URL
+ARG KUBECTL_BIN_ARM64
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+WORKDIR /tmp
+ADD --link ${KUBECTL_SHA_ARM64_URL} /tmp/
+ADD --link ${KUBECTL_BIN_ARM64_URL} /tmp/
+RUN echo "$(cat "${KUBECTL_SHA_ARM64}") ${KUBECTL_BIN_ARM64}" | sha512sum -c
+ENV KUBECTL_BIN="${KUBECTL_BIN_ARM64}"
+
+ARG TARGETARCH
+# hadolint ignore=DL3006
+FROM kubectl_${TARGETARCH} as kubectl
+RUN mv "/tmp/${KUBECTL_BIN}" /tmp/kubectl \
+ && chmod a+x /tmp/kubectl
+
+## -- Stages kuttl_${TARGETARCH} --
+# Define stages that facilitate bringing in platform-specific binaries.
+FROM alpine as kuttl_amd64
+ARG KUTTL_CHECKSUMS_URL
+ARG KUTTL_CHECKSUMS
+ARG KUTTL_BIN_AMD64_URL
+ARG KUTTL_BIN_AMD64
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+WORKDIR /tmp
+ADD --link ${KUTTL_CHECKSUMS_URL} /tmp/
+ADD --link ${KUTTL_BIN_AMD64_URL} /tmp/
+RUN sha256sum -c <(grep "${KUTTL_BIN_AMD64}" "${KUTTL_CHECKSUMS}")
+ENV KUTTL_BIN="${KUTTL_BIN_AMD64}"
+
+FROM alpine as kuttl_arm64
+ARG KUTTL_CHECKSUMS_URL
+ARG KUTTL_CHECKSUMS
+ARG KUTTL_BIN_ARM64_URL
+ARG KUTTL_BIN_ARM64
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+WORKDIR /tmp
+ADD --link ${KUTTL_CHECKSUMS_URL} /tmp/
+ADD --link ${KUTTL_BIN_ARM64_URL} /tmp/
+RUN sha256sum -c <(grep "${KUTTL_BIN_ARM64}" "${KUTTL_CHECKSUMS}")
+ENV KUTTL_BIN="${KUTTL_BIN_ARM64}"
+
+ARG TARGETARCH
+# hadolint ignore=DL3006
+FROM kuttl_${TARGETARCH} as kuttl
+RUN mv "/tmp/${KUTTL_BIN}" /tmp/kubectl-kuttl \
+ && chmod a+x /tmp/kubectl-kuttl
+
+## -- Stages kustomize_${TARGETARCH} --
+# Define stages that facilitate bringing in platform-specific binaries.
+FROM alpine as kustomize_amd64
+ARG KUSTOMIZE_CHECKSUMS_URL
+ARG KUSTOMIZE_CHECKSUMS
+ARG KUSTOMIZE_BIN_AMD64_TGZ_URL
+ARG KUSTOMIZE_BIN_AMD64_TGZ
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+WORKDIR /tmp
+ADD --link ${KUSTOMIZE_CHECKSUMS_URL} /tmp/
+ADD --link ${KUSTOMIZE_BIN_AMD64_TGZ_URL} /tmp/
+RUN sha256sum -c <(grep "${KUSTOMIZE_BIN_AMD64_TGZ}" "${KUSTOMIZE_CHECKSUMS}")
+ENV KUSTOMIZE_BIN_TGZ="${KUSTOMIZE_BIN_AMD64_TGZ}"
+
+FROM alpine as kustomize_arm64
+ARG KUSTOMIZE_CHECKSUMS_URL
+ARG KUSTOMIZE_CHECKSUMS
+ARG KUSTOMIZE_BIN_ARM64_TGZ_URL
+ARG KUSTOMIZE_BIN_ARM64_TGZ
+SHELL ["/bin/bash", "-o", "pipefail", "-c"]
+WORKDIR /tmp
+ADD --link ${KUSTOMIZE_CHECKSUMS_URL} /tmp/
+ADD --link ${KUSTOMIZE_BIN_ARM64_TGZ_URL} /tmp/
+RUN sha256sum -c <(grep "${KUSTOMIZE_BIN_ARM64_TGZ}" "${KUSTOMIZE_CHECKSUMS}")
+ENV KUSTOMIZE_BIN_TGZ="${KUSTOMIZE_BIN_ARM64_TGZ}"
+
+ARG TARGETARCH
+# hadolint ignore=DL3006
+FROM kustomize_${TARGETARCH} as kustomize
+RUN tar xzf "/tmp/${KUSTOMIZE_BIN_TGZ}" \
+ && chmod a+x /tmp/kustomize
+
+FROM ${IMG_BASE}:${IMG_BASE_TAG} as final
+ARG IMG_BASE
+ARG IMG_BASE_TAG
+
+COPY --from=yq /usr/bin/yq /usr/bin/yq
+COPY --from=kubectl /tmp/kubectl /usr/local/bin/
+COPY --from=kuttl /tmp/kubectl-kuttl /usr/local/bin/
+COPY --from=kustomize /tmp/kustomize /usr/local/bin/
+COPY src/main/docker/entrypoint.sh /bin/
+
+# nonroot user as defined in https://github.com/GoogleContainerTools/distroless
+ENV NON_ROOT_USER=nonroot
+ENV NON_ROOT_USER_ID=65532
+ENV NON_ROOT_USER_HOME=/home/nonroot
+
+# hadolint ignore=DL3018
+RUN apk add --update --no-cache \
+ bash~=5 \
+ docker-cli \
+ && adduser -D -u "${NON_ROOT_USER_ID}" -h "${NON_ROOT_USER_HOME}" "${NON_ROOT_USER}"
+
+# replicate the test-related bits generated by `kubebuilder` into its Makefile.
+ENV GOBIN="${GOPATH}/bin"
+ENV ENVTEST_K8S_VERSION='1.23.x'
+RUN chmod a+x /bin/entrypoint.sh \
+ && go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
+WORKDIR "${NON_ROOT_USER_HOME}"
+USER "${NON_ROOT_USER}"
+RUN "${GOBIN}/setup-envtest" use "${ENVTEST_K8S_VERSION}"
+# disable downloading remote content henceforth
+ENV ENVTEST_INSTALLED_ONLY=true
+
+ENTRYPOINT ["/bin/entrypoint.sh"]
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/src/main/docker/docker-bake.hcl b/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/src/main/docker/docker-bake.hcl
new file mode 100644
index 0000000000..54c4d83017
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/src/main/docker/docker-bake.hcl
@@ -0,0 +1,88 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# A convenience script for building the kuttl image.
+# See hbase-kubernetes-deployment/dockerfiles/kuttl/README.md
+#
+
+# input variables
+variable KUBECTL_SHA_AMD64_URL {}
+variable KUBECTL_SHA_AMD64 {}
+variable KUBECTL_BIN_AMD64_URL {}
+variable KUBECTL_BIN_AMD64 {}
+variable KUBECTL_SHA_ARM64_URL {}
+variable KUBECTL_SHA_ARM64 {}
+variable KUBECTL_BIN_ARM64_URL {}
+variable KUBECTL_BIN_ARM64 {}
+variable KUTTL_CHECKSUMS_URL {}
+variable KUTTL_CHECKSUMS {}
+variable KUTTL_BIN_AMD64_URL {}
+variable KUTTL_BIN_AMD64 {}
+variable KUTTL_BIN_ARM64_URL {}
+variable KUTTL_BIN_ARM64 {}
+variable KUSTOMIZE_CHECKSUMS_URL {}
+variable KUSTOMIZE_CHECKSUMS {}
+variable KUSTOMIZE_BIN_AMD64_TGZ_URL {}
+variable KUSTOMIZE_BIN_AMD64_TGZ {}
+variable KUSTOMIZE_BIN_ARM64_TGZ_URL {}
+variable KUSTOMIZE_BIN_ARM64_TGZ {}
+
+# output variables
+variable USER {
+ default = "apache"
+}
+variable IMAGE_TAG {
+ default = "latest"
+}
+variable IMAGE_NAME {
+ default = "${USER}/hbase/operator-tools/kuttl"
+}
+
+group default {
+ targets = [ "kuttl" ]
+}
+
+target kuttl {
+ dockerfile = "src/main/docker/Dockerfile"
+ args = {
+ KUBECTL_SHA_AMD64_URL = KUBECTL_SHA_AMD64_URL
+ KUBECTL_SHA_AMD64 = KUBECTL_SHA_AMD64
+ KUBECTL_BIN_AMD64_URL = KUBECTL_BIN_AMD64_URL
+ KUBECTL_BIN_AMD64 = KUBECTL_BIN_AMD64
+ KUBECTL_SHA_ARM64_URL = KUBECTL_SHA_ARM64_URL
+ KUBECTL_SHA_ARM64 = KUBECTL_SHA_ARM64
+ KUBECTL_BIN_ARM64_URL = KUBECTL_BIN_ARM64_URL
+ KUBECTL_BIN_ARM64 = KUBECTL_BIN_ARM64
+ KUTTL_CHECKSUMS_URL = KUTTL_CHECKSUMS_URL
+ KUTTL_CHECKSUMS = KUTTL_CHECKSUMS
+ KUTTL_BIN_AMD64_URL = KUTTL_BIN_AMD64_URL
+ KUTTL_BIN_AMD64 = KUTTL_BIN_AMD64
+ KUTTL_BIN_ARM64_URL = KUTTL_BIN_ARM64_URL
+ KUTTL_BIN_ARM64 = KUTTL_BIN_ARM64
+ KUSTOMIZE_CHECKSUMS_URL = KUSTOMIZE_CHECKSUMS_URL
+ KUSTOMIZE_CHECKSUMS = KUSTOMIZE_CHECKSUMS
+ KUSTOMIZE_BIN_AMD64_TGZ_URL = KUSTOMIZE_BIN_AMD64_TGZ_URL
+ KUSTOMIZE_BIN_AMD64_TGZ = KUSTOMIZE_BIN_AMD64_TGZ
+ KUSTOMIZE_BIN_ARM64_TGZ_URL = KUSTOMIZE_BIN_ARM64_TGZ_URL
+ KUSTOMIZE_BIN_ARM64_TGZ = KUSTOMIZE_BIN_ARM64_TGZ
+ }
+ target = "final"
+ platforms = [
+ "linux/amd64",
+ "linux/arm64"
+ ]
+ tags = [ "${IMAGE_NAME}:${IMAGE_TAG}" ]
+}
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/src/main/docker/docker-bake.override.hcl b/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/src/main/docker/docker-bake.override.hcl
new file mode 100644
index 0000000000..fa33354830
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/src/main/docker/docker-bake.override.hcl
@@ -0,0 +1,115 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Externalize default values of build parameters and document how to retrieve them.
+#
+
+function "basename" {
+ params = [a]
+ result = split("/", a)[length(split("/", a)) - 1]
+}
+
+variable KUBECTL_VERSION {
+ default = "1.24.10"
+}
+
+variable KUBECTL_SHA_AMD64_URL {
+ default = "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl.sha512"
+}
+
+variable KUBECTL_SHA_AMD64 {
+ default = "${basename(KUBECTL_SHA_AMD64_URL)}"
+}
+
+variable KUBECTL_BIN_AMD64_URL {
+ default = "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/amd64/kubectl"
+}
+
+variable KUBECTL_BIN_AMD64 {
+ default = "${basename(KUBECTL_BIN_AMD64_URL)}"
+}
+
+variable KUBECTL_SHA_ARM64_URL {
+ default = "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/arm64/kubectl.sha512"
+}
+
+variable KUBECTL_SHA_ARM64 {
+ default = "${basename(KUBECTL_SHA_ARM64_URL)}"
+}
+
+variable KUBECTL_BIN_ARM64_URL {
+ default = "https://dl.k8s.io/release/v${KUBECTL_VERSION}/bin/linux/arm64/kubectl"
+}
+
+variable KUBECTL_BIN_ARM64 {
+ default = "${basename(KUBECTL_BIN_ARM64_URL)}"
+}
+
+variable KUTTL_VERSION {
+ default = "0.15.0"
+}
+
+variable KUTTL_CHECKSUMS_URL {
+ default = "https://github.com/kudobuilder/kuttl/releases/download/v${KUTTL_VERSION}/checksums.txt"
+}
+
+variable KUTTL_CHECKSUMS {
+ default = "${basename(KUTTL_CHECKSUMS_URL)}"
+}
+
+variable KUTTL_BIN_AMD64_URL {
+ default = "https://github.com/kudobuilder/kuttl/releases/download/v${KUTTL_VERSION}/kubectl-kuttl_${KUTTL_VERSION}_linux_x86_64"
+}
+
+variable KUTTL_BIN_AMD64 {
+ default = "${basename(KUTTL_BIN_AMD64_URL)}"
+}
+
+variable KUTTL_BIN_ARM64_URL {
+ default = "https://github.com/kudobuilder/kuttl/releases/download/v${KUTTL_VERSION}/kubectl-kuttl_${KUTTL_VERSION}_linux_arm64"
+}
+
+variable KUTTL_BIN_ARM64 {
+ default = "${basename(KUTTL_BIN_ARM64_URL)}"
+}
+
+variable KUSTOMIZE_VERSION {
+ default = "4.5.4"
+}
+
+variable KUSTOMIZE_CHECKSUMS_URL {
+ default = "https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2Fv${KUSTOMIZE_VERSION}/checksums.txt"
+}
+
+variable KUSTOMIZE_CHECKSUMS {
+ default = "${basename(KUSTOMIZE_CHECKSUMS_URL)}"
+}
+
+variable KUSTOMIZE_BIN_AMD64_TGZ_URL {
+ default = "https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2Fv${KUSTOMIZE_VERSION}/kustomize_v${KUSTOMIZE_VERSION}_linux_amd64.tar.gz"
+}
+
+variable KUSTOMIZE_BIN_AMD64_TGZ {
+ default = "${basename(KUSTOMIZE_BIN_AMD64_TGZ_URL)}"
+}
+
+variable KUSTOMIZE_BIN_ARM64_TGZ_URL {
+ default = "https://github.com/kubernetes-sigs/kustomize/releases/download/kustomize%2Fv${KUSTOMIZE_VERSION}/kustomize_v${KUSTOMIZE_VERSION}_linux_arm64.tar.gz"
+}
+
+variable KUSTOMIZE_BIN_ARM64_TGZ {
+ default = "${basename(KUSTOMIZE_BIN_ARM64_TGZ_URL)}"
+}
diff --git a/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/src/main/docker/entrypoint.sh b/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/src/main/docker/entrypoint.sh
new file mode 100644
index 0000000000..8436e82600
--- /dev/null
+++ b/hbase-kubernetes-deployment/hbase-kubernetes-testing-image/src/main/docker/entrypoint.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -e
+set -o pipefail
+set -x
+
+declare ENVTEST_K8S_VERSION
+
+declare KUBEBUILDER_ASSETS
+KUBEBUILDER_ASSETS="$(setup-envtest use -i "${ENVTEST_K8S_VERSION}" -p path)"
+export KUBEBUILDER_ASSETS
+
+/usr/local/bin/kubectl kuttl test "$@"
diff --git a/hbase-kubernetes-deployment/overlays/hbase/kustomization.yaml b/hbase-kubernetes-deployment/overlays/hbase/kustomization.yaml
new file mode 100644
index 0000000000..e91ad8a6bb
--- /dev/null
+++ b/hbase-kubernetes-deployment/overlays/hbase/kustomization.yaml
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../hdfs
+
+components:
+- ../../components/zookeeper/single-instance
+- ../../components/hbase
diff --git a/hbase-kubernetes-deployment/pom.xml b/hbase-kubernetes-deployment/pom.xml
index f0f8053dd8..2bfe45d012 100644
--- a/hbase-kubernetes-deployment/pom.xml
+++ b/hbase-kubernetes-deployment/pom.xml
@@ -2,40 +2,46 @@
-
- 4.0.0
-
- hbase-operator-tools
- org.apache.hbase.operator.tools
- ${revision}
- ..
-
+
+ 4.0.0
+
+ hbase-operator-tools
+ org.apache.hbase.operator.tools
+ ${revision}
+ ..
+
- hbase-kubernetes-deployment
- Apache HBase - Kubernetes Deployment
- Resource definitions for deploying HBase on Kubernetes.
- pom
+ hbase-kubernetes-deployment
+ Apache HBase - Kubernetes Deployment
+ Resource definitions for deploying HBase on Kubernetes.
+ pom
-
+
+ hbase-kubernetes-hadoop-image
+ hbase-kubernetes-kustomize
+ hbase-kubernetes-testing-image
+
-
-
+
+
+
+
diff --git a/hbase-kubernetes-deployment/tests/integration/overlays_hbase/00-assert-hbase.yaml b/hbase-kubernetes-deployment/tests/integration/overlays_hbase/00-assert-hbase.yaml
new file mode 100644
index 0000000000..62e4aabbf2
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/integration/overlays_hbase/00-assert-hbase.yaml
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Asserts on the HBase portion of the deployment.
+#
+---
+# assert that there is a `StatefulSet` named "master" that has one live instance
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: master
+status:
+ availableReplicas: 1
+---
+# assert that there is a `StatefulSet` named "regionserver" that has one live instance
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: regionserver
+status:
+ availableReplicas: 1
diff --git a/hbase-kubernetes-deployment/tests/integration/overlays_hbase/00-kustomize.yaml b/hbase-kubernetes-deployment/tests/integration/overlays_hbase/00-kustomize.yaml
new file mode 100644
index 0000000000..b365471f5f
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/integration/overlays_hbase/00-kustomize.yaml
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+
+commands:
+- script: ../../bin/kustomize_into_tmpdir.sh
diff --git a/hbase-kubernetes-deployment/tests/integration/overlays_hbase/kustomization.yaml b/hbase-kubernetes-deployment/tests/integration/overlays_hbase/kustomization.yaml
new file mode 100644
index 0000000000..115caa7495
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/integration/overlays_hbase/kustomization.yaml
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+- ../test_base
+- ../../../overlays/hbase
diff --git a/hbase-kubernetes-deployment/tests/unit/overlays_hbase/00-assert-hbase.yaml b/hbase-kubernetes-deployment/tests/unit/overlays_hbase/00-assert-hbase.yaml
new file mode 100644
index 0000000000..27e809dcd7
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/unit/overlays_hbase/00-assert-hbase.yaml
@@ -0,0 +1,72 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Asserts on the HBase portion of the deployment.
+#
+---
+# assert that there is a `ConfigMap` named "hbase-configuration-XXX"
+# TODO: kuttl does not support generated names
+#apiVersion: v1
+#kind: ConfigMap
+#metadata:
+# name: hbase-configuration-c94h8k249d
+---
+# assert that there is a `ConfigMap` named "hbck2-configuration-XXX"
+# TODO: kuttl does not support generated names
+#apiVersion: v1
+#kind: ConfigMap
+#metadata:
+# name: hbck2-configuration-c94h8k249d
+---
+# assert that there is a `StatefulSet` named "master" and that it provides pods labeled role:master
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: master
+spec:
+ template:
+ metadata:
+ labels:
+ role: master
+---
+# assert that there is a `Service` named "master" pointing to pods labeled role:master
+apiVersion: v1
+kind: Service
+metadata:
+ name: master
+spec:
+ selector:
+ role: master
+---
+# assert that there is a `StatefulSet` named "regionserver" and that it provides pods labeled role:regionserver
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+ name: regionserver
+spec:
+ template:
+ metadata:
+ labels:
+ role: regionserver
+---
+# assert that there is a `Service` named "regionserver" pointing to pods labeled role:regionserver
+apiVersion: v1
+kind: Service
+metadata:
+ name: regionserver
+spec:
+ selector:
+ role: regionserver
diff --git a/hbase-kubernetes-deployment/tests/unit/overlays_hbase/00-kustomize.yaml b/hbase-kubernetes-deployment/tests/unit/overlays_hbase/00-kustomize.yaml
new file mode 100644
index 0000000000..b365471f5f
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/unit/overlays_hbase/00-kustomize.yaml
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+
+commands:
+- script: ../../bin/kustomize_into_tmpdir.sh
diff --git a/hbase-kubernetes-deployment/tests/unit/overlays_hbase/kustomization.yaml b/hbase-kubernetes-deployment/tests/unit/overlays_hbase/kustomization.yaml
new file mode 100644
index 0000000000..7f33c58571
--- /dev/null
+++ b/hbase-kubernetes-deployment/tests/unit/overlays_hbase/kustomization.yaml
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+---
+apiVersion: kustomize.config.k8s.io/v1beta1
+kind: Kustomization
+
+resources:
+ - ../../../overlays/hbase
diff --git a/pom.xml b/pom.xml
index 3bfeff5b07..392d4e242a 100644
--- a/pom.xml
+++ b/pom.xml
@@ -163,6 +163,12 @@
hbase-tools
${project.version}
+
+ org.apache.hbase.operator.tools
+ hbase-kubernetes-testing-image
+ pom
+ ${project.version}
+
@@ -322,6 +328,11 @@
true
+
+ org.codehaus.mojo
+ exec-maven-plugin
+ 3.1.0
+
@@ -429,6 +440,13 @@
+
+
+ kr.motd.maven
+ os-maven-plugin
+ 1.7.1
+
+