-
-
Notifications
You must be signed in to change notification settings - Fork 6
Expand file tree
/
Copy pathDockerfile
More file actions
187 lines (150 loc) · 9.23 KB
/
Dockerfile
File metadata and controls
187 lines (150 loc) · 9.23 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
# syntax=docker/dockerfile:1.20.0@sha256:26147acbda4f14c5add9946e2fd2ed543fc402884fd75146bd342a7f6271dc1d
# check=error=true
# Builder stages: pre-built local images whose artifacts are copied into the
# stages below. They carry no build logic of their own in this file.
FROM local-image/hadoop/hadoop AS hadoop-builder
FROM local-image/hbase/hbase AS hbase-builder
FROM local-image/hbase/phoenix AS phoenix
FROM local-image/hbase/hbase-operator-tools AS hbase-operator-tools
FROM local-image/hbase/hbase-opa-authorizer AS hbase-opa-authorizer
# Splitting this out into its own builder so that Hadoop & HBase can be built in parallel
# envsubst is only available in java-devel which is why we don't just do this in the final image
# Stage that assembles the Hadoop jars and the export-snapshot-to-s3 wrapper
# script needed for exporting HBase snapshots to S3.
FROM local-image/java-devel AS hadoop-s3-builder
ARG PRODUCT_VERSION
ARG RELEASE_VERSION
ARG HADOOP_HADOOP_VERSION
# Reassign the arg to `HADOOP_VERSION` for better readability.
ENV HADOOP_VERSION=${HADOOP_HADOOP_VERSION}
ARG HBASE_HBASE_VERSION
# Reassign the arg to `HBASE_VERSION` for better readability.
# It is passed as `HBASE_HBASE_VERSION`, because boil-config.toml has to contain `hbase/hbase` to establish a dependency on the HBase builder.
# The value of `hbase/hbase` is transformed by `bake` and automatically passed as `HBASE_HBASE_VERSION` arg.
ENV HBASE_VERSION=${HBASE_HBASE_VERSION}
ARG STACKABLE_USER_UID
USER ${STACKABLE_USER_UID}
WORKDIR /stackable
COPY --from=hadoop-builder --chown=${STACKABLE_USER_UID}:0 \
# The artifact name of the AWS bundle has changed between Haddop 3.3.6 and 3.4.1
# from aws-java-sdk-bundle-*.jar to bundle-*.jar.
# See: https://hadoop.apache.org/docs/stable/hadoop-aws/tools/hadoop-aws/aws_sdk_upgrade.html
# So we try to copy both and if one of them doesn't exist buildx will just ignore it :)
/stackable/hadoop/share/hadoop/tools/lib/bundle-*.jar \
/stackable/hadoop/share/hadoop/tools/lib/aws-java-sdk-bundle-*.jar \
/stackable/hadoop/share/hadoop/tools/lib/hadoop-aws-${HADOOP_VERSION}-stackable${RELEASE_VERSION}.jar \
/stackable/hadoop/share/hadoop/tools/lib/
# Template for the wrapper script; envsubst below fills in the placeholders.
COPY --chown=${STACKABLE_USER_UID}:0 hbase/hbase/stackable/bin/export-snapshot-to-s3.env /stackable/bin/
RUN <<EOF
# Resolve paths in bin/export-snapshot-to-s3
# Build a classpath of every Hadoop jar; sed strips the trailing ':' left by printf.
export LIBS=$(find /stackable/hadoop/share/hadoop -name '*.jar' -printf '%p:' | sed 's/:$//')
# The variable names are intentionally passed to envsubst in single-quotes,
# so that they are not expanded. Disabling ShellCheck rules in a Dockerfile
# does not work, so please ignore the according warning (SC2016).
envsubst '${HBASE_VERSION}:${RELEASE_VERSION}:${LIBS}' < /stackable/bin/export-snapshot-to-s3.env > /stackable/bin/export-snapshot-to-s3
chmod +x /stackable/bin/export-snapshot-to-s3
rm /stackable/bin/export-snapshot-to-s3.env
# set correct groups
chmod --recursive g=u /stackable
EOF
# Final Image
FROM local-image/java-base AS final
ARG PRODUCT_VERSION
ARG RELEASE_VERSION
ARG HADOOP_HADOOP_VERSION
# Reassign the arg to `HADOOP_VERSION` for better readability.
ENV HADOOP_VERSION=${HADOOP_HADOOP_VERSION}
ARG HBASE_PROFILE
ARG HBASE_HBASE_VERSION
# Reassign the arg to `HBASE_VERSION` for better readability.
ENV HBASE_VERSION=${HBASE_HBASE_VERSION}
ARG HBASE_HBASE_OPERATOR_TOOLS_VERSION
ARG HBASE_HBASE_OPA_AUTHORIZER_VERSION
ARG HBASE_PHOENIX_VERSION
ARG STACKABLE_USER_UID
ARG NAME="Apache HBase"
ARG DESCRIPTION="This image is deployed by the Stackable Operator for Apache HBase"
# Image metadata (OCI annotations plus OpenShift/Red Hat labels).
LABEL name="${NAME}"
LABEL version="${PRODUCT_VERSION}"
LABEL release="${RELEASE_VERSION}"
LABEL summary="The Stackable image for Apache HBase"
LABEL description="${DESCRIPTION}"
# https://github.com/opencontainers/image-spec/blob/036563a4a268d7c08b51a08f05a02a0fe74c7268/annotations.md#annotations
LABEL org.opencontainers.image.documentation="https://docs.stackable.tech/home/stable/hbase/"
LABEL org.opencontainers.image.version="${PRODUCT_VERSION}"
LABEL org.opencontainers.image.revision="${RELEASE_VERSION}"
LABEL org.opencontainers.image.title="${NAME}"
LABEL org.opencontainers.image.description="${DESCRIPTION}"
# https://docs.openshift.com/container-platform/4.16/openshift_images/create-images.html#defining-image-metadata
# https://github.com/projectatomic/ContainerApplicationGenericLabels/blob/master/vendor/redhat/labels.md
LABEL io.openshift.tags="ubi9,stackable,hbase,sdp,nosql"
LABEL io.k8s.description="${DESCRIPTION}"
LABEL io.k8s.display-name="${NAME}"
# Pull the built artifacts (binaries plus their source tarballs, which are
# shipped for license/compliance reasons) out of the builder stages.
COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-builder /stackable/hbase-${HBASE_VERSION}-stackable${RELEASE_VERSION} /stackable/hbase-${HBASE_VERSION}-stackable${RELEASE_VERSION}/
COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-builder /stackable/hbase-${HBASE_VERSION}-stackable${RELEASE_VERSION}-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-builder /stackable/async-profiler /stackable/async-profiler/
COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/hbase-operator-tools-${HBASE_HBASE_OPERATOR_TOOLS_VERSION}-stackable${RELEASE_VERSION} /stackable/hbase-operator-tools-${HBASE_HBASE_OPERATOR_TOOLS_VERSION}-stackable${RELEASE_VERSION}/
COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/hbase-operator-tools-${HBASE_HBASE_OPERATOR_TOOLS_VERSION}-stackable${RELEASE_VERSION}-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/bin/hbck2 /stackable/bin/hbck2
# NOTE(review): the entrypoint script is copied with a .sh suffix here; keep
# any CMD/ENTRYPOINT reference consistent with this filename.
COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-operator-tools /stackable/bin/hbase-entrypoint.sh /stackable/hbase-${HBASE_VERSION}-stackable${RELEASE_VERSION}/bin/hbase-entrypoint.sh
COPY --chown=${STACKABLE_USER_UID}:0 --from=phoenix /stackable/phoenix /stackable/phoenix/
COPY --chown=${STACKABLE_USER_UID}:0 --from=phoenix /stackable/phoenix-${HBASE_PHOENIX_VERSION}-stackable${RELEASE_VERSION}-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-s3-builder /stackable/bin/export-snapshot-to-s3 /stackable/bin/export-snapshot-to-s3
COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-s3-builder /stackable/hadoop/share/hadoop/tools/lib/ /stackable/hadoop/share/hadoop/tools/lib/
# Copy the dependencies from Hadoop which are required for the Azure Data Lake
# Storage (ADLS) to /stackable/hbase-${HBASE_VERSION}/lib which is on the classpath.
# hadoop-azure-${HADOOP}.jar contains the AzureBlobFileSystem which is required
# by hadoop-common-${HADOOP}.jar if the scheme of a file system is "abfs://".
COPY --chown=${STACKABLE_USER_UID}:0 --from=hadoop-builder \
/stackable/hadoop/share/hadoop/tools/lib/hadoop-azure-${HADOOP_VERSION}-stackable${RELEASE_VERSION}.jar \
/stackable/hbase-${HBASE_VERSION}-stackable${RELEASE_VERSION}/lib/
COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-opa-authorizer /stackable/hbase-opa-authorizer-${HBASE_HBASE_OPA_AUTHORIZER_VERSION}-src.tar.gz /stackable
COPY --chown=${STACKABLE_USER_UID}:0 --from=hbase-opa-authorizer /stackable/hbase-opa-authorizer/target/hbase-opa-authorizer*.jar /stackable/hbase-${HBASE_VERSION}-stackable${RELEASE_VERSION}/lib
RUN <<EOF
# Fail fast: heredoc lines are NOT implicitly &&-chained, so without set -e
# a failure in the middle of this script would be silently ignored and the
# build would still succeed.
set -e
microdnf update
# The tar and python packages are required by the Phoenix command line.
# We add zip and gzip because tar without compression is seldom useful.
microdnf install \
gzip \
python \
python-pip \
tar \
zip
microdnf clean all
# Record the installed RPMs for auditing and CVE scanning.
# The rpm query tag for the package release field is RELEASE; the previous
# format string used %{RELEASE_VERSION}, which is not a valid rpm tag
# (it is unrelated to the RELEASE_VERSION build arg) and broke the query.
rpm -qa --qf "%{NAME}-%{VERSION}-%{RELEASE}\n" | sort > /stackable/package_manifest.txt
chown ${STACKABLE_USER_UID}:0 /stackable/package_manifest.txt
chmod g=u /stackable/package_manifest.txt
rm -rf /var/cache/yum
# Version-agnostic symlinks so operators and scripts can use stable paths.
ln --symbolic --logical --verbose "/stackable/hbase-${HBASE_VERSION}-stackable${RELEASE_VERSION}" /stackable/hbase
chown --no-dereference ${STACKABLE_USER_UID}:0 /stackable/hbase
chmod g=u /stackable/hbase
ln --symbolic --logical --verbose "/stackable/hbase-operator-tools-${HBASE_HBASE_OPERATOR_TOOLS_VERSION}-stackable${RELEASE_VERSION}" /stackable/hbase-operator-tools
chown --no-dereference ${STACKABLE_USER_UID}:0 /stackable/hbase-operator-tools
chmod g=u /stackable/hbase-operator-tools
# Put the Phoenix server jar on the HBase classpath via a symlink.
ln --symbolic --logical --verbose "/stackable/phoenix/phoenix-server-hbase-${HBASE_PROFILE}.jar" "/stackable/hbase/lib/phoenix-server-hbase-${HBASE_PROFILE}.jar"
chown --no-dereference ${STACKABLE_USER_UID}:0 "/stackable/hbase/lib/phoenix-server-hbase-${HBASE_PROFILE}.jar"
chmod g=u "/stackable/hbase/lib/phoenix-server-hbase-${HBASE_PROFILE}.jar"
# fix missing permissions
chmod g=u /stackable/async-profiler
chmod g=u /stackable/bin
chmod g=u /stackable/phoenix
chmod g=u /stackable/*-src.tar.gz
# the whole directory tree /stackable/hadoop/share/hadoop/tools/lib/ must be adapted
find /stackable/hadoop -type d -exec chmod g=u {} +
EOF
# ----------------------------------------
# Checks
# This section is to run final checks to ensure the created final images
# adhere to several minimal requirements like:
# - check file permissions and ownerships
# ----------------------------------------
# Check that permissions and ownership in /stackable are set correctly
# This will fail and stop the build if any mismatches are found.
# Runs the shared checker from the java-base image; it exits non-zero (and
# therefore fails the build) on any file under /stackable that is not owned
# by ${STACKABLE_USER_UID}:0 or whose group permissions diverge.
RUN <<EOF
/bin/check-permissions-ownership.sh /stackable ${STACKABLE_USER_UID} 0
EOF
# ----------------------------------------
# Attention: Do not perform any file based actions (copying/creating etc.) below this comment because the permissions would not be checked.
# Drop to the non-root Stackable user for the container runtime.
USER ${STACKABLE_USER_UID}
ENV HBASE_CONF_DIR=/stackable/hbase/conf
ENV HOME=/stackable
ENV PATH="${PATH}:/stackable/bin:/stackable/hbase/bin"
ENV ASYNC_PROFILER_HOME=/stackable/async-profiler
WORKDIR /stackable/hbase
# Default command: a master process on localhost with UI port 16010.
# The entrypoint script is copied into the image as bin/hbase-entrypoint.sh
# (see the COPY from hbase-operator-tools above); the previous CMD referenced
# "./bin/hbase-entrypoint" without the .sh suffix, a path that does not exist
# in the image and would fail at container start.
CMD ["./bin/hbase-entrypoint.sh", "master", "localhost", "16010"]