Containers in a multi-container OpenShift pod can't communicate with each other

I'm working on migrating our existing Docker containers into OpenShift, and I'm running into an issue trying to get two of our containers into a single pod. We're using Spring Cloud Config Server for our services, with a Gitea backend. I'd like to have these in a single pod so that the Java server and Git server are always tied together.
I'm able to reach each of the containers individually via their associated routes, but the config server is unable to reach the Git server, and vice versa. I get a 404 when the config server tries to clone the Git repo. I've tried using gitea-${INSTANCE_IDENTIFIER} (INSTANCE_IDENTIFIER is just a generated value that ties all of the objects together at a glance), gitea-${INSTANCE_IDENTIFIER}.myproject.svc, and gitea-${INSTANCE_IDENTIFIER}.myproject.svc.cluster.local, as well as the full URL of the route that gets created, and nothing works.
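(For reference, connectivity can also be probed from inside the running pod. Containers in the same pod share a network namespace, so the Gitea container should also be reachable on localhost at its container port. The pod and container names below are illustrative, assuming an INSTANCE_IDENTIFIER of abc123:)
# Open a shell in the config-server container of the running pod
oc rsh -c configuration-abc123 gitea-abc123-1-xxxxx
# Same-pod containers share a network namespace, so Gitea answers on
# localhost at its container port:
curl -kv https://localhost:8443/
# The Service name should also resolve, going through the Service's port 443:
curl -kv https://gitea-abc123.myproject.svc.cluster.local/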
Here is my template, with a few things removed (...) for security:
apiVersion: v1
kind: Template
metadata:
name: configuration-template
annotations:
description: 'Configuration containers template'
iconClass: 'fa fa-gear'
tags: 'git, Spring Cloud Configuration'
objects:
- apiVersion: v1
kind: ConfigMap
metadata:
name: 'gitea-config-${INSTANCE_IDENTIFIER}'
labels:
app: 'configuration-${INSTANCE_IDENTIFIER}'
gitea: '${GITEA_VERSION}'
configuration: '${CONFIGURATION_VERSION}'
data:
local-docker.ini: |
APP_NAME = Git Server
RUN_USER = git
RUN_MODE = prod
[repository]
ROOT = /home/git/data/git/repositories
[repository.upload]
TEMP_PATH = /home/git/data/gitea/uploads
[server]
APP_DATA_PATH = /home/git/data/gitea
HTTP_PORT = 8443
DISABLE_SSH = true
SSH_PORT = 22
LFS_START_SERVER = false
OFFLINE_MODE = false
PROTOCOL = https
CERT_FILE = /var/run/secrets/service-cert/tls.crt
KEY_FILE = /var/run/secrets/service-cert/tls.key
REDIRECT_OTHER_PORT = true
PORT_TO_REDIRECT = 8080
[database]
PATH = /home/git/data/gitea/gitea.db
DB_TYPE = sqlite3
NAME = gitea
USER = gitea
PASSWD = XXXX
[session]
PROVIDER_CONFIG = /home/git/data/gitea/sessions
PROVIDER = file
[picture]
AVATAR_UPLOAD_PATH = /home/git/data/gitea/avatars
DISABLE_GRAVATAR = false
ENABLE_FEDERATED_AVATAR = false
[attachment]
PATH = /home/git/data/gitea/attachments
[log]
ROOT_PATH = /home/git/data/gitea/log
MODE = file
LEVEL = Info
[mailer]
ENABLED = false
[service]
REGISTER_EMAIL_CONFIRM = false
ENABLE_NOTIFY_MAIL = false
DISABLE_REGISTRATION = false
ENABLE_CAPTCHA = false
REQUIRE_SIGNIN_VIEW = false
DEFAULT_KEEP_EMAIL_PRIVATE = false
NO_REPLY_ADDRESS = noreply.example.org
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: 'gitea-${INSTANCE_IDENTIFIER}'
labels:
app: 'configuration-${INSTANCE_IDENTIFIER}'
gitea: '${GITEA_VERSION}'
configuration: '${CONFIGURATION_VERSION}'
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
- apiVersion: v1
kind: Route
metadata:
name: 'gitea-${INSTANCE_IDENTIFIER}'
labels:
app: 'configuration-${INSTANCE_IDENTIFIER}'
gitea: '${GITEA_VERSION}'
configuration: '${CONFIGURATION_VERSION}'
spec:
port:
targetPort: 'https'
tls:
termination: 'passthrough'
to:
kind: Service
name: 'gitea-${INSTANCE_IDENTIFIER}'
- apiVersion: v1
kind: Service
metadata:
name: 'gitea-${INSTANCE_IDENTIFIER}'
labels:
app: 'configuration-${INSTANCE_IDENTIFIER}'
gitea: '${GITEA_VERSION}'
configuration: '${CONFIGURATION_VERSION}'
annotations:
service.alpha.openshift.io/serving-cert-secret-name: 'gitea-certs-${INSTANCE_IDENTIFIER}'
spec:
type: ClusterIP
ports:
- name: 'https'
port: 443
targetPort: 8443
selector:
app: 'configuration-${INSTANCE_IDENTIFIER}'
- apiVersion: v1
kind: Route
metadata:
name: 'configuration-${INSTANCE_IDENTIFIER}'
labels:
app: 'configuration-${INSTANCE_IDENTIFIER}'
gitea: '${GITEA_VERSION}'
configuration: '${CONFIGURATION_VERSION}'
spec:
port:
targetPort: 'https'
tls:
termination: 'passthrough'
to:
kind: Service
name: 'configuration-${INSTANCE_IDENTIFIER}'
- apiVersion: v1
kind: Service
metadata:
name: 'configuration-${INSTANCE_IDENTIFIER}'
labels:
app: 'configuration-${INSTANCE_IDENTIFIER}'
gitea: '${GITEA_VERSION}'
configuration: '${CONFIGURATION_VERSION}'
annotations:
service.alpha.openshift.io/serving-cert-secret-name: 'configuration-certs-${INSTANCE_IDENTIFIER}'
spec:
type: ClusterIP
ports:
- name: 'https'
port: 443
targetPort: 8105
selector:
app: 'configuration-${INSTANCE_IDENTIFIER}'
- apiVersion: v1
kind: DeploymentConfig
metadata:
name: 'gitea-${INSTANCE_IDENTIFIER}'
labels:
app: 'configuration-${INSTANCE_IDENTIFIER}'
gitea: '${GITEA_VERSION}'
configuration: '${CONFIGURATION_VERSION}'
spec:
selector:
app: 'configuration-${INSTANCE_IDENTIFIER}'
replicas: 1
template:
metadata:
labels:
app: 'configuration-${INSTANCE_IDENTIFIER}'
gitea: '${GITEA_VERSION}'
spec:
initContainers:
- name: pem-to-keystore
image: nginx
env:
- name: keyfile
value: /var/run/secrets/openshift.io/services_serving_certs/tls.key
- name: crtfile
value: /var/run/secrets/openshift.io/services_serving_certs/tls.crt
- name: keystore_pkcs12
value: /var/run/secrets/java.io/keystores/keystore.pkcs12
- name: password
value: '${STORE_PASSWORD}'
command: ['sh']
args: ['-c', "openssl pkcs12 -export -inkey $keyfile -in $crtfile -out $keystore_pkcs12 -password pass:$password -name 'server certificate'"]
volumeMounts:
- mountPath: /var/run/secrets/java.io/keystores
name: 'configuration-keystore-${INSTANCE_IDENTIFIER}'
- mountPath: /var/run/secrets/openshift.io/services_serving_certs
name: 'configuration-certs-${INSTANCE_IDENTIFIER}'
- name: pem-to-truststore
image: openjdk:alpine
env:
- name: ca_bundle
value: /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt
- name: truststore_jks
value: /var/run/secrets/java.io/keystores/truststore.jks
- name: password
value: '${STORE_PASSWORD}'
command: ['/bin/sh']
args: ["-c",
"keytool -noprompt -importkeystore -srckeystore $JAVA_HOME/jre/lib/security/cacerts -srcstoretype JKS -destkeystore $truststore_jks -storepass $password -srcstorepass changeit && cd /var/run/secrets/java.io/keystores/ && awk '/-----BEGIN CERTIFICATE-----/{filename=\"crt-\"NR}; {print >filename}' $ca_bundle && for file in crt-*; do keytool -import -noprompt -keystore $truststore_jks -file $file -storepass $password -alias service-$file; done && rm crt-*"]
volumeMounts:
- mountPath: /var/run/secrets/java.io/keystores
name: 'configuration-keystore-${INSTANCE_IDENTIFIER}'
containers:
- name: 'gitea-${INSTANCE_IDENTIFIER}'
image: '...'
command: ['sh',
'-c',
'tar xf /app/gitea/gitea-data.tar.gz -C /home/git/data && cp /app/config/local-docker.ini /home/git/config/local-docker.ini && gitea web --config /home/git/config/local-docker.ini']
ports:
- containerPort: 8443
protocol: TCP
imagePullPolicy: Always
volumeMounts:
- mountPath: '/home/git/data'
name: 'gitea-data-${INSTANCE_IDENTIFIER}'
readOnly: false
- mountPath: '/app/config'
name: 'gitea-config-${INSTANCE_IDENTIFIER}'
readOnly: false
- mountPath: '/var/run/secrets/service-cert'
name: 'gitea-certs-${INSTANCE_IDENTIFIER}'
- name: 'configuration-${INSTANCE_IDENTIFIER}'
image: '...'
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
command: [
"java",
"-Djava.security.egd=file:/dev/./urandom",
"-Dspring.profiles.active=terraform",
"-Djavax.net.ssl.trustStore=/var/run/secrets/java.io/keystores/truststore.jks",
"-Djavax.net.ssl.trustStoreType=JKS",
"-Djavax.net.ssl.trustStorePassword=${STORE_PASSWORD}",
"-Dserver.ssl.key-store=/var/run/secrets/java.io/keystores/keystore.pkcs12",
"-Dserver.ssl.key-store-password=${STORE_PASSWORD}",
"-Dserver.ssl.key-store-type=PKCS12",
"-Dserver.ssl.trust-store=/var/run/secrets/java.io/keystores/truststore.jks",
"-Dserver.ssl.trust-store-password=${STORE_PASSWORD}",
"-Dserver.ssl.trust-store-type=JKS",
"-Dspring.cloud.config.server.git.uri=https://gitea-${INSTANCE_IDENTIFIER}.svc.cluster.local/org/centralrepo.git",
"-jar",
"/app.jar"
]
ports:
- containerPort: 8105
protocol: TCP
imagePullPolicy: Always
volumeMounts:
- mountPath: '/var/run/secrets/java.io/keystores'
name: 'configuration-keystore-${INSTANCE_IDENTIFIER}'
readOnly: true
- mountPath: 'target/centralrepo'
name: 'configuration-${INSTANCE_IDENTIFIER}'
readOnly: false
volumes:
- name: 'gitea-data-${INSTANCE_IDENTIFIER}'
persistentVolumeClaim:
claimName: 'gitea-${INSTANCE_IDENTIFIER}'
- name: 'gitea-config-${INSTANCE_IDENTIFIER}'
configMap:
defaultMode: 0660
name: 'gitea-config-${INSTANCE_IDENTIFIER}'
- name: 'gitea-certs-${INSTANCE_IDENTIFIER}'
secret:
defaultMode: 0640
secretName: 'gitea-certs-${INSTANCE_IDENTIFIER}'
- name: 'configuration-keystore-${INSTANCE_IDENTIFIER}'
emptyDir:
- name: 'configuration-certs-${INSTANCE_IDENTIFIER}'
secret:
defaultMode: 0640
secretName: 'configuration-certs-${INSTANCE_IDENTIFIER}'
- name: 'configuration-${INSTANCE_IDENTIFIER}'
emptyDir:
defaultMode: 660
restartPolicy: Always
terminationGracePeriodSeconds: 62
dnsPolicy: ClusterFirst
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 1
parameters:
- name: GITEA_VERSION
displayName: Gitea Image Version
description: The version of the gitea image.
required: true
- name: CONFIGURATION_VERSION
displayName: Configuration Service Image Version
description: The version of the configuration service image.
required: true
- name: INSTANCE_IDENTIFIER
description: Provides an identifier to tie all objects in the deployment together.
generate: expression
from: "[a-z0-9]{6}"
- name: STORE_PASSWORD
generate: expression
from: "[a-zA-Z0-9]{25}"

Related

MySQL container not starting up on Kubernetes

I was using this image to run my application with docker-compose. However, when I run the same on a Kubernetes cluster, I get the error:
[ERROR] Could not open file '/opt/bitnami/mysql/logs/mysqld.log' for error logging: Permission denied
Here's my deployment file:
apiVersion: apps/v1
kind: Deployment
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 ()
creationTimestamp: null
labels:
io.kompose.service: common-db
name: common-db
spec:
replicas: 1
selector:
matchLabels:
io.kompose.service: common-db
strategy:
type: Recreate
template:
metadata:
annotations:
kompose.cmd: kompose convert
kompose.version: 1.21.0 ()
creationTimestamp: null
labels:
io.kompose.service: common-db
spec:
containers:
- env:
- name: ALLOW_EMPTY_PASSWORD
value: "yes"
- name: MYSQL_DATABASE
value: "common-development"
- name: MYSQL_REPLICATION_MODE
value: "master"
- name: MYSQL_REPLICATION_PASSWORD
value: "repl_password"
- name: MYSQL_REPLICATION_USER
value: "repl_user"
image: bitnami/mysql:5.7
imagePullPolicy: ""
name: common-db
ports:
- containerPort: 3306
securityContext:
runAsUser: 0
resources:
requests:
memory: 512Mi
cpu: 500m
limits:
memory: 512Mi
cpu: 500m
volumeMounts:
- name: common-db-initdb
mountPath: /opt/bitnami/mysql/conf/my_custom.cnf
volumes:
- name: common-db-initdb
configMap:
name: common-db-config
serviceAccountName: ""
status: {}
The config map has the my.cnf config data. Any pointers on where I could be going wrong? Especially since the same image works with docker-compose?
Try changing the file permissions using an init container; the official Bitnami Helm chart also updates file permissions and manages the security context this way.
Helm chart: https://github.com/bitnami/charts/blob/master/bitnami/mysql/templates/master-statefulset.yaml
UPDATE:
initContainers:
- command:
- /bin/bash
- -ec
- |
chown -R 1001:1001 /bitnami/mysql
image: docker.io/bitnami/minideb:buster
imagePullPolicy: Always
name: volume-permissions
resources: {}
securityContext:
runAsUser: 0
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- mountPath: /bitnami/mysql
name: data
restartPolicy: Always
schedulerName: default-scheduler
securityContext:
fsGroup: 1001
runAsUser: 1001
serviceAccount: mysql
You may also need to use subPath (see the Kubernetes documentation on subPath volume mounts for details):
volumeMounts:
- name: common-db-initd
mountPath: /opt/bitnami/mysql/conf/my_custom.cnf
subPath: my_custom.cnf
Also, you can easily install Bitnami MySQL using its Helm chart, for example:
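A minimal sketch of that, assuming Helm 3 and that the Bitnami repo is not yet added (the release name common-db is illustrative):
# Add the Bitnami repo and install the MySQL chart.
helm repo add bitnami https://charts.bitnami.com/bitnami
helm repo update
helm install common-db bitnami/mysql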

Unable to connect Tomcat container to MariaDB database container in Kubernetes?

The Tomcat and MariaDB services are up and running, and pings from the Tomcat service to the MariaDB service succeed, but the database cannot reach Tomcat with the ping command. I've attached my manifests for troubleshooting.
Tomcat deployment:
apiVersion: apps/v1
kind: Deployment
metadata:
name: xyzapp-tomcat
spec:
selector:
matchLabels:
app: xyzapp-tomcat
tier: backend
track: stable
template:
metadata:
labels:
app: xyzapp-tomcat
tier: backend
track: stable
spec:
containers:
- name: xyzapp-tomcat
image: xyz/xyz:tomcat
env:
- name: MYSQL_SERVICE_HOST
value: "xyzapp-mariadb"
- name: MYSQL_SERVICE_PORT
value: "3306"
#- name: DB_PORT_3306_TCP_ADDR
#value: xyzapp-mariadb #service name of mysql
- name: MYSQL_DATABASE
value: xyzapp
- name: MYSQL_USER
value: student
- name: MYSQL_PASSWORD
value: student
ports:
- name: http
containerPort: 8080
Database deployment.yml:
apiVersion: v1
kind: Service
metadata:
name: xyzapp-mariadb
labels:
app: xyzapp
spec:
ports:
- port: 3306
selector:
app: xyzapp
tier: mariadb
clusterIP: None
---
apiVersion: apps/v1 # for versions before 1.9.0 use apps/v1beta2
kind: Deployment
metadata:
name: xyzapp-mariadb
labels:
app: xyzapp
spec:
selector:
matchLabels:
app: xyzapp
tier: mariadb
strategy:
type: Recreate
template:
metadata:
labels:
app: xyzapp
tier: mariadb
spec:
containers:
- image: mariadb
name: xyzapp-mariadb
env:
- name: MYSQL_ROOT_PASSWORD
value: password
- name: MYSQL_DATABASE
value: xyzapp
- name: MYSQL_USER
value: student
- name: MYSQL_PASSWORD
value: student
args: ["--default-authentication-plugin=mysql_native_password"]
ports:
- containerPort: 3306
volumeMounts:
- name: mariadb-init
mountPath: /docker-entrypoint-initdb.d
volumes:
- name: mariadb-init
persistentVolumeClaim:
claimName: xyzapp-initdb-pv-claim
Tomcat logs:
05-May-2020 11:48:53.486 WARNING [main] org.apache.tomcat.util.scan.StandardJarScanner.processURLs Failed to scan [file:/usr/local/tomcat/lib/mysql-connector-java-8.0.20.jar] from classloader hierarchy
JDBC DataSource mapping in context.xml:
<Resource name="jdbc/TestDB" auth="Container" type="javax.sql.DataSource" maxTotal="100" maxIdle="30" maxWaitMillis="10000" username="$MYSQL_USER" password="$MYSQL_PASSWORD" driverClassName="com.mysql.jdbc.Driver" url="jdbc:mysql://$MYSQL_SERVICE_HOST:$MYSQL_SERVICE_PORT/$MYSQL_DATABASE"/>

OpenShift Enterprise: Unable to connect to deployed app via browser

I deployed a Java application in OpenShift (3.9 and 3.11), but cannot reach the application via the browser.
I created a REST API application image (Open Liberty and OpenJDK 11) and pushed it to the OpenShift docker-registry via a Maven build. The ImageStream is created. I deployed the image and created a route. The pod comes up, and the pod logs show the Liberty server has started. I accessed the pod via the terminal and was able to use curl (http://localhost:9080) to test the APIs. But when I use the route to access the app from a browser, I get a "host could not be found" error.
I have the same application running successfully on Minishift.
Where should I look, and for what errors? (A few diagnostic checks are sketched after the template below.)
apiVersion: v1
kind: Template
metadata:
name: ${APPLICATION_NAME}-template
annotations:
description: ${APPLICATION_NAME}
objects:
# Application Service
- apiVersion: v1
kind: Service
metadata:
annotations:
openshift.io/generated-by: OpenShiftNewApp
service.alpha.openshift.io/serving-cert-secret-name: app-certs
labels:
app: ${APPLICATION_NAME}-${APP_VERSION_TAG}
name: ${APPLICATION_NAME}-${APP_VERSION_TAG}
namespace: ${NAME_SPACE}
spec:
ports:
- name: 9443-tcp
port: 9443
protocol: TCP
targetPort: 9443
selector:
app: ${APPLICATION_NAME}-${APP_VERSION_TAG}
deploymentconfig: ${APPLICATION_NAME}-${APP_VERSION_TAG}
sessionAffinity: None
type: ClusterIP
# Application Route
- apiVersion: v1
kind: Route
metadata:
annotations:
openshift.io/host.generated: "true"
labels:
app: ${APPLICATION_NAME}-${APP_VERSION_TAG}
name: ${APPLICATION_NAME}-${APP_VERSION_TAG}
spec:
port:
targetPort: 9443-tcp
tls:
termination: reencrypt
to:
kind: Service
name: ${APPLICATION_NAME}-${APP_VERSION_TAG}
weight: 100
wildcardPolicy: None
# APPLICATION DEPLOYMENT CONFIG
- apiVersion: v1
kind: DeploymentConfig
metadata:
annotations:
openshift.io/generated-by: OpenShiftNewApp
generation: 1
labels:
app: ${APPLICATION_NAME}-${APP_VERSION_TAG}
name: ${APPLICATION_NAME}-${APP_VERSION_TAG}
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
app: ${APPLICATION_NAME}-${APP_VERSION_TAG}
deploymentconfig: ${APPLICATION_NAME}-${APP_VERSION_TAG}
strategy:
activeDeadlineSeconds: 21600
resources: {}
rollingParams:
intervalSeconds: 1
maxSurge: 25%
maxUnavailable: 25%
timeoutSeconds: 600
updatePeriodSeconds: 1
type: Rolling
template:
metadata:
annotations:
openshift.io/generated-by: OpenShiftNewApp
labels:
app: ${APPLICATION_NAME}-${APP_VERSION_TAG}
deploymentconfig: ${APPLICATION_NAME}-${APP_VERSION_TAG}
spec:
affinity:
podAntiAffinity:
preferredDuringSchedulingIgnoredDuringExecution:
- weight: 100
podAffinityTerm:
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- ${APPLICATION_NAME}-${APP_VERSION_TAG}
# - key: region
# operator: In
# values:
# - ${TARGET_ENVIRONMENT}
topologyKey: "kubernetes.io/hostname"
containers:
- image: ${APPLICATION_NAME}:${TAG}
imagePullPolicy: Always
# livenessProbe:
# failureThreshold: 3
# httpGet:
# path: ${APPLICATION_HEALTH_CHECK_URL}
# port: 8080
# scheme: HTTP
# initialDelaySeconds: 15
# periodSeconds: 15
# successThreshold: 1
# timeoutSeconds: 25
# readinessProbe:
# failureThreshold: 3
# httpGet:
# path: ${APPLICATION_READINESS_CHECK_URL}
# port: 8080
# scheme: HTTP
# initialDelaySeconds: 10
# periodSeconds: 15
# successThreshold: 1
# timeoutSeconds: 25
name: ${APPLICATION_NAME}-${APP_VERSION_TAG}
envFrom:
- configMapRef:
name: server-env
- secretRef:
name: server-env
env:
- name: KEYSTORE_PASSWORD
valueFrom:
secretKeyRef:
name: keystore-secret
key: KEYSTORE_PASSWORD
- name: KEYSTORE_PKCS12
value: /var/run/secrets/java.io/keystores/keystore.pkcs12
ports:
- containerPort: 9443
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
terminationMessagePolicy: File
volumeMounts:
- name: app-certs
mountPath: /var/run/secrets/openshift.io/app-certs
- name: keystore-volume
mountPath: /var/run/secrets/java.io/keystores
initContainers:
- name: pem-to-keystore
image: registry.access.redhat.com/redhat-sso-7/sso71-openshift:1.1-16
env:
- name: keyfile
value: /var/run/secrets/openshift.io/app-certs/tls.key
- name: crtfile
value: /var/run/secrets/openshift.io/app-certs/tls.crt
- name: keystore_pkcs12
value: /var/run/secrets/java.io/keystores/keystore.pkcs12
- name: keystore_jks
value: /var/run/secrets/java.io/keystores/keystore.jks
- name: password
valueFrom:
secretKeyRef:
name: keystore-secret
key: KEYSTORE_PASSWORD
command: ['/bin/bash']
args: ['-c', "openssl pkcs12 -export -inkey $keyfile -in $crtfile -out $keystore_pkcs12 -password pass:$password"]
volumeMounts:
- name: keystore-volume
mountPath: /var/run/secrets/java.io/keystores
- name: app-certs
mountPath: /var/run/secrets/openshift.io/app-certs
dnsPolicy: ClusterFirst
restartPolicy: Always
schedulerName: default-scheduler
securityContext: {}
terminationGracePeriodSeconds: 30
volumes:
- name: app-certs
secret:
secretName: app-certs
- name: keystore-volume
emptyDir: {}
test: false
triggers:
- type: ConfigChange
- imageChangeParams:
automatic: true
containerNames:
- ${APPLICATION_NAME}-${APP_VERSION_TAG}
from:
kind: ImageStreamTag
name: ${APPLICATION_NAME}:${APP_VERSION_TAG}
type: ImageChange
parameters:
- name: APPLICATION_NAME
description: Name of the app
value: microservice
required: true
- name: APP_VERSION_TAG
description: TAG of the image stream tag
value: latest
required: true
- name: NAME_SPACE
description: Namespace
value: microservice--sbx--microservice
required: true
- name: DOMAIN_URL
description: DOMAIN_URL
value: microservice-myproject
required: true
- name: APPLICATION_LIVENESS_CHECK_URL
description: LIVENESS Check URL
value: /health
required: true
- name: APPLICATION_READINESS_CHECK_URL
description: READINESS Check URL
value: /microservice/envvariables
required: true
- name: DOCKER_IMAGE_REPO
description: Docker Image Repository
value: docker-registry-default.apps.xxxx.xxx.xxx.xxx.com
required: true
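(A few diagnostic checks worth running when a route does not resolve, assuming the default parameter values above so that the route and service are both named microservice-latest:)
# Check the generated route host and the endpoints behind the service.
oc get route microservice-latest -o wide
oc describe route microservice-latest
oc get endpoints microservice-latest
# Confirm the route host resolves and the router responds
# (use whatever host `oc get route` reports).
curl -kv https://<route-host>/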

OpenShift - timeout expired waiting for volumes to attach or mount for pod

I'm having some difficulties deploying an OpenShift template, specifically with attaching a persistent volume. The template is meant to deploy Jira and a MySQL database for persistence. I have the following persistent volume configuration deployed:
apiVersion: v1
kind: PersistentVolume
metadata:
name: mysqlpv0003
spec:
capacity:
storage: 2Gi
accessModes:
- ReadWriteMany
nfs:
path: /var/nfs/mysql
server: 192.168.0.171
persistentVolumeReclaimPolicy: Retain
Here, 192.168.0.171 is a valid, working NFS server. My aim is to use this persistent volume as storage for the MySQL server. The template I'm trying to deploy is as follows:
---
apiVersion: v1
kind: Template
labels:
app: jira-persistent
template: jira-persistent
message: |-
The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.
metadata:
annotations:
description: Deploys an instance of Jira, backed by a mysql database
iconClass: icon-perl
openshift.io/display-name: Jira + Mysql
openshift.io/documentation-url: https://github.com/sclorg/dancer-ex
openshift.io/long-description: Deploys an instance of Jira, backed by a mysql database
openshift.io/provider-display-name: ABXY Games, Inc.
openshift.io/support-url: abxygames.com
tags: quickstart,JIRA
template.openshift.io/bindable: 'false'
name: jira-persistent
objects:
# Database secrets
- apiVersion: v1
kind: Secret
metadata:
name: "${NAME}"
stringData:
database-password: "${DATABASE_PASSWORD}"
database-user: "${DATABASE_USER}"
keybase: "${SECRET_KEY_BASE}"
# application service
- apiVersion: v1
kind: Service
metadata:
annotations:
description: Exposes and load balances the application pods
service.alpha.openshift.io/dependencies: '[{"name": "${DATABASE_SERVICE_NAME}",
"kind": "Service"}]'
name: "${NAME}"
spec:
ports:
- name: web
port: 8080
targetPort: 8080
selector:
name: "${NAME}"
# application route
- apiVersion: v1
kind: Route
metadata:
name: "${NAME}"
spec:
host: "${APPLICATION_DOMAIN}"
to:
kind: Service
name: "${NAME}"
# application image
- apiVersion: v1
kind: ImageStream
metadata:
annotations:
description: Keeps track of changes in the application image
name: "${NAME}"
# Application buildconfig
- apiVersion: v1
kind: BuildConfig
metadata:
annotations:
description: Defines how to build the application
template.alpha.openshift.io/wait-for-ready: 'true'
name: "${NAME}"
spec:
output:
to:
kind: ImageStreamTag
name: "${NAME}:latest"
source:
contextDir: "${CONTEXT_DIR}"
git:
ref: "${SOURCE_REPOSITORY_REF}"
uri: "${SOURCE_REPOSITORY_URL}"
type: Git
strategy:
dockerStrategy:
env:
- name: CPAN_MIRROR
value: "${CPAN_MIRROR}"
dockerfilePath: Dockerfile
type: Source
triggers:
- type: ImageChange
- type: ConfigChange
- github:
secret: "${GITHUB_WEBHOOK_SECRET}"
type: GitHub
# application deployConfig
- apiVersion: v1
kind: DeploymentConfig
metadata:
annotations:
description: Defines how to deploy the application server
template.alpha.openshift.io/wait-for-ready: 'true'
name: "${NAME}"
spec:
replicas: 1
selector:
name: "${NAME}"
strategy:
type: Recreate
template:
metadata:
labels:
name: "${NAME}"
name: "${NAME}"
spec:
containers:
- env:
- name: DATABASE_SERVICE_NAME
value: "${DATABASE_SERVICE_NAME}"
- name: MYSQL_USER
valueFrom:
secretKeyRef:
key: database-user
name: "${NAME}"
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
key: database-password
name: "${NAME}"
- name: MYSQL_DATABASE
value: "${DATABASE_NAME}"
- name: SECRET_KEY_BASE
valueFrom:
secretKeyRef:
key: keybase
name: "${NAME}"
- name: PERL_APACHE2_RELOAD
value: "${PERL_APACHE2_RELOAD}"
image: " "
livenessProbe:
httpGet:
path: "/"
port: 8080
initialDelaySeconds: 30
timeoutSeconds: 3
name: jira-mysql-persistent
ports:
- containerPort: 8080
readinessProbe:
httpGet:
path: "/"
port: 8080
initialDelaySeconds: 3
timeoutSeconds: 3
resources:
limits:
memory: "${MEMORY_LIMIT}"
triggers:
- imageChangeParams:
automatic: true
containerNames:
- jira-mysql-persistent
from:
kind: ImageStreamTag
name: "${NAME}:latest"
type: ImageChange
- type: ConfigChange
# database persistentvolumeclaim
- apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: "${DATABASE_SERVICE_NAME}"
spec:
accessModes:
- ReadWriteMany
resources:
requests:
storage: "${VOLUME_CAPACITY}"
# database service
- apiVersion: v1
kind: Service
metadata:
annotations:
description: Exposes the database server
name: "${DATABASE_SERVICE_NAME}"
spec:
ports:
- name: mysql
port: 3306
targetPort: 3306
selector:
name: "${DATABASE_SERVICE_NAME}"
# database deployment config
- apiVersion: v1
kind: DeploymentConfig
metadata:
annotations:
description: Defines how to deploy the database
template.alpha.openshift.io/wait-for-ready: 'true'
name: "${DATABASE_SERVICE_NAME}"
spec:
replicas: 1
selector:
name: "${DATABASE_SERVICE_NAME}"
strategy:
type: Recreate
template:
metadata:
labels:
name: "${DATABASE_SERVICE_NAME}"
name: "${DATABASE_SERVICE_NAME}"
spec:
containers:
- env:
- name: MYSQL_USER
valueFrom:
secretKeyRef:
key: database-user
name: "${NAME}"
- name: MYSQL_PASSWORD
valueFrom:
secretKeyRef:
key: database-password
name: "${NAME}"
- name: MYSQL_DATABASE
value: "${DATABASE_NAME}"
image: " "
livenessProbe:
initialDelaySeconds: 30
tcpSocket:
port: 3306
timeoutSeconds: 1
name: mysql
ports:
- containerPort: 3306
readinessProbe:
exec:
command:
- "/bin/sh"
- "-i"
- "-c"
- MYSQL_PWD='${DATABASE_PASSWORD}' mysql -h 127.0.0.1 -u ${DATABASE_USER}
-D ${DATABASE_NAME} -e 'SELECT 1'
initialDelaySeconds: 5
timeoutSeconds: 1
resources:
limits:
memory: "${MEMORY_MYSQL_LIMIT}"
volumeMounts:
- mountPath: "/var/lib/mysql/data"
name: "${DATABASE_SERVICE_NAME}-data"
volumes:
- name: "${DATABASE_SERVICE_NAME}-data"
persistentVolumeClaim:
claimName: "${DATABASE_SERVICE_NAME}"
triggers:
- imageChangeParams:
automatic: true
containerNames:
- mysql
from:
kind: ImageStreamTag
name: mysql:5.7
namespace: "${NAMESPACE}"
type: ImageChange
- type: ConfigChange
parameters:
- description: The name assigned to all of the frontend objects defined in this template.
displayName: Name
name: NAME
required: true
value: jira-persistent
- description: The OpenShift Namespace where the ImageStream resides.
displayName: Namespace
name: NAMESPACE
required: true
value: openshift
- description: Maximum amount of memory the JIRA container can use.
displayName: Memory Limit
name: MEMORY_LIMIT
required: true
value: 512Mi
- description: Maximum amount of memory the MySQL container can use.
displayName: Memory Limit (MySQL)
name: MEMORY_MYSQL_LIMIT
required: true
value: 512Mi
- description: Volume space available for data, e.g. 512Mi, 2Gi
displayName: Volume Capacity
name: VOLUME_CAPACITY
required: true
value: 1Gi
- description: The URL of the repository with your application source code.
displayName: Git Repository URL
name: SOURCE_REPOSITORY_URL
required: true
value: https://github.com/stpork/jira.git
- description: Set this to a branch name, tag or other ref of your repository if you
are not using the default branch.
displayName: Git Reference
name: SOURCE_REPOSITORY_REF
- description: Set this to the relative path to your project if it is not in the root
of your repository.
displayName: Context Directory
name: CONTEXT_DIR
- description: The exposed hostname that will route to the jira service, if left
blank a value will be defaulted.
displayName: Application Hostname
name: APPLICATION_DOMAIN
value: ''
- description: Github trigger secret. A difficult to guess string encoded as part
of the webhook URL. Not encrypted.
displayName: GitHub Webhook Secret
from: "[a-zA-Z0-9]{40}"
generate: expression
name: GITHUB_WEBHOOK_SECRET
- displayName: Database Service Name
name: DATABASE_SERVICE_NAME
required: true
value: database
- displayName: Database Username
from: user[A-Z0-9]{3}
generate: expression
name: DATABASE_USER
- displayName: Database Password
from: "[a-zA-Z0-9]{8}"
generate: expression
name: DATABASE_PASSWORD
- displayName: Database Name
name: DATABASE_NAME
required: true
value: sampledb
- description: Set this to "true" to enable automatic reloading of modified Perl modules.
displayName: Perl Module Reload
name: PERL_APACHE2_RELOAD
value: ''
- description: Your secret key for verifying the integrity of signed cookies.
displayName: Secret Key
from: "[a-z0-9]{127}"
generate: expression
name: SECRET_KEY_BASE
- description: The custom CPAN mirror URL
displayName: Custom CPAN Mirror URL
name: CPAN_MIRROR
value: ''
When run, the deployment for the MySQL server eventually fails with the following error:
Unable to mount volumes for pod
"database-1-qvv86_test3(54f01c55-6885-11e9-bc42-3a342852673a)":
timeout expired waiting for volumes to attach or mount for pod
"test3"/"database-1-qvv86". list of unmounted volumes=[database-data
default-token-8hjgv]. list of unattached volumes=[database-data
default-token-8hjgv]
The persistent volume claim binds to the persistent volume successfully, but as far as I can tell the pod is not attaching to that volume. The template is being deployed in a fresh project, the PV is freshly created, and the NFS export is empty. I can't see any errors in how the pod references the persistent volume claim. I'm not sure why this error is occurring; I'm just learning templates and am clearly missing something. Does anyone see what I'm missing?
The issue was in my NFS permissions. Here is the working content of my /etc/exports file:
/var/nfs *(rw,root_squash,no_wdelay)
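To double-check the export from both sides (the server address is the one used in the PV above):
# On the NFS server: re-export and list what is being exported.
exportfs -ra
exportfs -v
# From an OpenShift node: confirm the export is visible over the network.
showmount -e 192.168.0.171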

How to make a connection with Google Cloud SQL using Google Container Engine?

I am using Node.js deployed with Kubernetes on Google Container Engine, but I can't make a connection to MySQL.
This is my Node.js connection:
var pool = mysql.createPool({
connectionLimit : 100,
user : process.env.DB_USER,
password : process.env.DB_PASSWORD,
database : process.env.DB_NAME,
multipleStatements : true,
socketPath : '/cloudsql/' + process.env.INSTANCE_CONNECTION_NAME
})
I was using this for my Google App Engine app and it works. Now I need to move to GKE, and it throws an error saying mysql is not defined.
This is my app-frontend.yaml:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: app-frontend
labels:
app: app
spec:
replicas: 1
template:
metadata:
labels:
app: app
tier: frontend
spec:
containers:
- name: app
image: gcr.io/app-12345/app:1.0
env :
- name : DB_HOST
value : 127.0.0.1:3306
- name : DB_USER
valueFrom:
secretKeyRef:
name: cloudsql-db-credentials
key: username
- name : DB_PASSWORD
valueFrom:
secretKeyRef:
name: cloudsql-db-credentials
key: password
ports:
- name: http-server
containerPort: 8080
imagePullPolicy: Always
- image: gcr.io/cloudsql-docker/gce-proxy:1.09
name: cloudsql-proxy
imagePullPolicy: Always
command:
- /cloud_sql_proxy
- --dir=/cloudsql
- --instances=mulung=tcp:3306
- --credential_file=/secrets/cloudsql/credentials.json
volumeMounts:
- name: cloudsql-instance-credentials
mountPath: /secrets/cloudsql
readOnly: true
- name: ssl-certs
mountPath: /etc/ssl/certs
- name: cloudsql
mountPath: /cloudsql
# [END proxy_container]
ports:
- name: portdb
containerPort: 3306
# [START volumes]
volumes:
- name: cloudsql-instance-credentials
secret:
secretName: cloudsql-instance-credentials
- name: ssl-certs
hostPath:
path: /etc/ssl/certs
- name: cloudsql
emptyDir:
What should I do to fix it? Thank you.
Can you check the command part:
command: ["/cloud_sql_proxy", "--dir=/cloudsql",
"-instances=CLOUD_SQL_INSTANCE_NAME",
"-credential_file=/secrets/cloudsql/credentials.json"]
Here is the working deployment.yaml from one of my backend applications:
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: <appname>
spec:
replicas: 2
template:
metadata:
labels:
app: <appname>
spec:
containers:
- image: gcr.io/<some_name>/cloudsql-docker/gce-proxy:1.05
name: cloudsql-proxy
command: ["/cloud_sql_proxy", "--dir=/cloudsql",
"-instances=CLOUD_SQL_INSTANCE_NAME",
"-credential_file=/secrets/cloudsql/credentials.json"]
volumeMounts:
- name: cloudsql-oauth-credentials
mountPath: /secrets/cloudsql
readOnly: true
- name: ssl-certs
mountPath: /etc/ssl/certs
- name: cloudsql
mountPath: /cloudsql
- name: <appname>
image: IMAGE_NAME
ports:
- containerPort: 8888
readinessProbe:
httpGet:
path: /<appname>/health
port: 8888
initialDelaySeconds: 30
periodSeconds: 30
timeoutSeconds: 30
successThreshold: 1
failureThreshold: 5
env:
- name: PROJECT_NAME
value: <some_name>
- name: PROJECT_ZONE
value: <some_name>
- name: INSTANCE_NAME
value: <some_name>
- name: INSTANCE_PORT
value: <some_name>
- name: CONTEXT_PATH
value: <appname>
volumeMounts:
- name: application-config
mountPath: /opt/config-mount
volumes:
- name: cloudsql-oauth-credentials
secret:
secretName: cloudsql-oauth-credentials
- name: ssl-certs
hostPath:
path: /etc/ssl/certs
- name: cloudsql
emptyDir:
- name: application-config
secret:
secretName: <appname>-ENV_NAME-config