Merge remote-tracking branch 'att-comdev/master' into heat

portdirect 2017-01-09 00:47:01 +00:00
commit a61caa8d91
64 changed files with 1962 additions and 368 deletions

@ -14,6 +14,30 @@ Until a 1.0.0 release, this collection is a work in progress and components will
This project is under heavy development. We encourage anyone who is interested in Openstack-Helm to review our [Getting Started](https://github.com/att-comdev/openstack-helm/blob/master/docs/installation/getting-started.md) documentation, complete with verification procedures. Feel free to ask questions or check out our current [Issues](https://github.com/att-comdev/openstack-helm/issues), [Project Plan](https://github.com/att-comdev/openstack-helm/projects/1) or submit a [Pull Request](https://github.com/att-comdev/openstack-helm/pulls).
Openstack-Helm is intended to be packaged and served from your own Helm [repository](https://github.com/kubernetes/helm/blob/master/docs/chart_repository.md). However, for quick installation, evaluation, and convenience, you can use our online Helm repository. After you've configured your environment for [Minikube](https://github.com/att-comdev/openstack-helm/blob/master/docs/developer/minikube.md) (for hostPath) or [Bare Metal](https://github.com/att-comdev/openstack-helm/blob/master/docs/installation/getting-started.md) (for PVC support), you can add our most recent repository by using the following command:
```
$ helm repo add openstack-helm https://att-comdev.github.io/openstack-helm/charts/
```
Once the repository has been added, verify the Helm chart versions by issuing the following:
```
$ helm search | grep openstack-helm
local/bootstrap 0.1.0 openstack-helm namespace bootstrap
openstack-helm/bootstrap 0.1.0 openstack-helm namespace bootstrap
openstack-helm/ceph 0.1.0 A Helm chart for Kubernetes
openstack-helm/common 0.1.0 A base chart for all openstack charts
openstack-helm/glance 0.1.0 A Helm chart for glance
openstack-helm/horizon 0.1.0 A Helm chart for horizon
openstack-helm/keystone 0.1.0 A Helm chart for keystone
openstack-helm/mariadb 0.1.0 A helm chart for mariadb
openstack-helm/memcached 0.1.0 Chart for memcached
openstack-helm/openstack 0.1.0 A Helm chart for Kubernetes
openstack-helm/rabbitmq 0.1.0 A Helm chart for Kubernetes
$
```
**UPDATED:** Please see our new [developer documentation](https://github.com/att-comdev/openstack-helm/blob/master/docs/developer/minikube.md) for Minikube.
# Additional Details

@ -78,8 +78,8 @@ spec:
timeoutSeconds: 5
resources:
requests:
memory: "512Mi"
cpu: "1000m"
memory: {{ .Values.resources.osd.requests.memory | quote }}
cpu: {{ .Values.resources.osd.requests.cpu | quote }}
limits:
memory: "1024Mi"
cpu: "2000m"
memory: {{ .Values.resources.osd.limits.memory | quote }}
cpu: {{ .Values.resources.osd.limits.cpu | quote }}
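
With the OSD requests and limits now sourced from values rather than hard-coded, they can be tuned per deployment without editing the template; an illustrative override (the numbers here are assumptions, not recommendations):
```
$ helm install openstack-helm/ceph --namespace=ceph \
    --set resources.osd.requests.memory=512Mi \
    --set resources.osd.requests.cpu=1000m \
    --set resources.osd.limits.memory=1024Mi \
    --set resources.osd.limits.cpu=2000m
```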

@ -0,0 +1,72 @@
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: mds
name: ceph-mds
spec:
replicas: 1
template:
metadata:
name: ceph-mds
labels:
app: ceph
daemon: mds
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
ports:
- containerPort: 6800
env:
- name: CEPH_DAEMON
value: MDS
- name: CEPHFS_CREATE
value: "1"
- name: KV_TYPE
value: k8s
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
livenessProbe:
tcpSocket:
port: 6800
initialDelaySeconds: 60
timeoutSeconds: 5
readinessProbe:
tcpSocket:
port: 6800
timeoutSeconds: 5
resources:
requests:
memory: {{ .Values.resources.mds.requests.memory | quote }}
cpu: {{ .Values.resources.mds.requests.cpu | quote }}
limits:
memory: {{ .Values.resources.mds.limits.memory | quote }}
cpu: {{ .Values.resources.mds.limits.cpu | quote }}

@ -0,0 +1,64 @@
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: moncheck
name: ceph-mon-check
spec:
replicas: {{ .Values.replicas.mon_check }}
template:
metadata:
name: ceph-mon
labels:
app: ceph
daemon: moncheck
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
imagePullPolicy: Always
ports:
- containerPort: 6789
env:
- name: CEPH_DAEMON
value: MON_HEALTH
- name: KV_TYPE
value: k8s
- name: MON_IP_AUTO_DETECT
value: "1"
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
resources:
requests:
memory: {{ .Values.resources.mon_check.requests.memory | quote }}
cpu: {{ .Values.resources.mon_check.requests.cpu | quote }}
limits:
memory: {{ .Values.resources.mon_check.limits.memory | quote }}
cpu: {{ .Values.resources.mon_check.limits.cpu | quote }}

@ -0,0 +1,77 @@
{{- if .Values.rgw.enabled }}
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: rgw
name: ceph-rgw
spec:
replicas: {{ .Values.replicas.rgw }}
template:
metadata:
name: ceph-rgw
labels:
app: ceph
daemon: rgw
spec:
hostNetwork: true
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-rgw
image: {{ .Values.images.daemon }}
ports:
- containerPort: {{ .Values.network.port.rgw_target }}
env:
- name: RGW_CIVETWEB_PORT
value: "{{ .Values.network.port.rgw_target }}"
- name: CEPH_DAEMON
value: RGW
- name: KV_TYPE
value: k8s
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
livenessProbe:
httpGet:
path: /
port: {{ .Values.network.port.rgw_target }}
initialDelaySeconds: 120
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: {{ .Values.network.port.rgw_target }}
timeoutSeconds: 5
resources:
requests:
memory: {{ .Values.resources.rgw.requests.memory | quote }}
cpu: {{ .Values.resources.rgw.requests.cpu | quote }}
limits:
memory: {{ .Values.resources.rgw.limits.memory | quote }}
cpu: {{ .Values.resources.rgw.limits.cpu | quote }}
{{- end }}
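
The radosgw deployment above is gated entirely on `rgw.enabled`, which defaults to `false` in the chart values; enabling it at install time might look like this (the namespace is illustrative):
```
$ helm install openstack-helm/ceph --namespace=ceph \
    --set rgw.enabled=true \
    --set replicas.rgw=3
```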

@ -1,310 +0,0 @@
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: mds
name: ceph-mds
spec:
replicas: 1
template:
metadata:
name: ceph-mds
labels:
app: ceph
daemon: mds
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
ports:
- containerPort: 6800
env:
- name: CEPH_DAEMON
value: MDS
- name: CEPHFS_CREATE
value: "1"
- name: KV_TYPE
value: k8s
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
livenessProbe:
tcpSocket:
port: 6800
initialDelaySeconds: 60
timeoutSeconds: 5
readinessProbe:
tcpSocket:
port: 6800
timeoutSeconds: 5
resources:
requests:
memory: "10Mi"
cpu: "250m"
limits:
memory: "50Mi"
cpu: "500m"
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: moncheck
name: ceph-mon-check
spec:
replicas: 1
template:
metadata:
name: ceph-mon
labels:
app: ceph
daemon: moncheck
spec:
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
imagePullPolicy: Always
ports:
- containerPort: 6789
env:
- name: CEPH_DAEMON
value: MON_HEALTH
- name: KV_TYPE
value: k8s
- name: MON_IP_AUTO_DETECT
value: "1"
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
resources:
requests:
memory: "5Mi"
cpu: "250m"
limits:
memory: "50Mi"
cpu: "500m"
---
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: mon
name: ceph-mon
spec:
replicas: 3
template:
metadata:
name: ceph-mon
labels:
app: ceph
daemon: mon
annotations:
# alanmeadows: this soft requirement allows single
# host deployments to spawn several ceph-mon
# containers
scheduler.alpha.kubernetes.io/affinity: >
{
"podAntiAffinity": {
"preferredDuringSchedulingIgnoredDuringExecution": [{
"labelSelector": {
"matchExpressions": [{
"key": "daemon",
"operator": "In",
"values":["mon"]
}]
},
"topologyKey": "kubernetes.io/hostname",
"weight": 10
}]
}
}
spec:
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
# imagePullPolicy: Always
lifecycle:
preStop:
exec:
# remove the mon on Pod stop.
command:
- "/remove-mon.sh"
ports:
- containerPort: 6789
env:
- name: CEPH_DAEMON
value: MON
- name: KV_TYPE
value: k8s
- name: NETWORK_AUTO_DETECT
value: "1"
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
livenessProbe:
tcpSocket:
port: 6789
initialDelaySeconds: 60
timeoutSeconds: 5
readinessProbe:
tcpSocket:
port: 6789
timeoutSeconds: 5
resources:
requests:
memory: "50Mi"
cpu: "1000m"
limits:
memory: "100Mi"
cpu: "2000m"
---
# rgw not required: using if statement for deployment
{{- if .Values.rgw.enabled }}
kind: Deployment
apiVersion: extensions/v1beta1
metadata:
labels:
app: ceph
daemon: rgw
name: ceph-rgw
spec:
replicas: 3
template:
metadata:
name: ceph-rgw
labels:
app: ceph
daemon: rgw
spec:
hostNetwork: true
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-rgw
image: {{ .Values.images.daemon }}
ports:
- containerPort: {{ .Values.network.port.rgw_target }}
env:
- name: RGW_CIVETWEB_PORT
value: "{{ .Values.network.port.rgw_target }}"
- name: CEPH_DAEMON
value: RGW
- name: KV_TYPE
value: k8s
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
livenessProbe:
httpGet:
path: /
port: {{ .Values.network.port.rgw_target }}
initialDelaySeconds: 120
timeoutSeconds: 5
readinessProbe:
httpGet:
path: /
port: {{ .Values.network.port.rgw_target }}
timeoutSeconds: 5
resources:
requests:
memory: "500Mi"
cpu: ".5"
limits:
memory: "500Mi"
cpu: ".5"
{{ end }}
# end: rgw removed optionally

@ -15,6 +15,8 @@ spec:
app: ceph
daemon: mon
clusterIP: None
{{- if .Values.rgw.enabled }}
---
apiVersion: v1
kind: Service
@ -31,4 +33,4 @@ spec:
selector:
app: ceph
daemon: rgw
type: LoadBalancer
{{- end }}

@ -0,0 +1,105 @@
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
labels:
app: ceph
daemon: mon
name: ceph-mon
spec:
serviceName: {{ .Values.service.mon.name | quote }}
replicas: {{ .Values.replicas.mon }}
template:
metadata:
name: ceph-mon
labels:
app: ceph
daemon: mon
annotations:
# alanmeadows: this soft requirement allows single
# host deployments to spawn several ceph-mon
# containers
scheduler.alpha.kubernetes.io/affinity: >
{
"podAntiAffinity": {
"preferredDuringSchedulingIgnoredDuringExecution": [{
"labelSelector": {
"matchExpressions": [{
"key": "daemon",
"operator": "In",
"values":["mon"]
}]
},
"topologyKey": "kubernetes.io/hostname",
"weight": 10
}]
}
}
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
serviceAccount: default
volumes:
- name: ceph-conf
secret:
secretName: ceph-conf-combined
- name: ceph-bootstrap-osd-keyring
secret:
secretName: ceph-bootstrap-osd-keyring
- name: ceph-bootstrap-mds-keyring
secret:
secretName: ceph-bootstrap-mds-keyring
- name: ceph-bootstrap-rgw-keyring
secret:
secretName: ceph-bootstrap-rgw-keyring
- name: ceph-monfs
hostPath:
path: {{ .Values.storage.mon_directory }}
containers:
- name: ceph-mon
image: {{ .Values.images.daemon }}
imagePullPolicy: Always
lifecycle:
preStop:
exec:
# remove the mon on Pod stop.
command:
- "/remove-mon.sh"
ports:
- containerPort: 6789
env:
- name: CEPH_DAEMON
value: MON
- name: KV_TYPE
value: k8s
- name: NETWORK_AUTO_DETECT
value: "1"
- name: CLUSTER
value: ceph
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
- name: ceph-bootstrap-osd-keyring
mountPath: /var/lib/ceph/bootstrap-osd
- name: ceph-bootstrap-mds-keyring
mountPath: /var/lib/ceph/bootstrap-mds
- name: ceph-bootstrap-rgw-keyring
mountPath: /var/lib/ceph/bootstrap-rgw
- name: ceph-monfs
mountPath: /var/lib/ceph/mon
livenessProbe:
tcpSocket:
port: 6789
initialDelaySeconds: 60
timeoutSeconds: 5
readinessProbe:
tcpSocket:
port: 6789
timeoutSeconds: 5
resources:
requests:
memory: {{ .Values.resources.mon.requests.memory | quote }}
cpu: {{ .Values.resources.mon.requests.cpu | quote }}
limits:
memory: {{ .Values.resources.mon.limits.memory | quote }}
cpu: {{ .Values.resources.mon.limits.cpu | quote }}
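
Since the monitor store is now backed by a `hostPath` volume, the directory named by `storage.mon_directory` must exist on every node matching the selector; with the default from `values.yaml` below, that amounts to:
```
$ sudo mkdir -p /var/lib/openstack-helm/ceph/mon
```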

@ -10,7 +10,8 @@ parameters:
monitors: {{ .Values.storageclass.monitors | default "ceph-mon.ceph:6789" }}
adminId: {{ .Values.storageclass.admin_id }}
adminSecretName: {{ .Values.storageclass.admin_secret_name }}
## forcing namespace due to issue with -- default "{{ .Release.Namespace }}" }} --
# forcing namespace due to issue with default pipeline of "{{ .Release.Namespace }}" }}
# during helm lint
adminSecretNamespace: {{ .Values.storageclass.admin_secret_namespace | default "ceph" }}
pool: {{ .Values.storageclass.pool }}
userId: {{ .Values.storageclass.user_id }}
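
For a StorageClass pointing at monitors other than the in-cluster default of `ceph-mon.ceph:6789`, the value can be overridden; a sketch with placeholder addresses:
```
storageclass:
  monitors: "192.168.0.10:6789,192.168.0.11:6789"
```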

@ -7,6 +7,15 @@
# tunables available - parameterizing more of the elements
# in the manifests is a work in progress
replicas:
mon: 3
rgw: 3
mon_check: 1
service:
mon:
name: ceph-mon
images:
daemon: quay.io/attcomdev/ceph-daemon:latest
@ -23,11 +32,52 @@ network:
storage:
osd_directory: /var/lib/openstack-helm/ceph/osd
var_directory: /var/lib/openstack-helm/ceph/ceph
mon_directory: /var/lib/openstack-helm/ceph/mon
# rgw is optionally disabled
rgw:
enabled: false
rgw:
enabled: false
resources:
osd:
requests:
memory: "512Mi"
cpu: "1000m"
limits:
memory: "1024Mi"
cpu: "2000m"
mds:
requests:
memory: "10Mi"
cpu: "250m"
limits:
memory: "50Mi"
cpu: "500m"
mon:
requests:
memory: "50Mi"
cpu: "1000m"
limits:
memory: "100Mi"
cpu: "2000m"
mon_check:
requests:
memory: "5Mi"
cpu: "250m"
limits:
memory: "50Mi"
cpu: "500m"
rgw:
requests:
memory: "5Mi"
cpu: "250m"
limits:
memory: "50Mi"
cpu: "500m"
# Setting this to false assumes you will
# set up and orchestrate your own secrets
# and configmaps outside of this helm chart
@ -63,7 +113,7 @@ secrets:
storageclass:
provision_storage_class: true
name: general
monitors: null
monitors: null
pool: rbd
admin_id: admin
admin_secret_name: pvc-ceph-conf-combined-storageclass

@ -2,6 +2,14 @@
{{- define "region"}}cluster{{- end}}
{{- define "tld"}}local{{- end}}
{{- define "fqdn" -}}
{{- $fqdn := .Release.Namespace -}}
{{- if .Values.endpoints.fqdn -}}
{{- $fqdn := .Values.endpoints.fqdn -}}
{{- end -}}
{{- $fqdn -}}
{{- end -}}
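
Note that in Go templates, `:=` inside an `{{ if }}` block declares a new variable scoped to that block, so the inner assignment never reaches the outer `$fqdn` and the helper as written always yields `.Release.Namespace`. A variable-free sketch that avoids the scoping pitfall:
```
{{- define "fqdn" -}}
{{- if .Values.endpoints.fqdn -}}
{{- .Values.endpoints.fqdn -}}
{{- else -}}
{{- .Release.Namespace -}}
{{- end -}}
{{- end -}}
```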
#-----------------------------------------
# hosts
#-----------------------------------------
@ -17,3 +25,14 @@
{{- define "keystone_api_endpoint_host_internal"}}keystone-api.{{.Release.Namespace}}.svc.{{ include "region" . }}.{{ include "tld" . }}{{- end}}
{{- define "keystone_api_endpoint_host_public"}}keystone-api.{{ include "region" . }}.{{ include "tld" . }}{{- end}}
{{- define "keystone_api_endpoint_host_admin_ext"}}keystone-api.{{ include "region" . }}.{{ include "tld" . }}{{- end}}
# glance defaults
{{- define "glance_registry_host"}}glance-registry.{{ include "fqdn" . }}{{- end}}
# nova defaults
{{- define "nova_metadata_host"}}nova-api.{{ include "fqdn" . }}{{- end}}
# neutron defaults
{{- define "neutron_db_host"}}{{ include "mariadb_host" . }}{{- end}}
{{- define "neutron_rabbit_host"}}{{- include "rabbitmq_host" .}}{{- end}}

@ -7,3 +7,6 @@ global:
region: cluster
tld: local
endpoints:
fqdn: null

@ -1,9 +1,9 @@
# Development of Openstack-Helm
Community development is extremely important to us. As an open source development team, we want the development of Openstack-Helm to be an easy experience. Please evaluate, and make recommendations. We want developers to feel welcomed to contribute to this project. Below are some instructions and suggestions to help you get started.
Community development is extremely important to us. As an open source development team, we want the development of Openstack-Helm to be an easy experience. Please evaluate, and make recommendations. We want developers to feel welcome to contribute to this project. Below are some instructions and suggestions to help you get started.
# Requirements
We've tried to minimize the amount of prerequisites required in order to get started. The main prerequisite is to install the most recent versions of Minikube and Helm.
We've tried to minimize the number of prerequisites required in order to get started. The main prerequisite is to install the most recent versions of Minikube and Helm.
**Kubernetes Minikube:**
Ensure that you have installed a recent version of [Kubernetes/Minikube](http://kubernetes.io/docs/getting-started-guides/minikube/).
@ -28,7 +28,8 @@ After installation, start Minikube with the flags listed below. Ensure that you
$ minikube start \
--network-plugin=cni \
--kubernetes-version v1.5.1 \
--disk-size 40g
--disk-size 40g \
--memory 4048
```
Next, deploy the [Calico](http://docs.projectcalico.org/master/getting-started/kubernetes/installation/hosted/hosted) manifest. This is not a requirement in cases where you want to use your own CNI-enabled SDN; however, you are doing so at your own risk. Note which versions of Calico are recommended for the project in our [Installation Guide](https://github.com/att-comdev/openstack-helm/blob/master/docs/installation/getting-started.md#overview).
@ -74,7 +75,7 @@ kube-system tiller-deploy-3299276078-n98ct 1/1 Running 0
With Helm installed, you will need to start a local [Helm server](https://github.com/kubernetes/helm/blob/7a15ad381eae794a36494084972e350306e498fd/docs/helm/helm_serve.md#helm-serve) (in the background), and point to a locally configured Helm [repository](https://github.com/kubernetes/helm/blob/7a15ad381eae794a36494084972e350306e498fd/docs/helm/helm_repo_index.md#helm-repo-index):
```
$ helm serve . &
$ helm serve &
$ helm repo add local http://localhost:8879/charts
"local" has been added to your repositories
```
@ -106,13 +107,13 @@ Perfect! You're ready to install, develop, deploy, destroy, and repeat (when n
# Installation and Testing
After following the instructions above you're environment is in a state where you can enhance the current charts, or develop new charts for the project. If you need to make changes to a chart, simply re-run `make` against the project in the top-tier directory. The charts will be updated and automatically re-pushed to your local repository.
After following the instructions above your environment is in a state where you can enhance the current charts, or develop new charts for the project. If you need to make changes to a chart, simply re-run `make` against the project in the top-tier directory. The charts will be updated and automatically re-pushed to your local repository.
Consider the following when using Minikube and development mode:
* Persistent Storage used for Minikube development mode is `hostPath`. The Ceph PVCs included with this project are not intended to work with Minikube.
* There is *no need* to install the `common`, `ceph`, or `bootstrap` charts. These charts are only required for deploying Ceph PVCs.
* Familiarize yourself wtih `values.yaml` included wtih the MariaDB chart. You will will want to have the `hostPath` directory created prior to deploying MariaDB.
* Familiarize yourself with `values.yaml` included with the MariaDB chart. You will want to have the `hostPath` directory created prior to deploying MariaDB.
* If Ceph development is required, you will need to follow the [getting started guide](https://github.com/att-comdev/openstack-helm/blob/master/docs/installation/getting-started.md) rather than this development mode documentation.
To deploy Openstack-Helm in development mode, ensure you've created a minikube-approved `hostPath` volume. Minikube is very specific about what is expected for `hostPath` volumes. The following volumes are acceptable for minikube deployments:
@ -166,7 +167,7 @@ $ helm install --name=neutron local/neutron --namespace=openstack
# Horizon Management
After each of the chart is deployed, you may wish to change the typical service endpoint for Horizon to a `nodePort` service endpoint (this is unique to Minikube deployments). Use the `kubectl edit` command to edit this service manually.
After each chart is deployed, you may wish to change the typical service endpoint for Horizon to a `nodePort` service endpoint (this is unique to Minikube deployments). Use the `kubectl edit` command to edit this service manually.
```
$ sudo kubectl edit svc horizon -n openstack
@ -200,7 +201,7 @@ status:
```
**Accessing Horizon:**<br>
*Now you're ready to manage Openstack! Point your browser to the following:*<br>
*Now you're ready to manage OpenStack! Point your browser to the following:*<br>
***URL:*** *http://192.168.99.100:31537/* <br>
***User:*** *admin* <br>
***Pass:*** *password* <br>
@ -209,7 +210,7 @@ If you have any questions, comments, or find any bugs, please submit an issue so
# Troubleshooting
In order to protect your general sanity, we've included a currated list of verification and troubleshooting steps that may help you avoid some potential issues while developing Openstack-Helm.
In order to protect your general sanity, we've included a curated list of verification and troubleshooting steps that may help you avoid some potential issues while developing Openstack-Helm.
**MariaDB**<br>
To verify the state of MariaDB, use the following command:

@ -1,5 +0,0 @@
{{- define "joinListWithColon" -}}
{{ range $k, $v := . }}{{ if $k }},{{ end }}{{ $v }}{{ end }}
{{- end -}}
{{ define "keystone_auth" }}{'auth_url':'{{ .Values.keystone.auth_url }}', 'username':'{{ .Values.keystone.admin_user }}','password':'{{ .Values.keystone.admin_password }}','project_name':'{{ .Values.keystone.admin_project_name }}','domain_name':'default'}{{end}}

@ -5,5 +5,9 @@ metadata:
data:
ceph.client.{{ .Values.ceph.glance_user }}.keyring: |+
[client.{{ .Values.ceph.glance_user }}]
{{- if .Values.ceph.glance_keyring }}
key = {{ .Values.ceph.glance_keyring }}
{{- else }}
key = {{- include "secrets/ceph-client-key" . -}}
{{- end }}
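
The conditional above means a keyring can be supplied directly through values instead of being pulled from the shared secret; an illustrative override (the key below is a placeholder, not a real credential):
```
ceph:
  glance_keyring: "AQBh8opYAAAAABAAc0hYGmDdUmtpMxgYBYnDcw=="
```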

@ -7,12 +7,17 @@ data:
[global]
rgw_thread_pool_size = 1024
rgw_num_rados_handles = 100
{{- if .Values.ceph.monitors }}
[mon]
{{ range .Values.ceph.monitors }}
[mon.{{ . }}]
host = {{ . }}
mon_addr = {{ . }}
{{ end }}
{{- else }}
mon_host = ceph-mon.ceph
{{- end }}
[client]
rbd_cache_enabled = true
rbd_cache_writethrough_until_flush = true

@ -12,7 +12,7 @@ data:
bind_port = {{ .Values.network.port.api }}
workers = {{ .Values.misc.workers }}
registry_host = glance-registry
registry_host = {{ include "glance_registry_host" . }}
# Enable Copy-on-Write
show_image_direct_url = True
@ -45,3 +45,4 @@ data:
rbd_store_user = {{ .Values.ceph.glance_user }}
rbd_store_ceph_conf = /etc/ceph/ceph.conf
rbd_store_chunk_size = 8

@ -6,12 +6,13 @@ data:
post.sh: |+
#!/bin/bash
set -ex
export HOME=/tmp
ansible localhost -vvv -m kolla_keystone_service -a "service_name=glance \
service_type=image \
description='Openstack Image' \
endpoint_region='{{ .Values.keystone.glance_region_name }}' \
url='http://glance-api:{{ .Values.network.port.api }}' \
url='{{ include "endpoint_glance_api_internal" . }}' \
interface=admin \
region_name='{{ .Values.keystone.admin_region_name }}' \
auth='{{ include "keystone_auth" . }}'" \
@ -21,7 +22,7 @@ data:
service_type=image \
description='Openstack Image' \
endpoint_region='{{ .Values.keystone.glance_region_name }}' \
url='http://glance-api:{{ .Values.network.port.api }}' \
url='{{ include "endpoint_glance_api_internal" . }}' \
interface=internal \
region_name='{{ .Values.keystone.admin_region_name }}' \
auth='{{ include "keystone_auth" . }}'" \
@ -31,7 +32,7 @@ data:
service_type=image \
description='Openstack Image' \
endpoint_region='{{ .Values.keystone.glance_region_name }}' \
url='http://glance-api:{{ .Values.network.port.api }}' \
url='{{ include "endpoint_glance_api_internal" . }}' \
interface=public \
region_name='{{ .Values.keystone.admin_region_name }}' \
auth='{{ include "keystone_auth" . }}'" \
@ -44,3 +45,4 @@ data:
region_name={{ .Values.keystone.admin_region_name }} \
auth='{{ include "keystone_auth" . }}'" \
-e "{ 'openstack_glance_auth': {{ include "keystone_auth" . }} }"

@ -32,6 +32,8 @@ spec:
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
restartPolicy: OnFailure
containers:
- name: glance-post
@ -51,3 +53,4 @@ spec:
- name: postsh
configMap:
name: glance-postsh

@ -33,7 +33,7 @@ network:
port:
api: 9292
registry: 9191
ip_address: "{{ .IP }}"
ip_address: "0.0.0.0"
database:
address: mariadb
@ -47,9 +47,12 @@ database:
ceph:
enabled: true
monitors: []
glance_user: "glance"
glance_user: "admin"
glance_pool: "images"
glance_keyring: ""
# a null value for the keyring will
# attempt to use the key from
# common/secrets/ceph-client-key
glance_keyring: null
misc:
workers: 8
@ -97,4 +100,28 @@ dependencies:
- mariadb
- keystone-api
- glance-api
- glance-registry
- glance-registry
# typically overridden by environment-specific
# values, but should include all endpoints
# required by this chart
endpoints:
glance:
hosts:
default: glance-api
type: image
path: null
scheme: 'http'
port:
api: 9292
registry: 9191
keystone:
hosts:
default: keystone-api
path: /v3
type: identity
scheme: 'http'
port:
admin: 35357
public: 5000
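
Assuming the `common` chart's endpoint helpers assemble URLs from this scheme/host/port data, `endpoint_glance_api_internal` as used in the post job above would presumably render as something like:
```
http://glance-api:9292
```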

@ -38,6 +38,6 @@ endpoints:
type: identity
scheme: 'http'
port:
admin: 35356
admin: 35357
public: 5000

@ -15,7 +15,8 @@ set -ex
keystone-manage db_sync
kolla_keystone_bootstrap {{ .Values.keystone.admin_user }} {{ .Values.keystone.admin_password }} \
{{ .Values.keystone.admin_project_name }} admin \
{{ .Values.keystone.scheme }}://{{ include "keystone_api_endpoint_host_admin" . }}:{{ .Values.network.port.admin }}/{{ .Values.keystone.version }} \
{{ .Values.keystone.scheme }}://{{ include "keystone_api_endpoint_host_internal" . }}:{{ .Values.network.port.public }}/{{ .Values.keystone.version }} \
{{ .Values.keystone.scheme }}://{{ include "keystone_api_endpoint_host_public" . }}:{{ .Values.network.port.public }}/{{ .Values.keystone.version }} \
{{ include "endpoint_keystone_admin" . }} \
{{ include "endpoint_keystone_internal" . }} \
{{ include "endpoint_keystone_internal" . }} \
{{ .Values.keystone.admin_region_name }}

@ -9,11 +9,11 @@ connection = mysql+pymysql://{{ .Values.database.keystone_user }}:{{ .Values.dat
max_retries = -1
[memcache]
servers = {{ include "memcached_host" . }}
servers = {{ include "memcached_host" . }}:11211
[cache]
backend = dogpile.cache.memcached
memcache_servers = {{ include "memcached_host" . }}
memcache_servers = {{ include "memcached_host" . }}:11211
config_prefix = cache.keystone
distributed_lock = True
enabled = True

@ -17,7 +17,7 @@ images:
pull_policy: "IfNotPresent"
keystone:
version: v2.0
version: v3
scheme: http
admin_region_name: RegionOne
admin_user: admin
@ -67,3 +67,18 @@ dependencies:
- mariadb-seed
service:
- mariadb
# typically overridden by environment-specific
# values, but should include all endpoints
# required by this chart
endpoints:
keystone:
hosts:
default: keystone-api
path: /v3
type: identity
scheme: 'http'
port:
admin: 35357
public: 5000

maas/requirements.yaml (new file)

@ -0,0 +1,4 @@
dependencies:
- name: common
repository: http://localhost:8879/charts
version: 0.1.0

@ -0,0 +1,14 @@
#!/bin/bash
set -ex
if ! find "/etc/postgresql" -mindepth 1 -print -quit | grep -q .; then
pg_createcluster 9.5 main
fi
cp -r /etc/postgresql/9.5/main/*.conf /var/lib/postgresql/9.5/main/
pg_ctlcluster 9.5 main start
echo 'running postinst'
chmod 755 /var/lib/dpkg/info/maas-region-controller.postinst
/bin/sh /var/lib/dpkg/info/maas-region-controller.postinst configure

@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: maas-region-bin
data:
start.sh: |
{{ tuple "bin/_start.sh.tpl" . | include "template" | indent 4 }}

@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: maas-region-etc
data:
named.conf.options: |+
{{ tuple "etc/_region-dns-config.tpl" . | include "template" | indent 4 }}

@ -0,0 +1,10 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: maas-region-var
data:
maas-region-controller.postinst: |
{{ tuple "var/_maas-region-controller.postinst.tpl" . | include "template" | indent 4 }}
secret: |
{{ tuple "var/_secret.tpl" . | include "template" | indent 4 }}

@ -1,12 +1,55 @@
apiVersion: extensions/v1beta1
kind: Deployment
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
name: maas-region
spec:
serviceName: "{{ .Values.service_name }}"
template:
metadata:
labels:
app: maas-region
annotations:
pod.beta.kubernetes.io/init-containers: '[
{
"name": "init",
"image": "{{ .Values.images.maas_region }}",
"imagePullPolicy": "Always",
"command": [
"/bin/bash", "-c"
],
"args": [
"chmod +x /tmp/start.sh; /tmp/start.sh"
],
"volumeMounts": [
{
"name": "maas-config",
"mountPath": "/etc/maas/"
},
{
"name": "postgresql-config",
"mountPath": "/etc/postgresql"
},
{
"name": "postgresql-data",
"mountPath": "/var/lib/postgresql"
},
{
"name": "postgresql-run",
"mountPath": "/var/run/postgresql"
},
{
"name": "startsh",
"mountPath": "/tmp/start.sh",
"subPath": "start.sh"
},
{
"name": "maasregionpostinst",
"mountPath": "/var/lib/dpkg/info/maas-region-controller.postinst",
"subPath": "maas-region-controller.postinst"
}
]
}
]'
spec:
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
@ -18,3 +61,45 @@ spec:
- containerPort: {{ .Values.network.port.region_container }}
securityContext:
privileged: true
volumeMounts:
- name: postgresql-data
mountPath: /var/lib/postgresql
- name: postgresql-run
mountPath: /var/run/postgresql
- name: maas-lib
mountPath: /var/lib/maas
- name: maas-region-secret
mountPath: /var/lib/maas/secret
subPath: secret
- name: postgresql-config
mountPath: /etc/postgresql
- name: maas-dns-config
mountPath: /etc/bind/named.conf.options
subPath: named.conf.options
- name: maas-config
mountPath: /etc/maas/regiond.conf
subPath: regiond.conf
volumes:
- name: postgresql-data
hostPath:
path: /var/lib/postgresql
- name: postgresql-run
emptyDir: {}
- name: postgresql-config
emptyDir: {}
- name: maas-lib
emptyDir: {}
- name: maas-region-secret
configMap:
name: maas-region-var
- name: maas-config
emptyDir: {}
- name: maas-dns-config
configMap:
name: maas-region-etc
- name: startsh
configMap:
name: maas-region-bin
- name: maasregionpostinst
configMap:
name: maas-region-var

@ -0,0 +1,4 @@
options { directory "/var/cache/bind";
auth-nxdomain no;
listen-on-v6 { any; };
include "/etc/bind/maas/named.conf.options.inside.maas"; };

@ -1,10 +1,11 @@
apiVersion: v1
kind: Service
metadata:
name: maas-region-ui
name: {{ .Values.service_name }}
labels:
app: maas-region-ui
spec:
type: NodePort
ports:
- port: {{ .Values.network.port.service_gui }}
targetPort: {{ .Values.network.port.service_gui_target }}

@ -0,0 +1,149 @@
#!/bin/sh
set -ex
. /usr/share/debconf/confmodule
db_version 2.0
if [ -f /usr/share/dbconfig-common/dpkg/postinst.pgsql ]; then
. /usr/share/dbconfig-common/dpkg/postinst.pgsql
fi
RELEASE=`lsb_release -rs` || RELEASE=""
maas_sync_migrate_db(){
maas-region dbupgrade
}
restart_postgresql(){
invoke-rc.d --force postgresql restart || true
}
configure_maas_default_url() {
local ipaddr="$1"
# The given address is either "[IPv6_IP]" or "IPv4_IP" or "name", such as
# [2001:db8::3:1]:5555 or 127.0.0.1 or maas.example.com.
# The ugly sed splits the given thing as:
# (string of anything but ":", or [ipv6_ip]),
# optionally followed by :port.
local address=$(echo "$ipaddr" |
sed -rn 's/^([^:]*|\[[0-9a-fA-F:]*\])(|:[0-9]*)?$/\1/p')
local port=$(echo "$ipaddr" |
sed -rn 's/^([^:]*|\[[0-9a-fA-F:]*\])(|:[0-9]*)?$/\2/p')
test -n "$port" || port=":80"
ipaddr="${ipaddr}${port}"
maas-region local_config_set --maas-url "http://${ipaddr}/MAAS"
}
get_default_route_ip6() {
while read Src SrcPref Dest DestPref Gateway Metric RefCnt Use Flags Iface
do
[ "$SrcPref" = 00 ] && [ "$Iface" != lo ] && break
done < /proc/net/ipv6_route
if [ -n "$Iface" ]; then
LC_ALL=C /sbin/ip -6 addr list dev "$Iface" scope global permanent |
sed -n '/ inet6 /s/.*inet6 \([0-9a-fA-F:]*\).*/[\1]/p' | head -1
fi
}
get_default_route_ip4() {
while read Iface Destination Gateway Flags RefCnt Use Metric Mask MTU Window IRTT
do
[ "$Mask" = "00000000" ] && break
done < /proc/net/route
if [ -n "$Iface" ]; then
ipaddr=$(LC_ALL=C /sbin/ip -4 addr list dev "$Iface" scope global)
ipaddr=${ipaddr#* inet }
ipaddr=${ipaddr%%/*}
echo $ipaddr
fi
}
extract_default_maas_url() {
# Extract DEFAULT_MAAS_URL IP/host setting from config file $1.
grep "^DEFAULT_MAAS_URL" "$1" | cut -d"/" -f3
}
configure_migrate_maas_dns() {
# This only runs on upgrade. We only run this if there
# are forwarders to migrate or no
# named.conf.options.inside.maas is present.
maas-region edit_named_options \
--migrate-conflicting-options --config-path \
/etc/bind/named.conf.options
invoke-rc.d bind9 restart || true
}
if [ "$1" = "configure" ] && [ -z "$2" ]; then
#########################################################
########## Configure DEFAULT_MAAS_URL #################
#########################################################
# Obtain IP address of default route and change DEFAULT_MAAS_URL
# if default-maas-url has not been preseeded. Prefer ipv4 addresses if
# present, and use "localhost" only if there is no default route in either
# address family.
db_get maas/default-maas-url
ipaddr="$RET"
if [ -z "$ipaddr" ]; then
#ipaddr=$(get_default_route_ip4)
ipaddr="maas-region-ui.{{ .Release.Namespace }}"
fi
if [ -z "$ipaddr" ]; then
#ipaddr=$(get_default_route_ip6)
ipaddr="maas-region-ui.{{ .Release.Namespace }}"
fi
# Fallback default is "localhost"
if [ -z "$ipaddr" ]; then
ipaddr=localhost
fi
# Set the IP address of the interface with default route
configure_maas_default_url "$ipaddr"
db_subst maas/installation-note MAAS_URL "$ipaddr"
db_set maas/default-maas-url "$ipaddr"
#########################################################
################ Configure Database ###################
#########################################################
# Need to force postgresql start so it doesn't fail on the installer
restart_postgresql
# Create the database
dbc_go maas-region-controller $@
maas-region local_config_set \
--database-host "localhost" --database-name "$dbc_dbname" \
--database-user "$dbc_dbuser" --database-pass "$dbc_dbpass"
# Only syncdb if we have selected to install it with dbconfig-common.
db_get maas-region-controller/dbconfig-install
if [ "$RET" = "true" ]; then
maas_sync_migrate_db
configure_migrate_maas_dns
fi
db_get maas/username
username="$RET"
if [ -n "$username" ]; then
db_get maas/password
password="$RET"
if [ -n "$password" ]; then
maas-region createadmin --username "$username" --password "$password" --email "$username@maas"
fi
fi
# Display installation note
db_input low maas/installation-note || true
db_go
fi
systemctl enable maas-regiond >/dev/null || true
systemctl restart maas-regiond >/dev/null || true
invoke-rc.d apache2 restart || true
if [ -f /lib/systemd/system/maas-rackd.service ]; then
systemctl restart maas-rackd >/dev/null || true
fi
db_stop

@ -0,0 +1 @@
3858f62230ac3c915f300c664312c63f

@ -3,8 +3,8 @@
# Declare variables to be passed into your templates.
images:
maas_region: quay.io/attcomdev/maas-region:1.0.1
maas_rack: quay.io/attcomdev/maas-rack:1.0.1
maas_region: quay.io/attcomdev/maas-region:2.1.2-1
maas_rack: quay.io/attcomdev/maas-rack:2.1.2
labels:
node_selector_key: openstack-control-plane
@ -17,3 +17,5 @@ network:
service_gui_target: 80
service_proxy: 8000
service_proxy_target: 8000
service_name: maas-region-ui

@ -3,7 +3,7 @@ kind: Deployment
metadata:
name: memcached
spec:
replicas: 1
replicas: {{ .Values.resources.memcached.replicas }}
template:
metadata:
labels:
@ -14,20 +14,15 @@ spec:
containers:
- name: memcached
image: {{ .Values.images.memcached }}
imagePullPolicy: Always
env:
- name: INTERFACE_NAME
value: "eth0"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "memcached -v -p {{ .Values.network.port }} -U 0 -c 8192 -m 1024"
imagePullPolicy: {{ .Values.images.pull_policy }}
command: ["sh", "-xec"]
args:
- |
exec memcached -v \
-p {{ .Values.network.port }} \
-U 0 \
-c {{ .Values.memcached.max_connections }} \
-m {{ .Values.memcached.memory }};
ports:
- containerPort: {{ .Values.network.port }}
readinessProbe:

@ -5,6 +5,7 @@
images:
memcached: quay.io/stackanetes/stackanetes-memcached:newton
pull_policy: "IfNotPresent"
labels:
node_selector_key: openstack-control-plane
@ -12,3 +13,11 @@ labels:
network:
port: 11211
memcached:
memory: 1024
max_connections: 8192
resources:
memcached:
replicas: 1
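
With the memcached tunables and replica count exposed through values, the rendered command line can be adjusted without touching the template; an illustrative install:
```
$ helm install local/memcached --namespace=openstack \
    --set memcached.memory=2048 \
    --set memcached.max_connections=16384 \
    --set resources.memcached.replicas=2
```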

neutron/Chart.yaml (new file)

@ -0,0 +1,3 @@
description: A Helm chart for neutron
name: neutron
version: 0.1.0

@ -0,0 +1,4 @@
dependencies:
- name: common
repository: http://localhost:8879/charts
version: 0.1.0

@ -0,0 +1,18 @@
#!/bin/bash
set -ex
export HOME=/tmp
ansible localhost -vvv -m mysql_db -a "login_host='{{ include "neutron_db_host" . }}' \
login_port='{{ .Values.database.port }}' \
login_user='{{ .Values.database.root_user }}' \
login_password='{{ .Values.database.root_password }}' \
name='{{ .Values.database.neutron_database_name }}'"
ansible localhost -vvv -m mysql_user -a "login_host='{{ include "neutron_db_host" . }}' \
login_port='{{ .Values.database.port }}' \
login_user='{{ .Values.database.root_user }}' \
login_password='{{ .Values.database.root_password }}' \
name='{{ .Values.database.neutron_user }}' \
password='{{ .Values.database.neutron_password }}' \
host='%' \
priv='{{ .Values.database.neutron_database_name }}.*:ALL' append_privs='yes'"

@ -0,0 +1,16 @@
#!/bin/bash
set -x
chown neutron: /run/openvswitch/db.sock
# determine local-ip dynamically based on interface provided but only if tunnel_types is not null
{{- if .Values.ml2.agent.tunnel_types }}
IP=$(ip a s {{ .Values.network.interface.tunnel | default .Values.network.interface.default}} | grep 'inet ' | awk '{print $2}' | awk -F "/" '{print $1}')
cat <<EOF>/tmp/ml2-local-ip.ini
[ovs]
local_ip = $IP
EOF
{{- else }}
touch /tmp/ml2-local-ip.ini
{{- end }}
exec sudo -E -u neutron neutron-openvswitch-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2-conf.ini --config-file /tmp/ml2-local-ip.ini

@ -0,0 +1,10 @@
#!/bin/bash
set -ex
mkdir -p "/run/openvswitch"
if [[ ! -e "/run/openvswitch/conf.db" ]]; then
ovsdb-tool create "/run/openvswitch/conf.db"
fi
umask 000
exec /usr/sbin/ovsdb-server /run/openvswitch/conf.db -vconsole:emer -vconsole:err -vconsole:info --remote=punix:/run/openvswitch/db.sock

@ -0,0 +1,17 @@
#!/bin/bash
set -x
bridge=$1
port=$2
# note that only "br-ex" is definable right now
# and br-int and br-tun are assumed and handled
# by the agent
ovs-vsctl --no-wait --may-exist add-br $bridge
ovs-vsctl --no-wait --may-exist add-port $bridge $port
# handle any bridge mappings
{{- range $bridge, $port := .Values.ml2.ovs.auto_bridge_add }}
ovs-vsctl --no-wait --may-exist add-br {{ $bridge }}
ovs-vsctl --no-wait --may-exist add-port {{ $bridge }} {{ $port }}
{{- end}}
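
The `range` above iterates `auto_bridge_add` as a map of bridge name to port, so extra bridges beyond `br-ex` can be declared purely in values; an assumed illustrative override:
```
ml2:
  ovs:
    auto_bridge_add:
      br-physnet1: eth1
```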

@ -0,0 +1,14 @@
#!/bin/bash
set -ex
# load tunnel kernel modules we may use and gre/vxlan
modprobe openvswitch
{{- if .Values.ml2.agent.tunnel_types }}
modprobe gre
modprobe vxlan
{{- end }}
ovs-vsctl --no-wait show
bash /tmp/openvswitch-ensure-configured.sh {{ .Values.network.external_bridge }} {{ .Values.network.interface.external | default .Values.network.interface.default }}
exec /usr/sbin/ovs-vswitchd unix:/run/openvswitch/db.sock --mlockall -vconsole:emer -vconsole:err -vconsole:info

@ -0,0 +1,41 @@
#!/bin/bash
set -ex
export HOME=/tmp
ansible localhost -vvv -m kolla_keystone_service -a "service_name=neutron \
service_type=network \
description='Openstack Networking' \
endpoint_region={{ .Values.keystone.neutron_region_name }} \
url='{{ include "endpoint_neutron_api_internal" . }}' \
interface=admin \
region_name={{ .Values.keystone.admin_region_name }} \
auth='{{ include "keystone_auth" .}}'" \
-e "{'openstack_neutron_auth':{{ include "keystone_auth" .}}}"
ansible localhost -vvv -m kolla_keystone_service -a "service_name=neutron \
service_type=network \
description='Openstack Networking' \
endpoint_region={{ .Values.keystone.neutron_region_name }} \
url='{{ include "endpoint_neutron_api_internal" . }}' \
interface=internal \
region_name={{ .Values.keystone.admin_region_name }} \
auth='{{ include "keystone_auth" .}}'" \
-e "{'openstack_neutron_auth':{{ include "keystone_auth" .}}}"
ansible localhost -vvv -m kolla_keystone_service -a "service_name=neutron \
service_type=network \
description='Openstack Networking' \
endpoint_region={{ .Values.keystone.neutron_region_name }} \
url='{{ include "endpoint_neutron_api_internal" . }}' \
interface=public \
region_name={{ .Values.keystone.admin_region_name }} \
auth='{{ include "keystone_auth" .}}'" \
-e "{'openstack_neutron_auth':{{ include "keystone_auth" .}}}"
ansible localhost -vvv -m kolla_keystone_user -a "project=service \
user={{ .Values.keystone.neutron_user }} \
password={{ .Values.keystone.neutron_password }} \
role=admin \
region_name={{ .Values.keystone.neutron_region_name }} \
auth='{{ include "keystone_auth" .}}'" \
-e "{'openstack_neutron_auth':{{ include "keystone_auth" .}}}"

@ -0,0 +1,17 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: neutron-bin
data:
init.sh: |
{{ tuple "bin/_init.sh.tpl" . | include "template" | indent 4 }}
neutron-openvswitch-agent.sh: |
{{ tuple "bin/_neutron-openvswitch-agent.sh.tpl" . | include "template" | indent 4 }}
openvswitch-db-server.sh: |
{{ tuple "bin/_openvswitch-db-server.sh.tpl" . | include "template" | indent 4 }}
openvswitch-ensure-configured.sh: |
{{ tuple "bin/_openvswitch-ensure-configured.sh.tpl" . | include "template" | indent 4 }}
openvswitch-vswitchd.sh: |
{{ tuple "bin/_openvswitch-vswitchd.sh.tpl" . | include "template" | indent 4 }}
post.sh: |
{{ tuple "bin/_post.sh.tpl" . | include "template" | indent 4 }}

@ -0,0 +1,19 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: neutron-etc
data:
dhcp-agent.ini: |
{{ tuple "etc/_dhcp-agent.ini.tpl" . | include "template" | indent 4 }}
l3-agent.ini: |
{{ tuple "etc/_l3-agent.ini.tpl" . | include "template" | indent 4 }}
metadata-agent.ini: |
{{ tuple "etc/_metadata-agent.ini.tpl" . | include "template" | indent 4 }}
ml2-conf.ini: |
{{ tuple "etc/_ml2-conf.ini.tpl" . | include "template" | indent 4 }}
neutron.conf: |
{{ tuple "etc/_neutron.conf.tpl" . | include "template" | indent 4 }}
resolv.conf: |
{{ tuple "etc/_resolv.conf.tpl" . | include "template" | indent 4 }}
dnsmasq.conf: ""

@ -0,0 +1,83 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: neutron-dhcp-agent
spec:
template:
metadata:
labels:
app: neutron-dhcp-agent
spec:
nodeSelector:
{{ .Values.labels.agent.dhcp.node_selector_key }}: {{ .Values.labels.agent.dhcp.node_selector_value }}
securityContext:
runAsUser: 0
dnsPolicy: ClusterFirst
hostNetwork: true
containers:
- name: neutron-dhcp-agent
image: {{ .Values.images.dhcp }}
imagePullPolicy: {{ .Values.images.pull_policy }}
securityContext:
privileged: true
env:
- name: INTERFACE_NAME
value: {{ .Values.network.interface.dhcp | default .Values.network.interface.default }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "neutron-dhcp-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/dhcp-agent.ini"
- name: DEPENDENCY_JOBS
value: "{{ include "joinListWithColon" .Values.dependencies.dhcp.jobs }}"
- name: DEPENDENCY_SERVICE
value: "{{ include "joinListWithColon" .Values.dependencies.dhcp.service }}"
- name: DEPENDENCY_DAEMONSET
value: "{{ include "joinListWithColon" .Values.dependencies.dhcp.daemonset }}"
volumeMounts:
- name: neutronconf
mountPath: /etc/neutron/neutron.conf
subPath: neutron.conf
- name: ml2confini
mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini
subPath: ml2-conf.ini
- name: dhcpagentini
mountPath: /etc/neutron/dhcp-agent.ini
subPath: dhcp-agent.ini
- name: dnsmasqconf
mountPath: /etc/neutron/dnsmasq.conf
subPath: dnsmasq.conf
- name: runopenvswitch
mountPath: /run/openvswitch
- name: socket
mountPath: /var/lib/neutron/openstack-helm
- name: resolvconf
mountPath: /etc/resolv.conf
subPath: resolv.conf
volumes:
- name: neutronconf
configMap:
name: neutron-etc
- name: ml2confini
configMap:
name: neutron-etc
- name: dhcpagentini
configMap:
name: neutron-etc
- name: dnsmasqconf
configMap:
name: neutron-etc
- name: runopenvswitch
hostPath:
path: /run/openvswitch
- name: resolvconf
configMap:
name: neutron-etc
- name: socket
hostPath:
path: /var/lib/neutron/openstack-helm

@ -0,0 +1,77 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: neutron-l3-agent
spec:
template:
metadata:
labels:
app: neutron-l3-agent
spec:
nodeSelector:
{{ .Values.labels.agent.l3.node_selector_key }}: {{ .Values.labels.agent.l3.node_selector_value }}
securityContext:
runAsUser: 0
dnsPolicy: ClusterFirst
hostNetwork: true
containers:
- name: neutron-l3-agent
image: {{ .Values.images.l3 }}
imagePullPolicy: {{ .Values.images.pull_policy }}
securityContext:
privileged: true
env:
- name: INTERFACE_NAME
value: {{ .Values.network.interface.l3 | default .Values.network.interface.default }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "neutron-l3-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/l3-agent.ini --config-file /etc/neutron/plugins/ml2/ml2-conf.ini"
- name: DEPENDENCY_JOBS
value: "{{ include "joinListWithColon" .Values.dependencies.l3.jobs }}"
- name: DEPENDENCY_SERVICE
value: "{{ include "joinListWithColon" .Values.dependencies.l3.service }}"
- name: DEPENDENCY_DAEMONSET
value: "{{ include "joinListWithColon" .Values.dependencies.l3.daemonset }}"
volumeMounts:
- name: neutronconf
mountPath: /etc/neutron/neutron.conf
subPath: neutron.conf
- name: ml2confini
mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini
subPath: ml2-conf.ini
- name: l3agentini
mountPath: /etc/neutron/l3-agent.ini
subPath: l3-agent.ini
- name: resolvconf
mountPath: /etc/resolv.conf
subPath: resolv.conf
- name: runopenvswitch
mountPath: /run/openvswitch
- name: socket
mountPath: /var/lib/neutron/stackanetes
volumes:
- name: neutronconf
configMap:
name: neutron-etc
- name: ml2confini
configMap:
name: neutron-etc
- name: resolvconf
configMap:
name: neutron-etc
- name: l3agentini
configMap:
name: neutron-etc
- name: runopenvswitch
hostPath:
path: /run/openvswitch
- name: socket
hostPath:
path: /var/lib/neutron/stackanetes

@ -0,0 +1,79 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: neutron-metadata-agent
spec:
template:
metadata:
labels:
app: neutron-metadata-agent
spec:
nodeSelector:
{{ .Values.labels.agent.metadata.node_selector_key }}: {{ .Values.labels.agent.metadata.node_selector_value }}
securityContext:
runAsUser: 0
dnsPolicy: ClusterFirst
hostNetwork: true
containers:
- name: neutron-metadata-agent
image: {{ .Values.images.metadata }}
imagePullPolicy: {{ .Values.images.pull_policy }}
securityContext:
privileged: true
env:
- name: INTERFACE_NAME
value: {{ .Values.network.interface.metadata | default .Values.network.interface.default }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "neutron-metadata-agent --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/metadata-agent.ini"
- name: DEPENDENCY_JOBS
value: "{{ include "joinListWithColon" .Values.dependencies.metadata.jobs }}"
- name: DEPENDENCY_SERVICE
value: "{{ include "joinListWithColon" .Values.dependencies.metadata.service }}"
- name: DEPENDENCY_DAEMONSET
value: "{{ include "joinListWithColon" .Values.dependencies.metadata.daemonset }}"
ports:
- containerPort: {{ .Values.network.port.metadata }}
volumeMounts:
- name: neutronconf
mountPath: /etc/neutron/neutron.conf
subPath: neutron.conf
- name: ml2confini
mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini
subPath: ml2-conf.ini
- name: metadataagentini
mountPath: /etc/neutron/metadata-agent.ini
subPath: metadata-agent.ini
- name: resolvconf
mountPath: /etc/resolv.conf
subPath: resolv.conf
- name: runopenvswitch
mountPath: /run/openvswitch
- name: socket
mountPath: /var/lib/neutron/stackanetes
volumes:
- name: neutronconf
configMap:
name: neutron-etc
- name: ml2confini
configMap:
name: neutron-etc
- name: metadataagentini
configMap:
name: neutron-etc
- name: resolvconf
configMap:
name: neutron-etc
- name: runopenvswitch
hostPath:
path: /run/openvswitch
- name: socket
hostPath:
path: /var/lib/neutron/openstack-helm

@ -0,0 +1,166 @@
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: neutron-openvswitch
spec:
template:
metadata:
labels:
app: neutron-openvswitch
spec:
nodeSelector:
{{ .Values.labels.ovs.node_selector_key }}: {{ .Values.labels.ovs.node_selector_value }}
securityContext:
runAsUser: 0
dnsPolicy: ClusterFirst
hostNetwork: true
containers:
- name: neutron-openvswitch-agent
image: {{ .Values.images.neutron_openvswitch_agent }}
imagePullPolicy: {{ .Values.images.pull_policy }}
securityContext:
privileged: true
# ensures this container can see a br-int
# bridge before it's marked as ready
readinessProbe:
exec:
command:
- bash
- -c
- 'ovs-vsctl list-br | grep -q br-int'
env:
- name: INTERFACE_NAME
value: {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "bash /tmp/neutron-openvswitch-agent.sh"
- name: DEPENDENCY_JOBS
value: "{{ include "joinListWithColon" .Values.dependencies.openvswitchagent.jobs }}"
- name: DEPENDENCY_SERVICE
value: "{{ include "joinListWithColon" .Values.dependencies.openvswitchagent.service }}"
- name: DEPENDENCY_CONTAINER
value: "{{ include "joinListWithColon" .Values.dependencies.openvswitchagent.container }}"
volumeMounts:
- name: neutronopenvswitchagentsh
mountPath: /tmp/neutron-openvswitch-agent.sh
subPath: neutron-openvswitch-agent.sh
- name: neutronconf
mountPath: /etc/neutron/neutron.conf
subPath: neutron.conf
- name: ml2confini
mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini
subPath: ml2-conf.ini
- name: libmodules
mountPath: /lib/modules
readOnly: true
- name: run
mountPath: /run
- mountPath: /etc/resolv.conf
name: resolvconf
subPath: resolv.conf
- name: openvswitch-db-server
image: {{ .Values.images.openvswitch_db_server }}
imagePullPolicy: {{ .Values.images.pull_policy }}
securityContext:
privileged: true
env:
- name: INTERFACE_NAME
value: {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "bash /tmp/openvswitch-db-server.sh"
volumeMounts:
- name: openvswitchdbserversh
mountPath: /tmp/openvswitch-db-server.sh
subPath: openvswitch-db-server.sh
- mountPath: /etc/resolv.conf
name: resolvconf
subPath: resolv.conf
- name: varlibopenvswitch
mountPath: /var/lib/openvswitch/
- name: run
mountPath: /run
- name: openvswitch-vswitchd
image: {{ .Values.images.openvswitch_vswitchd }}
imagePullPolicy: {{ .Values.images.pull_policy }}
securityContext:
privileged: true
# ensures this container can speak to the ovs database
# successfully before it's marked as ready
readinessProbe:
exec:
command:
- /usr/bin/ovs-vsctl
- show
env:
- name: INTERFACE_NAME
value: {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }}
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "bash /tmp/openvswitch-vswitchd.sh"
- name: DEPENDENCY_CONTAINER
value: "openvswitch-db-server"
volumeMounts:
- name: openvswitchvswitchdsh
mountPath: /tmp/openvswitch-vswitchd.sh
subPath: openvswitch-vswitchd.sh
- name: openvswitchensureconfiguredsh
mountPath: /tmp/openvswitch-ensure-configured.sh
subPath: openvswitch-ensure-configured.sh
- name: libmodules
mountPath: /lib/modules
readOnly: true
- name: run
mountPath: /run
volumes:
- name: openvswitchdbserversh
configMap:
name: neutron-bin
- name: openvswitchvswitchdsh
configMap:
name: neutron-bin
- name: openvswitchensureconfiguredsh
configMap:
name: neutron-bin
- name: varlibopenvswitch
emptyDir: {}
- name: neutronopenvswitchagentsh
configMap:
name: neutron-bin
- name: neutronconf
configMap:
name: neutron-etc
- name: ml2confini
configMap:
name: neutron-etc
- name: resolvconf
configMap:
name: neutron-etc
- name: libmodules
hostPath:
path: /lib/modules
- name: run
hostPath:
path: /run

@ -0,0 +1,53 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: neutron-server
spec:
replicas: {{ .Values.replicas.server }}
template:
metadata:
labels:
app: neutron-server
spec:
nodeSelector:
{{ .Values.labels.server.node_selector_key }}: {{ .Values.labels.server.node_selector_value }}
containers:
- name: neutron-server
image: {{ .Values.images.server }}
imagePullPolicy: {{ .Values.images.pull_policy }}
env:
- name: INTERFACE_NAME
value: "eth0"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "neutron-server --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2-conf.ini"
- name: DEPENDENCY_JOBS
value: "{{ include "joinListWithColon" .Values.dependencies.server.jobs }}"
- name: DEPENDENCY_SERVICE
value: "{{ include "joinListWithColon" .Values.dependencies.server.service }}"
ports:
- containerPort: {{ .Values.network.port.server }}
readinessProbe:
tcpSocket:
port: {{ .Values.network.port.server }}
volumeMounts:
- name: neutronconf
mountPath: /etc/neutron/neutron.conf
subPath: neutron.conf
- name: ml2confini
mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini
subPath: ml2-conf.ini
volumes:
- name: neutronconf
configMap:
name: neutron-etc
- name: ml2confini
configMap:
name: neutron-etc

@ -0,0 +1,5 @@
[DEFAULT]
dnsmasq_config_file = /etc/neutron/dnsmasq.conf
enable_isolated_metadata = true
force_metadata = true
interface_driver = openvswitch
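# NOTE: with force_metadata = true the DHCP agent should serve the metadata
# route on every network it handles, not only isolated ones;
# enable_isolated_metadata then mainly covers networks without a router.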

@ -0,0 +1,4 @@
[DEFAULT]
agent_mode = legacy
enable_metadata_proxy = True
enable_isolated_metadata = True
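# NOTE: agent_mode = legacy runs centralized routers only; dvr and dvr_snat
# are the usual alternatives when distributed routing is wanted, though this
# chart does not currently template values for them.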

@ -0,0 +1,31 @@
[DEFAULT]
debug = {{ .Values.metadata_agent.default.debug }}
# Neutron credentials for API access
auth_plugin = password
auth_url = {{ include "endpoint_keystone_admin" . }}
auth_uri = {{ include "endpoint_keystone_internal" . }}
auth_region = {{ .Values.keystone.neutron_region_name }}
admin_tenant_name = service
project_domain_id = default
user_domain_id = default
project_name = service
username = {{ .Values.keystone.admin_user }}
password = {{ .Values.keystone.admin_password }}
endpoint_type = adminURL
# Nova metadata service IP and port
nova_metadata_ip = {{ include "nova_metadata_host" . }}
nova_metadata_port = {{ .Values.network.port.metadata }}
nova_metadata_protocol = http
# Metadata proxy shared secret
metadata_proxy_shared_secret = {{ .Values.neutron.metadata_secret }}
metadata_port = {{ .Values.network.port.metadata }}
# Workers
metadata_workers = {{ .Values.metadata.workers }}
# Caching
cache_url = memory://?default_ttl=5
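# NOTE: this secret must match the metadata proxy shared secret configured
# for the nova metadata API; if the two values differ, instances receive
# 401 responses from the metadata service.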

@ -0,0 +1,43 @@
[ml2]
# Changing type_drivers after bootstrap can lead to database inconsistencies
type_drivers = {{ include "joinListWithColon" .Values.ml2.type_drivers }}
tenant_network_types = {{ .Values.ml2.tenant_network_types }}
mechanism_drivers = {{ include "joinListWithColon" .Values.ml2.mechanism_drivers }}
[ml2_type_flat]
flat_networks = {{ include "joinListWithColon" .Values.ml2.ml2_type_flat.flat_networks }}
[ml2_type_gre]
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
# of GRE tunnel IDs that are available for tenant network allocation
tunnel_id_ranges = {{ .Values.ml2.ml2_type_gre.tunnel_id_ranges }}
[ml2_type_vxlan]
vni_ranges = {{ .Values.ml2.ml2_type_vxlan.vni_ranges }}
vxlan_group = {{ .Values.ml2.ml2_type_vxlan.vxlan_group }}
[ml2_type_vlan]
# (ListOpt) List of <physical_network>[:<vlan_min>:<vlan_max>] tuples
# specifying physical_network names usable for VLAN provider and
# tenant networks, as well as ranges of VLAN tags on each
# physical_network available for allocation as tenant networks.
network_vlan_ranges = {{ .Values.ml2.ml2_type_vlan.network_vlan_ranges }}
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True
{{- if .Values.ml2.agent.tunnel_types }}
[agent]
tunnel_types = {{ .Values.ml2.agent.tunnel_types }}
l2_population = false
arp_responder = false
{{- end }}
[ovs]
bridge_mappings = {{ include "joinListWithColon" .Values.ml2.ovs.bridge_mappings }}
tenant_network_type = {{ .Values.ml2.agent.tunnel_types }}
[vxlan]
l2_population = true
ovsdb_interface = {{ .Values.network.interface.openvswitch | default .Values.network.interface.default }}
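# A sketch of how the list-valued options above render with this chart's
# default values (see values.yaml below); joinListWithColon uses ":" as the
# separator:
#   type_drivers = flat
#   mechanism_drivers = openvswitch:l2population
#   flat_networks = *
#   bridge_mappings = physnet1:br-physnet1
# Note that oslo ListOpts are normally comma-separated, so a multi-entry
# mechanism_drivers list may need a comma join instead.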

@ -0,0 +1,71 @@
[DEFAULT]
debug = {{ .Values.neutron.default.debug }}
use_syslog = False
use_stderr = True
bind_host = {{ .Values.network.ip_address }}
bind_port = {{ .Values.network.port.server }}
#lock_path = /var/lock/neutron
api_paste_config = /usr/share/neutron/api-paste.ini
api_workers = {{ .Values.neutron.workers }}
allow_overlapping_ips = True
core_plugin = ml2
service_plugins = router
interface_driver = openvswitch
metadata_proxy_socket = /var/lib/neutron/openstack-helm/metadata_proxy
allow_automatic_l3agent_failover = True
l3_ha = true
min_l3_agents_per_router = 1
max_l3_agents_per_router = 2
l3_ha_network_type = {{ .Values.neutron.default.l3_ha_network_type }}
dhcp_agents_per_network = 3
network_auto_schedule = True
router_auto_schedule = True
transport_url = rabbit://{{ .Values.rabbitmq.admin_user }}:{{ .Values.rabbitmq.admin_password }}@{{ .Values.rabbitmq.address }}:{{ .Values.rabbitmq.port }}
[nova]
auth_url = {{ include "endpoint_keystone_internal" . }}
auth_plugin = password
project_domain_id = default
user_domain_id = default
endpoint_type = internal
region_name = {{ .Values.keystone.nova_region_name }}
project_name = service
username = {{ .Values.keystone.nova_user }}
password = {{ .Values.keystone.nova_password }}
[oslo_concurrency]
lock_path = /var/lib/neutron/tmp
[ovs]
ovsdb_connection = unix:/var/run/openvswitch/db.sock
[agent]
root_helper = sudo /var/lib/kolla/venv/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
l2_population = true
arp_responder = true
[database]
connection = mysql+pymysql://{{ .Values.database.neutron_user }}:{{ .Values.database.neutron_password }}@{{ include "neutron_db_host" . }}/{{ .Values.database.neutron_database_name }}
max_retries = -1
[keystone_authtoken]
auth_url = {{ include "endpoint_keystone_internal" . }}
auth_type = password
project_domain_id = default
user_domain_id = default
project_name = service
username = {{ .Values.keystone.neutron_user }}
password = {{ .Values.keystone.neutron_password }}
[oslo_messaging_notifications]
driver = noop
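# A sketch of two rendered lines, assuming the chart's default values below:
#   transport_url = rabbit://rabbitmq:password@rabbitmq:5672
#   connection = mysql+pymysql://neutron:password@<neutron_db_host>/neutron
# where <neutron_db_host> comes from the neutron_db_host helper and is left
# unexpanded here.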

@ -0,0 +1,5 @@
search {{ .Release.Namespace }}.svc.{{ .Values.network.dns.kubernetes_domain }} svc.{{ .Values.network.dns.kubernetes_domain }} {{ .Values.network.dns.kubernetes_domain }}
{{- range .Values.network.dns.servers }}
nameserver {{ . | title }}
{{- end }}
options ndots:5
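# A sketch of the rendered file for a hypothetical release in the
# "openstack" namespace with the default values below; "| title" is
# effectively a no-op for numeric nameserver addresses:
#   search openstack.svc.cluster.local svc.cluster.local cluster.local
#   nameserver 10.96.0.10
#   nameserver 8.8.8.8
#   options ndots:5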

@ -0,0 +1,43 @@
apiVersion: batch/v1
kind: Job
metadata:
name: neutron-db-sync
spec:
template:
spec:
restartPolicy: OnFailure
containers:
- name: neutron-db-sync
image: {{ .Values.images.db_sync }}
imagePullPolicy: {{ .Values.images.pull_policy }}
env:
- name: INTERFACE_NAME
value: "eth0"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2-conf.ini upgrade head"
- name: DEPENDENCY_JOBS
value: "{{ include "joinListWithColon" .Values.dependencies.db_sync.jobs }}"
- name: DEPENDENCY_SERVICE
value: "{{ include "joinListWithColon" .Values.dependencies.db_sync.service }}"
volumeMounts:
- name: neutronconf
mountPath: /etc/neutron/neutron.conf
subPath: neutron.conf
- name: ml2confini
mountPath: /etc/neutron/plugins/ml2/ml2-conf.ini
subPath: ml2-conf.ini
volumes:
- name: neutronconf
configMap:
name: neutron-etc
- name: ml2confini
configMap:
name: neutron-etc

@ -0,0 +1,37 @@
apiVersion: batch/v1
kind: Job
metadata:
name: neutron-init
spec:
template:
spec:
restartPolicy: OnFailure
containers:
- name: neutron-init
image: {{ .Values.images.init }}
imagePullPolicy: {{ .Values.images.pull_policy }}
env:
- name: INTERFACE_NAME
value: "eth0"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "bash /tmp/init.sh"
- name: DEPENDENCY_JOBS
value: "{{ include "joinListWithColon" .Values.dependencies.init.jobs }}"
- name: DEPENDENCY_SERVICE
value: "{{ include "joinListWithColon" .Values.dependencies.init.service }}"
volumeMounts:
- name: initsh
mountPath: /tmp/init.sh
subPath: init.sh
volumes:
- name: initsh
configMap:
name: neutron-bin

@ -0,0 +1,39 @@
apiVersion: batch/v1
kind: Job
metadata:
name: neutron-post
spec:
template:
spec:
restartPolicy: OnFailure
containers:
- name: neutron-post
image: {{ .Values.images.post }}
imagePullPolicy: {{ .Values.images.pull_policy }}
env:
- name: INTERFACE_NAME
value: "eth0"
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: COMMAND
value: "bash /tmp/post.sh"
- name: DEPENDENCY_JOBS
value: "{{ include "joinListWithColon" .Values.dependencies.post.jobs }}"
- name: DEPENDENCY_SERVICE
value: "{{ include "joinListWithColon" .Values.dependencies.post.service }}"
- name: ANSIBLE_LIBRARY
value: /usr/share/ansible/
volumeMounts:
- name: postsh
mountPath: /tmp/post.sh
subPath: post.sh
volumes:
- name: postsh
configMap:
name: neutron-bin

@ -0,0 +1,9 @@
apiVersion: v1
kind: Service
metadata:
name: neutron-server
spec:
ports:
- port: {{ .Values.network.port.server }}
selector:
app: neutron-server

neutron/values.yaml

@ -0,0 +1,241 @@
# Default values for neutron.
# This is a YAML-formatted file.
# Declare name/value pairs to be passed into your templates.
# name: value
replicas:
server: 1
images:
init: quay.io/stackanetes/stackanetes-kolla-toolbox:barcelona
db_sync: quay.io/stackanetes/stackanetes-neutron-server:barcelona
server: quay.io/stackanetes/stackanetes-neutron-server:barcelona
dhcp: quay.io/stackanetes/stackanetes-neutron-dhcp-agent:barcelona
metadata: quay.io/stackanetes/stackanetes-neutron-metadata-agent:barcelona
l3: quay.io/stackanetes/stackanetes-neutron-l3-agent:barcelona
neutron_openvswitch_agent: quay.io/stackanetes/stackanetes-neutron-openvswitch-agent:barcelona
openvswitch_db_server: quay.io/attcomdev/openvswitch-vswitchd:latest
openvswitch_vswitchd: quay.io/attcomdev/openvswitch-vswitchd:latest
post: quay.io/stackanetes/stackanetes-kolla-toolbox:barcelona
entrypoint: quay.io/stackanetes/kubernetes-entrypoint:v0.1.0
pull_policy: "IfNotPresent"
labels:
# ovs is a special case: it requires a label that can
# apply to both control hosts and compute hosts until
# we get more sophisticated with our daemonset scheduling
ovs:
node_selector_key: openvswitch
node_selector_value: enabled
agent:
dhcp:
node_selector_key: openstack-control-plane
node_selector_value: enabled
l3:
node_selector_key: openstack-control-plane
node_selector_value: enabled
metadata:
node_selector_key: openstack-control-plane
node_selector_value: enabled
server:
node_selector_key: openstack-control-plane
node_selector_value: enabled
network:
dns:
kubernetes_domain: cluster.local
# this must list the skydns server first; with calico
# it is consistently 10.96.0.10
servers:
- 10.96.0.10
- 8.8.8.8
external_bridge: br-ex
ip_address: 0.0.0.0
interface:
external: enp12s0f0
default: enp11s0f0
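# the ovs containers fall back to interface.default when no openvswitch
# interface is set ("... | default ..." in the templates above); a
# hypothetical override sketch:
#   interface:
#     openvswitch: bond0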
port:
server: 9696
metadata: 8775
memcached:
address: "memcached:11211"
rabbitmq:
address: rabbitmq
admin_user: rabbitmq
admin_password: password
port: 5672
keystone:
admin_user: "admin"
admin_password: "password"
admin_project_name: "admin"
admin_region_name: "RegionOne"
domain_name: "default"
tenant_name: "admin"
neutron_user: "neutron"
neutron_password: "password"
neutron_region_name: "RegionOne"
nova_user: "nova"
nova_password: "password"
nova_region_name: "RegionOne"
database:
port: 3306
root_user: root
root_password: password
neutron_database_name: neutron
neutron_password: password
neutron_user: neutron
metadata_agent:
default:
debug: 'True'
neutron:
workers: 4
default:
l3_ha_network_type: gre
debug: 'True'
metadata:
workers: 4
ml2:
tenant_network_types: "flat"
agent:
tunnel_types: null
type_drivers:
- flat
mechanism_drivers:
- openvswitch
- l2population
ml2_type_vxlan:
vni_ranges: "1:1000"
vxlan_group: 239.1.1.1
ml2_type_gre:
tunnel_id_ranges: "1:1000"
ml2_type_flat:
flat_networks:
- "*"
ml2_type_vlan:
network_vlan_ranges: "physnet1:1100:1110"
ovs:
auto_bridge_add:
br-physnet1: enp11s0f0
bridge_mappings:
- "physnet1:br-physnet1"
dependencies:
server:
jobs:
- neutron-db-sync
- mariadb-seed
service:
- rabbitmq
- mariadb
- keystone-api
- memcached
dhcp:
service:
- neutron-server
- rabbitmq
- nova-api
jobs:
- neutron-init
- nova-post
daemonset:
- neutron-openvswitch
metadata:
jobs:
- neutron-init
- nova-post
service:
- neutron-server
- rabbitmq
- nova-api
daemonset:
- neutron-openvswitch
openvswitchagent:
jobs:
- neutron-post
- nova-post
service:
- keystone-api
- rabbitmq
- neutron-server
container:
- openvswitch-db-server
- openvswitch-vswitchd
l3:
jobs:
- nova-init
- neutron-init
- nova-post
service:
- neutron-server
- rabbitmq
- nova-api
daemonset:
- neutron-openvswitch
db_sync:
jobs:
- neutron-init
- mariadb-seed
service:
- mariadb
init:
jobs:
- mariadb-seed
service:
- mariadb
post:
jobs:
- neutron-db-sync
service:
- keystone-api
- neutron-server
# typically overridden by environment-specific
# values, but should include all endpoints
# required by this chart
endpoints:
glance:
hosts:
default: glance-api
type: image
path: null
scheme: 'http'
port:
api: 9292
registry: 9191
nova:
hosts:
default: nova-api
path: "/v2/%(tenant_id)s"
type: compute
scheme: 'http'
port:
api: 8774
metadata: 8775
novncproxy: 6080
keystone:
hosts:
default: keystone-api
path: /v3
type: identity
scheme: 'http'
port:
admin: 35357
public: 5000
neutron:
hosts:
default: neutron-server
path: null
type: network
scheme: 'http'
port:
api: 9696
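# A hedged sketch of how the endpoint entries above are likely assembled by
# the common chart's endpoint helpers (scheme://host:port/path), e.g.
# endpoint_keystone_internal -> http://keystone-api:5000/v3; the exact
# logic lives in the common chart and may differ.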