Merge remote-tracking branch 'upstream/master' into maas-region-cleanup

Larry Rensing 2017-01-30 23:36:37 +00:00
commit 109d447453
14 changed files with 100 additions and 39 deletions

.github/ISSUE_TEMPLATE.md (new file, 33 lines added)
View File

@@ -0,0 +1,33 @@
<!-- Thanks for filing an issue! Before submitting your issue, please answer the following questions.-->
**Is this a bug report or feature request?** (choose one):
<!--
If this is a BUG REPORT, please:
- Fill in as much of the template as possible. If you leave out
information, we may close your issue automatically.
If this is a FEATURE REQUEST, please:
- Describe *in detail* the feature/behavior/change you'd like to see.
Detailed responses help the community address your request in a timely manner.
If we can't determine what you're asking for, we may close your issue. If you feel
it hasn't been adequately addressed, feel free to reopen the issue with more detail.
-->
**Kubernetes Version** (output of `kubectl version`):
**Helm Client and Tiller Versions** (output of `helm version`):
**Development or Deployment Environment?**:
**Release Tag or Master**:
**Expected Behavior**:
**What Actually Happened**:
**How to Reproduce the Issue** (as minimally as possible):
**Any Additional Comments**:
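The template above asks for `kubectl version` and `helm version` output; a quick way to gather both before filing (assuming `kubectl` and `helm` are on your PATH):

```
# Kubernetes client and server versions
kubectl version
# Helm client and Tiller versions
helm version
```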

.github/PULL_REQUEST_TEMPLATE.md (new file, 14 lines added)
View File

@@ -0,0 +1,14 @@
<!--
Thanks for contributing to OpenStack-Helm! Please be thorough
when filling out your pull request. If its purpose is not clear,
we may close it and ask you to resubmit.
-->
**What is the purpose of this pull request?**:
**What issue does this pull request address?**: Fixes #
**Notes for reviewers to consider**:
**Specific reviewers for pull request**:

View File

@@ -1,6 +1,9 @@
# Openstack-Helm
Join us on [freenode](https://freenode.net/): `#openstack-helm`
**Join us on [Slack](http://slack.k8s.io/):** `#openstack-helm`<br>
**Join us on [Freenode](https://freenode.net/):** `#openstack-helm`<br>
**Community Meetings:** [Every other Tuesday @ 3PM UTC](https://calendar.google.com/calendar/embed?src=rnd4tpeoncig91pvs05il4p29o%40group.calendar.google.com&ctz=America/New_York) (Provided by [Zoom](https://zoom.us/j/562328746))<br>
**Community Agenda Items:** [Google Docs](https://docs.google.com/document/d/1Vm2OnMzjSru3cuvxh4Oa7R_z7staU-7ivGy8foOzDCs/edit#heading=h.bfc0dkav9gk2)
Openstack-Helm is a fully self-contained Helm-based OpenStack deployment on Kubernetes. It will provide baremetal provisioning, persistent storage, full-stack resiliency, full-stack scalability, performance monitoring and tracing, and an optional development pipeline (using Jenkins). This project, along with the tools used within it, is community-based and open source.

View File

@@ -32,7 +32,7 @@ spec:
secret:
secretName: ceph-bootstrap-rgw-keyring
containers:
- name: ceph-mon
- name: ceph-mds
image: {{ .Values.images.daemon }}
imagePullPolicy: {{ .Values.images.pull_policy }}
ports:
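A quick way to confirm the container rename once the chart is deployed; the `daemon=mds` label and `ceph` namespace are assumptions based on the chart's conventions, not shown in this diff:

```
kubectl get pods --namespace=ceph -l daemon=mds \
  -o jsonpath='{.items[*].spec.containers[*].name}'
```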

View File

@@ -42,8 +42,8 @@ spec:
value: MON_HEALTH
- name: KV_TYPE
value: k8s
- name: NETWORK_AUTO_DETECT
value: "4"
- name: MON_IP_AUTO_DETECT
value: "1"
- name: CLUSTER
value: ceph
volumeMounts:
@@ -61,4 +61,4 @@ spec:
cpu: {{ .Values.resources.mon_check.requests.cpu | quote }}
limits:
memory: {{ .Values.resources.mon_check.limits.memory | quote }}
cpu: {{ .Values.resources.mon_check.limits.cpu | quote }}
cpu: {{ .Values.resources.mon_check.limits.cpu | quote }}

View File

@@ -16,7 +16,6 @@ spec:
app: ceph
daemon: rgw
spec:
hostNetwork: true
nodeSelector:
{{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
serviceAccount: default
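With `hostNetwork: true` removed, the rgw pod should report a cluster pod IP rather than the node's address. One way to check, using the `daemon: rgw` label from this template and assuming the `ceph` namespace:

```
kubectl get pods --namespace=ceph -l daemon=rgw -o wide
```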

View File

@@ -72,10 +72,16 @@ spec:
value: MON
- name: KV_TYPE
value: k8s
- name: NETWORK_AUTO_DETECT
value: "4"
- name: CLUSTER
value: ceph
- name: NETWORK_AUTO_DETECT
value: "0"
- name: CEPH_PUBLIC_NETWORK
value: {{ .Values.network.public | quote }}
- name: MON_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
volumeMounts:
- name: ceph-conf
mountPath: /etc/ceph
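The monitor address is now injected via the downward API (`status.podIP`) instead of being auto-detected. A minimal sketch for spot-checking it on a running monitor, with the pod name left as a placeholder and the `ceph` namespace assumed:

```
kubectl exec --namespace=ceph <ceph-mon-pod> -- \
  sh -c 'echo "MON_IP=$MON_IP CEPH_PUBLIC_NETWORK=$CEPH_PUBLIC_NETWORK"'
```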

View File

@@ -17,7 +17,7 @@ service:
name: ceph-mon
images:
daemon: quay.io/attcomdev/ceph-daemon:latest
daemon: docker.io/library/ceph/daemon:tag-build-master-jewel-ubuntu-16.04
pull_policy: IfNotPresent
labels:
@@ -25,6 +25,7 @@ labels:
node_selector_value: enabled
network:
public: "10.25.0.0/16"
port:
mon: 6789
rgw_ingress: 80

View File

@@ -15,7 +15,7 @@ api_paste_config = /etc/cinder/api-paste.ini
glance_api_servers = "{{ .Values.glance.proto }}://{{ .Values.glance.host }}:{{ .Values.glance.port }}"
glance_api_version = {{ .Values.glance.version }}
enabled_backends = {{ include "joinListWithColon" .Values.backends.enabled }}
enabled_backends = {{ include "joinListWithComma" .Values.backends.enabled }}
auth_strategy = keystone
os_region_name = {{ .Values.keystone.cinder_region_name }}

View File

@@ -1,4 +1,4 @@
{{- define "joinListWithColon" -}}
{{- define "joinListWithComma" -}}
{{ range $k, $v := . }}{{ if $k }},{{ end }}{{ $v }}{{ end }}
{{- end -}}
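For reference, a shell sketch of the comma join the renamed helper performs, using hypothetical list items rather than real chart values:

```
# Join items with commas, then drop the trailing separator
printf '%s,' rbd1 rbd2 | sed 's/,$//'
# -> rbd1,rbd2
```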

View File

@@ -34,15 +34,15 @@
},
{
"name": "DEPENDENCY_SERVICE",
"value": "{{ include "joinListWithColon" $deps.service }}"
"value": "{{ include "joinListWithComma" $deps.service }}"
},
{
"name": "DEPENDENCY_JOBS",
"value": "{{ include "joinListWithColon" $deps.jobs }}"
"value": "{{ include "joinListWithComma" $deps.jobs }}"
},
{
"name": "DEPENDENCY_DAEMONSET",
"value": "{{ include "joinListWithColon" $deps.daemonset }}"
"value": "{{ include "joinListWithComma" $deps.daemonset }}"
},
{
"name": "DEPENDENCY_CONTAINER",

View File

@@ -27,20 +27,19 @@ If your environment meets all of the prerequisites above, you can simply use the
```
# Clone the project:
$ git clone https://github.com/att-comdev/openstack-helm.git && cd openstack-helm
git clone https://github.com/att-comdev/openstack-helm.git && cd openstack-helm
# Get a list of the current tags:
$ git tag -l
0.1.0
git tag -l
# Checkout the tag you want to work with (if desired, or use master for development):
$ git checkout 0.1.0
git checkout 0.1.0
# Start a local Helm Server:
$ helm serve &
helm serve &
# You may need to change these params for your environment. Look up use of --iso-url if needed:
$ minikube start \
minikube start \
--network-plugin=cni \
--kubernetes-version v1.5.1 \
--disk-size 40g \
@@ -53,25 +52,25 @@ $ minikube start \
kubectl create -f http://docs.projectcalico.org/v2.0/getting-started/kubernetes/installation/hosted/calico.yaml
# Initialize Helm/Deploy Tiller:
$ helm init
helm init
# Package the Openstack-Helm Charts, and push them to your local Helm repository:
$ make
make
# Label the Minikube as an Openstack Control Plane node:
$ kubectl label nodes openstack-control-plane=enabled --all --namespace=openstack
kubectl label nodes openstack-control-plane=enabled --all --namespace=openstack
# Deploy each chart:
$ helm install --name mariadb --set development.enabled=true local/mariadb --namespace=openstack
$ helm install --name=memcached local/memcached --namespace=openstack
$ helm install --name=rabbitmq local/rabbitmq --namespace=openstack
$ helm install --name=keystone local/keystone --namespace=openstack
$ helm install --name=cinder local/cinder --namespace=openstack
$ helm install --name=glance local/glance --namespace=openstack
$ helm install --name=heat local/heat --namespace=openstack
$ helm install --name=nova local/nova --namespace=openstack
$ helm install --name=neutron local/neutron --namespace=openstack
$ helm install --name=horizon local/horizon --namespace=openstack
helm install --name mariadb --set development.enabled=true local/mariadb --namespace=openstack
helm install --name=memcached local/memcached --namespace=openstack
helm install --name=rabbitmq local/rabbitmq --namespace=openstack
helm install --name=keystone local/keystone --namespace=openstack
helm install --name=cinder local/cinder --namespace=openstack
helm install --name=glance local/glance --namespace=openstack
helm install --name=heat local/heat --namespace=openstack
helm install --name=nova local/nova --namespace=openstack
helm install --name=neutron local/neutron --namespace=openstack
helm install --name=horizon local/horizon --namespace=openstack
```
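Once the charts above are installed, a minimal sanity check is to list the Helm releases and watch the pods come up:

```
helm list
kubectl get pods --namespace=openstack
```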
# Getting Started

View File

@@ -190,11 +190,17 @@ Please ensure that you have verified and completed the steps above to prevent is
Although Ceph is mentioned throughout this guide, the deployment is flexible enough to let you bring any type of persistent storage. Most of the verification steps are very similar, if not identical, so we will use Ceph as the example throughout this guide.
## Node Labels
First, we must label our nodes according to their role. Although we are labeling `all` nodes, you are free to label only the nodes you wish. You must have at least one, although a minimum of three are recommended.
First, we must label our nodes according to their role. Although we are labeling `all` nodes, you are free to label only the nodes you wish. You must have at least one, although a minimum of three are recommended. Nodes are labeled according to their Openstack roles:
**Storage Nodes:** `ceph-storage`
**Control Plane:** `openstack-control-plane`
**Compute Nodes:** `openvswitch`, `openstack-compute-node`
```
admin@kubenode01:~$ kubectl label nodes openstack-control-plane=enabled --all
admin@kubenode01:~$ kubectl label nodes ceph-storage=enabled --all
admin@kubenode01:~$ kubectl label nodes openvswitch=enabled --all
admin@kubenode01:~$ kubectl label nodes openstack-compute-node=enabled --all
```
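One way to confirm the labels took effect before moving on:

```
admin@kubenode01:~$ kubectl get nodes --show-labels
```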
## Obtaining the Project
@@ -262,7 +268,7 @@ Please ensure that you use ``--purge`` whenever deleting a project.
## Ceph Installation and Verification
Install the first service, which is Ceph. If you have followed all of the instructions above, this installation should go smoothly. Use the following command to install Ceph:
```
admin@kubenode01:~$ helm install --name=ceph local/ceph --namespace=ceph
admin@kubenode01:~$ helm install --set network.public=$osd_public_network --name=ceph local/ceph --namespace=ceph
```
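Here `$osd_public_network` is expected to hold the public CIDR of your cluster network, set before running the install command above; an illustrative value (matching the chart default) would be:

```
admin@kubenode01:~$ osd_public_network=10.25.0.0/16
```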
## Bootstrap Installation

View File

@@ -1,11 +1,11 @@
[ml2]
# Changing type_drivers after bootstrap can lead to database inconsistencies
type_drivers = {{ include "joinListWithColon" .Values.ml2.type_drivers }}
type_drivers = {{ include "joinListWithComma" .Values.ml2.type_drivers }}
tenant_network_types = {{ .Values.ml2.tenant_network_types }}
mechanism_drivers = {{ include "joinListWithColon" .Values.ml2.mechanism_drivers }}
mechanism_drivers = {{ include "joinListWithComma" .Values.ml2.mechanism_drivers }}
[ml2_type_flat]
flat_networks = {{ include "joinListWithColon" .Values.ml2.ml2_type_flat.flat_networks }}
flat_networks = {{ include "joinListWithComma" .Values.ml2.ml2_type_flat.flat_networks }}
[ml2_type_gre]
# (ListOpt) Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges
@@ -35,7 +35,7 @@ arp_responder = false
{{- end }}
[ovs]
bridge_mappings = {{ include "joinListWithColon" .Values.ml2.ovs.bridge_mappings }}
bridge_mappings = {{ include "joinListWithComma" .Values.ml2.ovs.bridge_mappings }}
tenant_network_type = {{ .Values.ml2.agent.tunnel_types }}
[vxlan]