Merge pull request #13 from larryrensing/master

Changes LGTM. Merging.
This commit is contained in:
Brandon B. Jozsa 2016-11-30 13:10:56 -05:00 committed by GitHub
commit 7bb51a6b66
8 changed files with 164 additions and 0 deletions

26
maas/.helmignore Normal file

@ -0,0 +1,26 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj
secrets/
patches/
*.py
Makefile

3
maas/Chart.yaml Executable file

@ -0,0 +1,3 @@
# Helm chart metadata for the MaaS (Metal as a Service) chart.
description: Chart to run MaaS
name: maas
version: 0.1.0

28
maas/README.md Normal file

@ -0,0 +1,28 @@
# aic-helm/maas
This chart installs a working deployment of MaaS (Metal as a Service) on Kubernetes.
### Quickstart
To deploy your MaaS chart:
```
helm install maas --namespace=maas
```
To verify the helm deployment was successful:
```
# helm ls
NAME REVISION UPDATED STATUS CHART
opining-ocelot 1 Wed Nov 23 19:48:41 2016 DEPLOYED maas-0.1.0
```
To check that all resources are working as intended:
```
# kubectl get all --namespace=maas
NAME CLUSTER-IP EXTERNAL-IP PORT(S) AGE
svc/maas-region-ui 10.109.228.165 <nodes> 80/TCP,8000/TCP 2m
NAME READY STATUS RESTARTS AGE
po/maas-rack-2449935402-ppn34 1/1 Running 0 2m
po/maas-region-638716514-miczz 1/1 Running 0 2m
```

@ -0,0 +1,19 @@
# Deployment for the MaaS rack controller. Runs privileged on the host
# network so it can serve PXE/DHCP traffic to bare-metal nodes.
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: maas-rack
spec:
  template:
    metadata:
      labels:
        app: maas-rack-controller
    spec:
      nodeSelector:
        # Quote the templated value: label values must be strings, and an
        # unquoted expansion such as "on"/"yes" (or an empty value) would
        # otherwise render as a boolean or null.
        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value | quote }}
      # Host networking is required so the rack controller can answer
      # PXE/DHCP requests from machines on the management network.
      hostNetwork: true
      containers:
        - name: maas-rack
          # Quoted so the rendered image reference is always a valid string
          # scalar, even if the value starts with a YAML-special character.
          image: {{ .Values.images.maas_rack | quote }}
          imagePullPolicy: Always
          securityContext:
            # Privileged access is needed for the rack controller's
            # low-level network services (DHCP/TFTP).
            privileged: true

@ -0,0 +1,20 @@
# Deployment for the MaaS region controller (API + web UI).
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: maas-region
spec:
  template:
    metadata:
      labels:
        app: maas-region
    spec:
      nodeSelector:
        # Quote the templated value: label values must be strings, and an
        # unquoted expansion such as "on"/"yes" (or an empty value) would
        # otherwise render as a boolean or null.
        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value | quote }}
      containers:
        - name: maas-region
          # Quoted so the rendered image reference is always a valid string
          # scalar, even if the value starts with a YAML-special character.
          image: {{ .Values.images.maas_region | quote }}
          imagePullPolicy: Always
          ports:
            # Left unquoted deliberately: containerPort must render as an
            # integer, not a string.
            - containerPort: {{ .Values.network.port.region_container }}
          securityContext:
            privileged: true

@ -0,0 +1,18 @@
# Service exposing the MaaS region controller: the web GUI and the
# internal API/proxy port. Port values come from values.yaml and are
# intentionally unquoted so they render as integers.
apiVersion: v1
kind: Service
metadata:
  name: maas-region-ui
  labels:
    app: maas-region-ui
spec:
  ports:
    # MaaS web interface.
    - name: gui
      protocol: TCP
      port: {{ .Values.network.port.service_gui }}
      targetPort: {{ .Values.network.port.service_gui_target }}
    # MaaS internal proxy endpoint.
    - name: proxy
      protocol: TCP
      port: {{ .Values.network.port.service_proxy }}
      targetPort: {{ .Values.network.port.service_proxy_target }}
  selector:
    # Routes traffic to pods created by the maas-region Deployment.
    app: maas-region

31
maas/tests/test-pxe-client.sh Executable file

@ -0,0 +1,31 @@
#!/bin/bash -x
# Creates a qemu test client (deliberately NOT using KVM acceleration, so it
# does not conflict with VirtualBox users) that PXE-boots against the MaaS
# deployment to verify that MaaS is working.

# Define a libvirt network bridged onto the "maas" bridge so the guest can
# reach the MaaS rack controller for PXE/DHCP.
cat <<EOF >/tmp/maas-net.xml
<!-- Network Management VLAN -->
<network>
  <name>maas</name>
  <bridge name="maas"/>
  <forward mode="bridge"/>
</network>
EOF
virsh net-create /tmp/maas-net.xml

# Purge a stale disk image if one exists, then always (re)create it.
# BUGFIX: the original ran qemu-img create only inside this guard, so the
# image was never pre-created on a first run.
if [ -e /tmp/maas-node-test.qcow2 ]; then
  sudo rm /tmp/maas-node-test.qcow2
fi
sudo qemu-img create -f qcow2 -o preallocation=metadata /tmp/maas-node-test.qcow2 32G

# BUGFIX: a space is required before each continuation backslash; the
# original "--virt-type=qemu\" joined onto the next line as
# "--virt-type=qemu--pxe".
virt-install \
  --name=maas-node-test \
  --connect=qemu:///system --ram=1024 --vcpus=1 --virt-type=qemu \
  --pxe --boot network,hd \
  --os-variant=ubuntutrusty --graphics vnc --noautoconsole --os-type=linux --accelerate \
  --disk=/tmp/maas-node-test.qcow2,bus=virtio,cache=none,sparse=true,size=32 \
  --network=network=maas,model=e1000 \
  --force

19
maas/values.yaml Normal file

@ -0,0 +1,19 @@
# Default values for maas.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# Container images for the two MaaS controllers.
images:
  maas_region: quay.io/attcomdev/maas-region:1.0.1
  maas_rack: quay.io/attcomdev/maas-rack:1.0.1

# Node-selector label used to pin MaaS pods to control-plane nodes.
labels:
  node_selector_key: openstack-control-plane
  node_selector_value: enabled

# Port numbers consumed by the deployment and service templates.
network:
  port:
    region_container: 80
    service_gui: 80
    service_gui_target: 80
    service_proxy: 8000
    service_proxy_target: 8000