diff --git a/examples/kubernetes/README.md b/examples/kubernetes/README.md
index 09ce65bc0..de7f0fe82 100644
--- a/examples/kubernetes/README.md
+++ b/examples/kubernetes/README.md
@@ -5,7 +5,7 @@ Here you can find some examples on running stolon inside kubernetes
 There're two examples. The difference between them is how the keepers pods are deployed (the definitions of the other components is identical):
 
 * Using a [statefulset](statefulset) (called `petset` in k8s 1.4)
-* Using [replication controllers](rc) (one per keeper).
+* **DEPRECATED** Using [replication controllers](rc) (one per keeper).
 
 ## Docker image
 
diff --git a/examples/kubernetes/rc/README.md b/examples/kubernetes/rc/README.md
index d6a8868b7..e7e0ac47a 100644
--- a/examples/kubernetes/rc/README.md
+++ b/examples/kubernetes/rc/README.md
@@ -1,5 +1,7 @@
 # Stolon inside kubernetes
 
+**DEPRECATED EXAMPLE**: Please use the [statefulset](../statefulset/README.md) example. Using persistent volumes with replication controllers/replica sets doesn't guarantee at-most-once pod existence and can lead to data corruption with persistent volume kinds that can end up attached in read-write mode to more than one node at a time.
+
 This is a simple example that uses replication controller for all stolon components.
 
 Since the keeper requires a persistent data directory we define a replication controller with replicas=1 for each keeper. The keeper id is fixed inside the pod template definition so it won't be generated as a unique id but will have a more meaningful name (`keeper0`, `keeper1` etc...).
diff --git a/examples/kubernetes/statefulset/stolon-keeper.yaml b/examples/kubernetes/statefulset/stolon-keeper.yaml
index 52f25393a..2ec85a73c 100644
--- a/examples/kubernetes/statefulset/stolon-keeper.yaml
+++ b/examples/kubernetes/statefulset/stolon-keeper.yaml
@@ -1,8 +1,8 @@
-# TODO(sgotti) update this to StatefulSet (PetSet where renamed to StatefulSet) when k8s 1.5 is released
-## apiVersion: apps/v1beta1
-## kind: StatefulSet
-apiVersion: apps/v1alpha1
-kind: PetSet
+# PetSet was renamed to StatefulSet in k8s 1.5
+# apiVersion: apps/v1alpha1
+# kind: PetSet
+apiVersion: apps/v1beta1
+kind: StatefulSet
 metadata:
   name: stolon-keeper
 spec:
@@ -48,7 +48,7 @@ spec:
         value: "stolon"
       - name: STKEEPER_PG_SU_PASSWORDFILE
         value: "/etc/secrets/stolon/password"
-        ## Uncomment this to enable debug logs
+        # Uncomment this to enable debug logs
         #- name: STKEEPER_DEBUG
         #  value: "true"
       ports:
@@ -62,13 +62,13 @@ spec:
       - name: stolon
         secret:
           secretName: stolon
-  ## Define your own volumeClaimTemplate. This example uses a dynamic provisioning with a storage class of type "anything" that works also with minikube and will provision volume using the hostPath provider, but this shouldn't be used in production and won't work in multi-node cluster.
-  ## In production you should use your own defined storage-class and configure your persistent volumes (statically or dynamic using a provisioner, see k8s doc).
+  # Define your own volumeClaimTemplate. This example uses dynamic PV provisioning with a storage class named "standard" (so it works by default with minikube).
+  # In production you should use your own storage class and configure your persistent volumes (statically or dynamically using a provisioner; see the related k8s docs).
   volumeClaimTemplates:
   - metadata:
       name: data
       annotations:
-        volume.alpha.kubernetes.io/storage-class: anything
+        volume.alpha.kubernetes.io/storage-class: standard
     spec:
       accessModes: [ "ReadWriteOnce" ]
       resources:
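
A note on the "standard" storage class that the updated volumeClaimTemplates rely on: minikube provides one out of the box, but on other clusters you may need to define it yourself. The sketch below is not part of the patch; the `kubernetes.io/gce-pd` provisioner and the `pd-standard` disk type are assumptions for a GCE cluster, so substitute your cluster's own provisioner (e.g. `kubernetes.io/aws-ebs`).

# storage-class.yaml -- hypothetical companion manifest, not in this patch.
# Defines a StorageClass named "standard" so the PVCs generated from the
# volumeClaimTemplates above can be dynamically provisioned.
kind: StorageClass
apiVersion: storage.k8s.io/v1beta1
metadata:
  name: standard
provisioner: kubernetes.io/gce-pd   # assumption: GCE; use your cluster's provisioner
parameters:
  type: pd-standard                 # assumption: standard persistent disks

Applied with something like `kubectl create -f storage-class.yaml` before `kubectl create -f stolon-keeper.yaml`, after which `kubectl get statefulset stolon-keeper` shows the keeper pods being created one at a time with stable names (`stolon-keeper-0`, `stolon-keeper-1`, ...).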