Merge pull request #7882 from hashicorp/multi-cluster-k8s

Documentation for wan fed via mgw on k8s
This commit is contained in:
Luke Kysow 2020-05-14 09:57:39 -07:00 committed by GitHub
commit 79a604b795
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
24 changed files with 971 additions and 186 deletions

View File

@ -99,6 +99,10 @@
/docs/guides/kubernetes-reference https://learn.hashicorp.com/consul/day-1-operations/kubernetes-reference 301!
/docs/guides/outage.html https://learn.hashicorp.com/consul/day-2-operations/outage 301!
/docs/guides/outage https://learn.hashicorp.com/consul/day-2-operations/outage 301!
/docs/platform/k8s/minikube.html https://learn.hashicorp.com/consul/kubernetes/minikube 301!
/docs/platform/k8s/aks.html https://learn.hashicorp.com/consul/kubernetes/azure-k8s 301!
/docs/platform/k8s/eks.html https://learn.hashicorp.com/consul/kubernetes/aws-k8s 301!
/docs/platform/k8s/gke.html https://learn.hashicorp.com/consul/kubernetes/google-cloud-k8s 301!
# Glob Redirects, Ported from Varnish
@ -159,31 +163,25 @@
/docs/connect/platform/nomad.html /docs/connect/nomad 301!
/docs/connect/platform/nomad /docs/connect/nomad 301!
/docs/platform/k8s/run.html /docs/k8s/installation 301!
/docs/platform/k8s/run /docs/k8s/installation 301!
/docs/platform/k8s/aks.html /docs/k8s/installation/aks 301!
/docs/platform/k8s/aks /docs/k8s/installation/aks 301!
/docs/platform/k8s/gke.html /docs/k8s/installation/gke 301!
/docs/platform/k8s/gke /docs/k8s/installation/gke 301!
/docs/platform/k8s/minikube.html /docs/k8s/installation/minikube 301!
/docs/platform/k8s/minikube /docs/k8s/installation/minikube 301!
/docs/platform/k8s/consul-enterprise.html /docs/k8s/installation/consul-enterprise 301!
/docs/platform/k8s/consul-enterprise /docs/k8s/installation/consul-enterprise 301!
/docs/platform/k8s/clients-outside-kubernetes.html /docs/k8s/installation/clients-outside-kubernetes 301!
/docs/platform/k8s/clients-outside-kubernetes /docs/k8s/installation/clients-outside-kubernetes 301!
/docs/platform/k8s/servers-outside-kubernetes.html /docs/k8s/installation/servers-outside-kubernetes 301!
/docs/platform/k8s/servers-outside-kubernetes /docs/k8s/installation/servers-outside-kubernetes 301!
/docs/platform/k8s/predefined-pvcs.html /docs/k8s/installation/predefined-pvcs 301!
/docs/platform/k8s/predefined-pvcs /docs/k8s/installation/predefined-pvcs 301!
/docs/platform/k8s/operations.html /docs/k8s/operations 301!
/docs/platform/k8s/operations /docs/k8s/operations 301!
/docs/platform/k8s/upgrading.html /docs/k8s/operations/upgrading 301!
/docs/platform/k8s/upgrading /docs/k8s/operations/upgrading 301!
/docs/platform/k8s/tls-on-existing-cluster.html /docs/k8s/operations/tls-on-existing-cluster 301!
/docs/platform/k8s/tls-on-existing-cluster /docs/k8s/operations/tls-on-existing-cluster 301!
/docs/platform/k8s/uninstalling.html /docs/k8s/operations/upgrading 301!
/docs/platform/k8s/uninstalling /docs/k8s/operations/upgrading 301!
/docs/platform/k8s/* /docs/k8s/:splat 301!
/docs/platform/k8s/run.html /docs/k8s/installation/overview 301!
/docs/platform/k8s/run /docs/k8s/installation/overview 301!
/docs/platform/k8s/consul-enterprise.html /docs/k8s/installation/deployment-configurations/consul-enterprise 301!
/docs/platform/k8s/consul-enterprise /docs/k8s/installation/deployment-configurations/consul-enterprise 301!
/docs/platform/k8s/clients-outside-kubernetes.html /docs/k8s/installation/deployment-configurations/clients-outside-kubernetes 301!
/docs/platform/k8s/clients-outside-kubernetes /docs/k8s/installation/deployment-configurations/clients-outside-kubernetes 301!
/docs/platform/k8s/servers-outside-kubernetes.html /docs/k8s/installation/deployment-configurations/servers-outside-kubernetes 301!
/docs/platform/k8s/servers-outside-kubernetes /docs/k8s/installation/deployment-configurations/servers-outside-kubernetes 301!
/docs/platform/k8s/predefined-pvcs.html /docs/k8s/installation/platforms/self-hosted-kubernetes 301!
/docs/platform/k8s/predefined-pvcs /docs/k8s/installation/platforms/self-hosted-kubernetes 301!
/docs/platform/k8s/operations.html /docs/k8s/operations 301!
/docs/platform/k8s/operations /docs/k8s/operations 301!
/docs/platform/k8s/upgrading.html /docs/k8s/operations/upgrading 301!
/docs/platform/k8s/upgrading /docs/k8s/operations/upgrading 301!
/docs/platform/k8s/tls-on-existing-cluster.html /docs/k8s/operations/tls-on-existing-cluster 301!
/docs/platform/k8s/tls-on-existing-cluster /docs/k8s/operations/tls-on-existing-cluster 301!
/docs/platform/k8s/uninstalling.html /docs/k8s/operations/uninstalling 301!
/docs/platform/k8s/uninstalling /docs/k8s/operations/uninstalling 301!
/docs/platform/k8s/* /docs/k8s/:splat 301!
/docs/partnerships/index.html /docs/partnerships 301!
/docs/enterprise/backups/index.html /docs/enterprise/backups 301!

View File

@ -141,7 +141,7 @@ export default [
'acl-rules',
'acl-legacy',
'acl-migrate-tokens',
{ category: 'auth-methods', content: ['kubernetes', 'jwt', 'oidc']},
{ category: 'auth-methods', content: ['kubernetes', 'jwt', 'oidc'] },
],
},
{
@ -179,16 +179,51 @@ export default [
category: 'k8s',
content: [
{
name: 'Installation',
category: 'installation',
content: [
'aks',
'eks',
'gke',
'minikube',
'consul-enterprise',
'clients-outside-kubernetes',
'servers-outside-kubernetes',
'predefined-pvcs',
'overview',
{
category: 'platforms',
name: 'Platform Guides',
content: [
{
title: 'Minikube',
href:
'https://learn.hashicorp.com/consul/kubernetes/minikube?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=mk',
},
{
title: 'AKS (Azure)',
href:
'https://learn.hashicorp.com/consul/kubernetes/azure-k8s?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=aks',
},
{
title: 'EKS (AWS)',
href:
'https://learn.hashicorp.com/consul/kubernetes/aws-k8s?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=eks',
},
{
title: 'GKE (Google Cloud)',
href:
'https://learn.hashicorp.com/consul/kubernetes/google-cloud-k8s?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=gke',
},
'self-hosted-kubernetes',
],
},
{
category: 'deployment-configurations',
name: 'Deployment Configurations',
content: [
'clients-outside-kubernetes',
'servers-outside-kubernetes',
'consul-enterprise',
],
},
{
category: 'multi-cluster',
name: 'Multi-Cluster Federation <sup> Beta </sup>',
content: ['overview', 'kubernetes', 'vms-and-kubernetes'],
},
],
},
{
@ -196,9 +231,9 @@ export default [
name: 'Operations',
content: ['upgrading', 'tls-on-existing-cluster', 'uninstalling'],
},
'dns',
'service-sync',
'connect',
'service-sync',
'dns',
'ambassador',
'helm',
],

View File

@ -285,8 +285,8 @@ If you have tried the above troubleshooting steps and are still stuck, DataWire
[ambassador]: https://www.getambassador.io/
[ingress controller]: https://blog.getambassador.io/kubernetes-ingress-nodeport-load-balancers-and-ingress-controllers-6e29f1c44f2d
[proxies]: /docs/connect/proxies
[service sync]: /docs/platform/k8s/service-sync
[connect sidecar]: /docs/platform/k8s/connect
[service sync]: /docs/k8s/service-sync
[connect sidecar]: /docs/k8s/connect
[install]: https://www.getambassador.io/user-guide/consul-connect-ambassador/
[ambassador-service.yaml]: https://www.getambassador.io/yaml/ambassador/ambassador-service.yaml
[request access]: https://d6e.co/slack

View File

@ -1,7 +1,7 @@
---
layout: docs
page_title: Connect Sidecar - Kubernetes
sidebar_title: Connect Sidecar
page_title: Connect Service Mesh - Kubernetes
sidebar_title: Connect - Service Mesh
description: >-
Connect is a feature built into Consul that enables automatic
service-to-service authorization and connection encryption across your Consul
@ -9,7 +9,7 @@ description: >-
other services.
---
# Connect Sidecar on Kubernetes
# Connect Service Mesh on Kubernetes
[Connect](/docs/connect) is a feature built into Consul that enables
automatic service-to-service authorization and connection encryption across
@ -21,12 +21,12 @@ your cluster, making configuration for Kubernetes automatic.
This functionality is provided by the
[consul-k8s project](https://github.com/hashicorp/consul-k8s) and can be
automatically installed and configured using the
[Consul Helm chart](/docs/platform/k8s/run).
[Consul Helm chart](/docs/k8s/installation/overview).
## Usage
When the
[Connect injector is installed](/docs/platform/k8s/connect#installation-and-configuration),
[Connect injector is installed](/docs/k8s/connect#installation-and-configuration),
the Connect sidecar is automatically added to all pods. This sidecar can both
accept and establish connections using Connect, enabling the pod to communicate
to clients and dependencies exclusively over authorized and encrypted
@ -78,7 +78,7 @@ spec:
The only change for Connect is the addition of the
`consul.hashicorp.com/connect-inject` annotation. This enables injection
for this pod. The injector can also be
[configured](/docs/platform/k8s/connect#installation-and-configuration)
[configured](/docs/k8s/connect#installation-and-configuration)
to automatically inject unless explicitly disabled, but the default
installation requires opt-in using the annotation shown above.
@ -131,7 +131,7 @@ spec:
```
Pods must specify upstream dependencies with the
[`consul.hashicorp.com/connect-service-upstreams` annotation](/docs/platform/k8s/connect#consul-hashicorp-com-connect-service-upstreams).
[`consul.hashicorp.com/connect-service-upstreams` annotation](/docs/k8s/connect#consul-hashicorp-com-connect-service-upstreams).
This annotation declares the names of any upstream dependencies and a
local port for the proxy to listen on. When a connection is established to that local
port, the proxy establishes a connection to the target service
@ -253,7 +253,7 @@ Annotations can be used to configure the injection behavior.
feature, information about the protocol the service uses is required. Users
can define the protocol directly using this annotation on the pod spec, or by
defining a default value for all services using the Helm chart's
[defaultProtocol](/docs/platform/k8s/helm#v-connectinject-centralconfig-defaultprotocol)
[defaultProtocol](/docs/k8s/helm#v-connectinject-centralconfig-defaultprotocol)
option. Specific annotations will always override the default value.
- `consul.hashicorp.com/service-tags` - A comma separated list of tags that will
@ -323,15 +323,15 @@ provided by the
[consul-k8s project](https://github.com/hashicorp/consul-k8s).
This enables the automatic pod mutation shown in the usage section above.
Installation of the mutating admission webhook is automated using the
[Helm chart](/docs/platform/k8s/run).
[Helm chart](/docs/k8s/installation/overview).
To install the Connect injector, enable the Connect injection feature using
[Helm values](/docs/platform/k8s/helm#configuration-values) and
[Helm values](/docs/k8s/helm#configuration-values) and
upgrade the installation using `helm upgrade` for existing installs or
`helm install` for a fresh install. The Connect injector **also requires**
[client agents](/docs/platform/k8s/helm#v-client) are enabled on
[client agents](/docs/k8s/helm#v-client) are enabled on
the node with pods that are using Connect and that
[gRPC is enabled](/docs/platform/k8s/helm#v-client-grpc).
[gRPC is enabled](/docs/k8s/helm#v-client-grpc).
```yaml
connectInject:
@ -503,7 +503,7 @@ See [consul.hashicorp.com/connect-service-upstreams](#consul-hashicorp-com-conne
### Verifying the Installation
To verify the installation, run the
["Accepting Inbound Connections"](/docs/platform/k8s/connect#accepting-inbound-connections)
["Accepting Inbound Connections"](/docs/k8s/connect#accepting-inbound-connections)
example from the "Usage" section above. After running this example, run
`kubectl get pod static-server -o yaml`. In the raw YAML output, you should
see injected Connect containers and an annotation

View File

@ -20,12 +20,12 @@ Once configured, DNS requests in the form `<consul-service-name>.service.consul`
resolve for services in Consul. This will work from all Kubernetes namespaces.
-> **Note:** If you want requests to just `<consul-service-name>` (without the `.service.consul`) to resolve, then you'll need
to turn on [Consul to Kubernetes Service Sync](/docs/platform/k8s/service-sync#consul-to-kubernetes).
to turn on [Consul to Kubernetes Service Sync](/docs/k8s/service-sync#consul-to-kubernetes).
## Consul DNS Cluster IP
To configure KubeDNS or CoreDNS you'll first need the `ClusterIP` of the Consul
DNS service created by the [Helm chart](/docs/platform/k8s/helm).
DNS service created by the [Helm chart](/docs/k8s/helm).
The default name of the Consul DNS service will be `consul-consul-dns`. Use
that name to get the `ClusterIP`:

View File

@ -111,7 +111,7 @@ and consider if they're appropriate for your deployment.
- `verify` ((#v-global-verify)) (`boolean: true`) - If true, `verify_outgoing`, `verify_server_hostname`,
and `verify_incoming_rpc` will be set to `true` for Consul servers and clients.
Set this to false to incrementally roll out TLS on an existing Consul cluster.
Please see [Configuring TLS on an Existing Cluster](/docs/platform/k8s/tls-on-existing-cluster)
Please see [Configuring TLS on an Existing Cluster](/docs/k8s/tls-on-existing-cluster)
for more details.
- `httpsOnly` ((#v-global-httpsonly)) (`boolean: true`) - If true, the Helm chart will configure Consul
@ -310,12 +310,12 @@ and consider if they're appropriate for your deployment.
```
- `externalServers` ((#v-externalservers)) - Configuration for Consul servers when the servers are running outside of Kubernetes.
When running external servers, configuring these values is recommended
if setting `global.tls.enableAutoEncrypt` to true (requires consul-k8s >= 0.13.0)
or `global.acls.manageSystemACLs` to true (requires consul-k8s >= 0.14.0).
When running external servers, configuring these values is recommended
if setting `global.tls.enableAutoEncrypt` to true (requires consul-k8s >= 0.13.0)
or `global.acls.manageSystemACLs` to true (requires consul-k8s >= 0.14.0).
- `enabled` ((#v-externalservers-enabled)) (`boolean: false`) - If true, the Helm chart will be configured to talk to the external servers.
If setting this to true, you must also set `server.enabled` to false.
If setting this to true, you must also set `server.enabled` to false.
- `hosts` ((#v-externalservers-hosts)) (`array<string>: null`) - An array of external Consul server hosts that are used to make
HTTPS connections from the components in this Helm chart.
@ -340,10 +340,10 @@ and consider if they're appropriate for your deployment.
You could retrieve this value from your `kubeconfig` by running:
```shell
kubectl config view \
-o jsonpath="{.clusters[?(@.name=='<your cluster name>')].cluster.server}"
```
```shell
kubectl config view \
-o jsonpath="{.clusters[?(@.name=='<your cluster name>')].cluster.server}"
```
- `client` ((#v-client)) - Values that configure running a Consul client on Kubernetes nodes.
@ -363,7 +363,7 @@ and consider if they're appropriate for your deployment.
- `grpc` ((#v-client-grpc)) (`boolean: true`) - If true, agents will enable their GRPC listener on
port 8502 and expose it to the host. This will use slightly more resources, but is
required for [Connect](/docs/platform/k8s/connect).
required for [Connect](/docs/k8s/connect).
- `exposeGossipPorts` ((#v-client-exposegossipports)) (`boolean: false`) - If true, the Helm chart
will expose the clients' gossip ports as hostPorts. This is only necessary if pod IPs in the k8s cluster are not directly routable and the Consul servers are outside of the k8s cluster.
@ -482,7 +482,7 @@ and consider if they're appropriate for your deployment.
- `enabled` ((#v-dns-enabled)) (`boolean: global.enabled`) - If true, a `consul-dns` service will be
created that exposes port 53 for TCP and UDP to the running Consul agents (servers and
clients). This can then be used to [configure kube-dns](/docs/platform/k8s/dns).
clients). This can then be used to [configure kube-dns](/docs/k8s/dns).
The Helm chart _does not_ automatically configure kube-dns.
- `clusterIP` ((#v-dns-clusterip)) (`string: null`) - If defined, this value configures the cluster
@ -491,17 +491,17 @@ and consider if they're appropriate for your deployment.
- `annotations` ((#v-dns-annotations)) (`string: null`) - Extra annotations to attach to the DNS
service. This should be a multi-line string of annotations to apply to the DNS service.
- `syncCatalog` ((#v-synccatalog)) - Values that configure the [service sync](/docs/platform/k8s/service-sync) process.
- `syncCatalog` ((#v-synccatalog)) - Values that configure the [service sync](/docs/k8s/service-sync) process.
- `enabled` ((#v-synccatalog-enabled)) (`boolean: false`) - If true, the chart will install all the
resources necessary for the catalog sync process to run.
- `image` ((#v-synccatalog-image)) (`string: global.imageK8S`) - The name of the Docker image
(including any tag) for [consul-k8s](/docs/platform/k8s#getting-started-with-consul-and-kubernetes)
(including any tag) for [consul-k8s](/docs/k8s#getting-started-with-consul-and-kubernetes)
to run the sync program.
- `default` ((#v-synccatalog-default)) (`boolean: true`) - If true, all valid services in K8S are
synced by default. If false, the service must be [annotated](/docs/platform/k8s/service-sync#sync-enable-disable) properly to sync. In either case an annotation can override the default.
synced by default. If false, the service must be [annotated](/docs/k8s/service-sync#sync-enable-disable) properly to sync. In either case an annotation can override the default.
- `toConsul` ((#v-synccatalog-toconsul)) (`boolean: true`) - If true, will sync Kubernetes services
to Consul. This can be disabled to have a one-way sync.
@ -606,7 +606,7 @@ and consider if they're appropriate for your deployment.
- `additionalSpec` ((#v-ui-service-additionalspec)) (`string: null`) - Additional Service spec
values. This should be a multi-line string mapping directly to a Kubernetes `Service` object.
- `connectInject` ((#v-connectinject)) - Values that configure running the [Connect injector](/docs/platform/k8s/connect).
- `connectInject` ((#v-connectinject)) - Values that configure running the [Connect injector](/docs/k8s/connect).
- `enabled` ((#v-connectinject-enabled)) (`boolean: false`) - If true, the chart will install all the
resources necessary for the Connect injector process to run. This will enable the injector but will
@ -616,7 +616,7 @@ and consider if they're appropriate for your deployment.
(including any tag) for the [consul-k8s](https://github.com/hashicorp/consul-k8s) binary.
- `default` ((#v-connectinject-default)) (`boolean: false`) - If true, the injector will inject the
Connect sidecar into all pods by default. Otherwise, pods must specify the. [injection annotation](/docs/platform/k8s/connect#consul-hashicorp-com-connect-inject)
Connect sidecar into all pods by default. Otherwise, pods must specify the [injection annotation](/docs/k8s/connect#consul-hashicorp-com-connect-inject)
to opt-in to Connect injection. If this is true, pods can use the same annotation
to explicitly opt-out of injection.
@ -717,7 +717,7 @@ and consider if they're appropriate for your deployment.
configuration feature. Pods that have a Connect proxy injected will have their service automatically registered in this central configuration.
- `defaultProtocol` ((#v-connectinject-centralconfig-defaultprotocol)) (`string: null`) - If
defined, this value will be used as the default protocol type for all services registered with the central configuration. This can be overridden by using the [protocol annotation](/docs/platform/k8s/connect#consul-hashicorp-com-connect-service-protocol) directly on any pod spec.
defined, this value will be used as the default protocol type for all services registered with the central configuration. This can be overridden by using the [protocol annotation](/docs/k8s/connect#consul-hashicorp-com-connect-service-protocol) directly on any pod spec.
- `proxyDefaults` ((#v-connectinject-centralconfig-proxydefaults)) (`string: "{}"`) - This value is
a raw json string that will be applied to all Connect proxy sidecar pods. It can include any valid configuration for the configured proxy.

View File

@ -71,5 +71,5 @@ There are several ways to try Consul with Kubernetes in different environments.
**Documentation**
- [Installing Consul](/docs/platform/k8s/run) covers how to install Consul using the Helm chart.
- [Helm Chart Reference](/docs/platform/k8s/helm) describes the different options for configuring the Helm chart.
- [Installing Consul](/docs/k8s/installation/overview) covers how to install Consul using the Helm chart.
- [Helm Chart Reference](/docs/k8s/helm) describes the different options for configuring the Helm chart.

View File

@ -1,18 +0,0 @@
---
layout: docs
page_title: Consul on Azure Cloud
sidebar_title: Azure Kubernetes Service (AKS)
description: Consul can run directly on Azure Kubernetes Service (AKS).
---
# Consul on Azure Kubernetes Service
Consul can run directly on Azure Kubernetes Service (AKS). To get hands-on experience, you can follow the [Deploy Consul on Azure Kubernetes Service (AKS)](https://learn.hashicorp.com/consul/kubernetes/azure-k8s?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=aks) guide.
The Learn guide includes:
- AKS configuration recommendations
- How to install Consul with the official Helm chart
- Deploying two services in the Consul Connect service mesh.
To complete this guide, you will need an Azure Cloud account.

View File

@ -24,7 +24,7 @@ needs to have permissions to list pods in the namespace where Consul servers
are deployed.
The auto-join string below will join a Consul server cluster that is
started using the [official Helm chart](/docs/platform/k8s/helm):
started using the [official Helm chart](/docs/k8s/helm):
```shell
$ consul agent -retry-join 'provider=k8s label_selector="app=consul,component=server"'

View File

@ -65,12 +65,12 @@ global:
externalServers:
enabled: true
hosts:
- "provider=my-cloud config=val ..."
- 'provider=my-cloud config=val ...'
```
In most cases, `externalServers.hosts` will be the same as `client.join`, however, both keys must be set because
they are used for different purposes: one for Serf LAN and the other for HTTPS connections.
Please see the [reference documentation](https://www.consul.io/docs/platform/k8s/helm.html#v-externalservers-hosts)
Please see the [reference documentation](https://www.consul.io/docs/k8s/helm.html#v-externalservers-hosts)
for more info. If your HTTPS port is different from Consul's default `8501`, you must also set
`externalServers.httpsPort`.
@ -104,12 +104,12 @@ global:
The bootstrap token requires the following minimal permissions:
* `acl:write`
* `operator:write` if enabling Consul namespaces
* `agent:read` if using WAN federation over mesh gateways
- `acl:write`
- `operator:write` if enabling Consul namespaces
- `agent:read` if using WAN federation over mesh gateways
Next, configure external servers. The Helm chart will use this configuration to talk to the Consul server's API
to create policies, tokens, and an auth method. If you are [enabling Consul Connect](/docs/platform/k8s/connect.html),
to create policies, tokens, and an auth method. If you are [enabling Consul Connect](/docs/k8s/connect.html),
`k8sAuthMethodHost` should be set to the address of your Kubernetes API server
so that the Consul servers can validate a Kubernetes service account token when using the [Kubernetes auth method](https://www.consul.io/docs/acl/auth-methods/kubernetes.html)
with `consul login`.
@ -118,8 +118,8 @@ with `consul login`.
externalServers:
enabled: true
hosts:
- "provider=my-cloud config=val ..."
k8sAuthMethodHost: "https://kubernetes.example.com:443"
- 'provider=my-cloud config=val ...'
k8sAuthMethodHost: 'https://kubernetes.example.com:443'
```
Your resulting Helm configuration will end up looking similar to this:
@ -138,12 +138,12 @@ client:
# IPs. If false, the pod IPs must be routable from the external servers.
exposeGossipPorts: true
join:
- "provider=my-cloud config=val ..."
- 'provider=my-cloud config=val ...'
externalServers:
enabled: true
hosts:
- "provider=my-cloud config=val ..."
k8sAuthMethodHost: "https://kubernetes.example.com:443"
- 'provider=my-cloud config=val ...'
k8sAuthMethodHost: 'https://kubernetes.example.com:443'
```
### Bootstrapping ACLs via the Helm chart
@ -162,10 +162,10 @@ client:
# IPs. If false, the pod IPs must be routable from the external servers.
exposeGossipPorts: true
join:
- "provider=my-cloud config=val ..."
- 'provider=my-cloud config=val ...'
externalServers:
enabled: true
hosts:
- "provider=my-cloud config=val ..."
k8sAuthMethodHost: "https://kubernetes.example.com:443"
- 'provider=my-cloud config=val ...'
k8sAuthMethodHost: 'https://kubernetes.example.com:443'
```

View File

@ -1,19 +0,0 @@
---
layout: 'docs'
page_title: 'Running Consul on Amazon EKS'
sidebar_title: 'Amazon Elastic Kubernetes Service (EKS)'
description: |-
Consul can run directly on Amazon Elastic Kubernetes Service (EKS).
---
# Consul on Amazon Elastic Kubernetes Service
Consul can run directly on Amazon Elastic Kubernetes Service (EKS). To get hands-on experience, you can use the [Deploy Consul on Amazon Elastic Kubernetes Service (EKS)](https://learn.hashicorp.com/consul/kubernetes/aws-k8s?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=eks) guide.
The Learn guide includes:
- EKS configuration recommendations
- How to install Consul on Amazon EKS with the official Helm chart
- Accessing Consul with the Consul UI, CLI, and API.
You will need an Amazon Web Services account to complete this guide.

View File

@ -1,18 +0,0 @@
---
layout: docs
page_title: Running Consul on Google Cloud
sidebar_title: Google Kubernetes Engine (GKE)
description: Consul can run directly on Google Kubernetes Engine (GKE).
---
# Consul on Google Kubernetes Engine
Consul can run directly on Google Kubernetes Engine (GKE). To get hands-on experience, you can use the [Deploy Consul on Google Kubernetes Engine (GKE)](https://learn.hashicorp.com/consul/kubernetes/google-cloud-k8s?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=gke) guide.
The Learn guide includes:
- GKE configuration recommendations
- How to install Consul on GKE with the official Helm chart
- Accessing Consul with the Consul UI, CLI, and API.
You will need a Google Cloud account to complete this guide.

View File

@ -1,16 +0,0 @@
---
layout: docs
page_title: Running Consul on Minikube
sidebar_title: Minikube
description: Consul can run directly on Minikube for testing.
---
# Consul on Minikube
Consul can run directly on Minikube for testing. To get hands-on experience, you can use the [Consul on Minikube via Helm](https://learn.hashicorp.com/consul/kubernetes/minikube?utm_source=consul.io&utm_medium=docs&utm_content=k8s&utm_term=mk) guide.
The Learn guide includes how to:
- Install Consul on Minikube with the official Helm chart
- Access Consul with the Consul UI, CLI, and API
- Deploy services in the Consul Connect service mesh.

View File

@ -0,0 +1,402 @@
---
layout: docs
page_title: Federation Between Kubernetes Clusters
sidebar_title: Federation Between Kubernetes Clusters
description: >-
Federating multiple Kubernetes clusters.
---
# Federation Between Kubernetes Clusters
-> **1.8.0+:** This feature is available in Consul versions 1.8.0 and higher
~> This topic requires familiarity with [Mesh Gateways](/docs/connect/mesh_gateway) and [WAN Federation Via Mesh Gateways](/docs/connect/wan-federation-via-mesh-gateways).
-> Looking for a step-by-step guide? Please follow our Learn Guide: [Secure and Route Service Mesh Communication Across Kubernetes](https://learn.hashicorp.com/consul/kubernetes/mesh-gateways).
This page describes how to federate multiple Kubernetes clusters. See [Multi-Cluster Overview](/docs/k8s/installation/multi-cluster/overview)
for more information on use-cases and how it works.
## Primary Datacenter
Consul treats each Kubernetes cluster as a separate Consul datacenter.
In order to federate clusters, one cluster must be designated the
primary datacenter. This datacenter will be
responsible for creating the certificate authority that signs the TLS certificates
Connect uses to encrypt and authorize traffic. It also handles validating global ACL tokens. All other clusters
that are federated are considered secondaries.
#### First Time Installation
If you haven't installed Consul on your cluster, continue reading below. If you've
already installed Consul on a cluster and want to upgrade it to
support federation, see [Upgrading An Existing Cluster](#upgrading-an-existing-cluster).
You will need to use the following `config.yaml` file for your primary cluster,
with the possible modifications listed below.
```yaml
global:
name: consul
image: consul:1.8.0-beta1
datacenter: dc1
# TLS configures whether Consul components use TLS.
tls:
# TLS must be enabled for federation in Kubernetes.
enabled: true
federation:
enabled: true
# This will cause a Kubernetes secret to be created that
# can be imported by secondary datacenters to configure them
# for federation.
createFederationSecret: true
acls:
manageSystemACLs: true
# If ACLs are enabled, we must create a token for secondary
# datacenters to replicate ACLs.
createReplicationToken: true
# Gossip encryption secures the protocol Consul uses to quickly
# discover new nodes and detect failure.
gossipEncryption:
secretName: consul-gossip-encryption-key
secretKey: key
connectInject:
# Consul Connect service mesh must be enabled for federation.
enabled: true
meshGateway:
# Mesh gateways are gateways between datacenters. They must be enabled
# for federation in Kubernetes since the communication between datacenters
# goes through the mesh gateways.
enabled: true
```
Modifications:
1. Note the Docker image:
```yaml
global:
image: consul:1.8.0-beta1
```
Kubernetes support for federation is currently only available in Consul 1.8.0 beta.
1. The Consul datacenter name is `dc1`. The datacenter name in each federated
cluster **must be unique**.
1. ACLs are enabled in the above config file. They can be disabled by setting:
```yaml
global:
acls:
manageSystemACLs: false
createReplicationToken: false
```
ACLs secure Consul by requiring every API call to present an ACL token that
is validated to ensure it has the proper permissions. If you are only testing Consul,
this is not required.
1. Gossip encryption is enabled in the above config file. To disable it, comment
out or delete the `gossipEncryption` key:
```yaml
global:
# gossipEncryption:
# secretName: consul-gossip-encryption-key
# secretKey: key
```
Gossip encryption encrypts the communication layer used to discover other
nodes in the cluster and report on failure. If you are only testing Consul,
this is not required.
**NOTE:** This config assumes you've already
created a Kubernetes secret called `consul-gossip-encryption-key`. See
[the docs for this setting](/docs/k8s/helm#v-global-gossipencryption) for
more information on how to create this secret.
1. The default mesh gateway configuration
creates a Kubernetes Load Balancer service. If you wish to customize the
mesh gateway, for example using a Node Port service or a custom DNS entry,
see the [Helm reference](/docs/k8s/helm#v-meshgateway) for that setting.
With your `config.yaml` ready to go, follow our [Installation Guide](/docs/k8s/installation/overview)
to install Consul on your primary cluster and then skip ahead to the [Federation Secret](#federation-secret)
section.
#### Upgrading An Existing Cluster
If you have an existing cluster, you will need to upgrade it to ensure it has
the following config:
```yaml
global:
image: consul:1.8.0-beta1
tls:
enabled: true
federation:
enabled: true
createFederationSecret: true
acls:
manageSystemACLs: true
createReplicationToken: true
meshGateway:
enabled: true
```
1. `global.image` must be set to `consul:1.8.0-beta1` because Kubernetes support
for federation is currently only available in Consul 1.8.0 beta.
1. `global.tls.enabled` must be `true`. See [Configuring TLS on an Existing Cluster](/docs/k8s/operations/tls-on-existing-cluster)
for more information on safely upgrading a cluster to use TLS.
If you've set `enableAutoEncrypt: true`, this is also supported.
1. `global.federation.enabled` must be set to `true`. This is a new config setting.
1. If using ACLs, you'll already have `global.acls.manageSystemACLs: true`. For the
primary cluster, you'll also need to set `global.acls.createReplicationToken: true`.
This ensures that an ACL token is created that secondary clusters can use to authenticate
with the primary.
1. Mesh Gateways are enabled with the default configuration. The default configuration
creates a Kubernetes Load Balancer service. If you wish to customize the
mesh gateway, see the [Helm reference](/docs/k8s/helm#v-meshgateway) for that setting.
With the above settings added to your existing config, follow the [Upgrading](/docs/k8s/operations/upgrading)
guide to upgrade your cluster and then come back to the [Federation Secret](#federation-secret) section.
## Federation Secret
The federation secret is a Kubernetes secret containing information needed
for secondary datacenters/clusters to federate with the primary. This secret is created
automatically by setting:
```yaml
global:
federation:
createFederationSecret: true
```
After the installation into your primary cluster you will need to export
this secret:
```sh
$ kubectl get secret consul-federation -o yaml > consul-federation-secret.yaml
```
!> **Security note:** The federation secret makes it possible to gain
full admin privileges in Consul. This secret must be kept securely, i.e.
it should be deleted from your filesystem after importing it to your secondary
cluster and you should use RBAC permissions to ensure only administrators
can read it from Kubernetes.
~> **Secret doesn't exist?** If you haven't set `global.name` to `consul` then the name of the secret will
be your Helm release name suffixed with `-consul-federation` e.g. `helm-release-consul-federation`.
Now you're ready to import the secret into your secondary cluster(s).
Switch `kubectl` context to your secondary Kubernetes cluster. In this example
our context for our secondary cluster is `dc2`:
```sh
$ kubectl config use-context dc2
Switched to context "dc2".
```
And import the secret:
```sh
$ kubectl apply -f consul-federation-secret.yaml
secret/consul-federation configured
```
#### Federation Secret Contents
The automatically generated federation secret contains:
- **Server certificate authority certificate** - This is the certificate authority
used to sign Consul server-to-server communication. This is required by secondary
clusters because they must communicate with the Consul servers in the primary cluster.
- **Server certificate authority key** - This is the signing key for the server certificate
authority. This is required by secondary clusters because they need to create
server certificates for each Consul server using the same certificate authority
as the primary.
!> **Security note:** The certificate authority key would enable an attacker to compromise Consul,
it should be kept securely.
- **Consul server config** - This is a JSON snippet that must be used as part of the server config for secondary datacenters.
It sets:
- [`primary_datacenter`](/docs/agent/options.html#primary_datacenter) to the name of the primary datacenter.
- [`primary_gateways`](/docs/agent/options.html#primary_gateways) to an array of IPs or hostnames
for the mesh gateways in the primary datacenter. These are the addresses that
Consul servers in secondary clusters will use to communicate with the primary
datacenter.
Even if there are multiple secondary datacenters, only the primary gateways
need to be configured. Upon first connection with a primary datacenter, the
addresses for other secondary datacenters will be discovered.
- **ACL replication token** - If ACLs are enabled, secondary datacenters need
an ACL token in order to authenticate with the primary datacenter. This ACL
token is also used to replicate ACLs from the primary datacenter so that
components in each datacenter can authenticate with one another.
- **Gossip encryption key** - If gossip encryption is enabled, secondary datacenters
need the gossip encryption key in order to be part of the gossip pool.
Gossip is the method by which Consul discovers the addresses and health of other
nodes.
!> **Security note:** This gossip encryption key would enable an attacker to compromise Consul,
it should be kept securely.
## Secondary Cluster(s)
With the primary cluster up and running, and the [federation secret](/docs/k8s/installation/multi-cluster/kubernetes#federation-secret) imported
into the secondary cluster, we can now install Consul into the secondary
cluster.
You will need to use the following `config.yaml` file for your secondary cluster(s),
with the possible modifications listed below.
-> **NOTE:** You must use a separate Helm config file for each cluster (primary and secondaries) since their
settings are different.
```yaml
global:
name: consul
image: consul:1.8.0-beta1
datacenter: dc2
tls:
enabled: true
# Here we're using the shared certificate authority from the primary
# datacenter that was exported via the federation secret.
caCert:
secretName: consul-federation
secretKey: caCert
caKey:
secretName: consul-federation
secretKey: caKey
acls:
manageSystemACLs: true
# Here we're importing the replication token that was
# exported from the primary via the federation secret.
replicationToken:
secretName: consul-federation
secretKey: replicationToken
federation:
enabled: true
gossipEncryption:
secretName: consul-federation
secretKey: gossipEncryptionKey
connectInject:
enabled: true
meshGateway:
enabled: true
server:
# Here we're including the server config exported from the primary
# via the federation secret. This config includes the addresses of
# the primary datacenter's mesh gateways so Consul can begin federation.
extraVolumes:
- type: secret
name: consul-federation
items:
- key: serverConfigJSON
path: config.json
load: true
```
Modifications:
1. Note `global.image: consul:1.8.0-beta1`. Kubernetes support for federation is currently only available
in Consul 1.8.0 beta.
1. The Consul datacenter name is `dc2`. The primary datacenter's name was `dc1`.
The datacenter name in **each** federated cluster **must be unique**.
1. ACLs are enabled in the above config file. They can be disabled by removing
the whole `acls` block:
```yaml
acls:
manageSystemACLs: false
replicationToken:
secretName: consul-federation
secretKey: replicationToken
```
If ACLs are enabled in one datacenter, they must be enabled in all datacenters
because in order to communicate with that one datacenter ACL tokens are required.
1. Gossip encryption is enabled in the above config file. To disable it, don't
set the `gossipEncryption` key:
```yaml
global:
# gossipEncryption:
# secretName: consul-federation
# secretKey: gossipEncryptionKey
```
If gossip encryption is enabled in one datacenter, it must be enabled in all datacenters
because in order to communicate with that one datacenter the encryption key is required.
1. The default mesh gateway configuration
creates a Kubernetes Load Balancer service. If you wish to customize the
mesh gateway, for example using a Node Port service or a custom DNS entry,
see the [Helm reference](/docs/k8s/helm#v-meshgateway) for that setting.
With your `config.yaml` ready to go, follow our [Installation Guide](/docs/k8s/installation/overview)
to install Consul on your secondary cluster(s).
## Verifying Federation
To verify that both datacenters are federated, run the
`consul members -wan` command on one of the Consul server pods:
```sh
$ kubectl exec statefulset/consul-server -- consul members -wan
Node Address Status Type Build Protocol DC Segment
consul-server-0.dc1 10.32.4.216:8302 alive server 1.8.0 2 dc1 <all>
consul-server-0.dc2 192.168.2.173:8302 alive server 1.8.0 2 dc2 <all>
consul-server-1.dc1 10.32.5.161:8302 alive server 1.8.0 2 dc1 <all>
consul-server-1.dc2 192.168.88.64:8302 alive server 1.8.0 2 dc2 <all>
consul-server-2.dc1 10.32.1.175:8302 alive server 1.8.0 2 dc1 <all>
consul-server-2.dc2 192.168.35.174:8302 alive server 1.8.0 2 dc2 <all>
```
In this example (run from `dc1`), you can see that this datacenter knows about
the servers in dc2 and that they have status `alive`.
You can also use the `consul catalog services` command with the `-datacenter` flag to ensure
each datacenter can read each other's services. In this example, our `kubectl`
context is `dc1` and we're querying for the list of services in `dc2`:
```sh
$ kubectl exec statefulset/consul-server -- consul catalog services -datacenter dc2
consul
mesh-gateway
```
You can switch kubectl contexts and run the same command in `dc2` with the flag
`-datacenter dc1` to ensure `dc2` can communicate with `dc1`.
### Consul UI
We can also use the Consul UI to verify federation.
See [Viewing the Consul UI](/docs/k8s/installation/overview#viewing-the-consul-ui)
for instructions on how to view the UI.
~> NOTE: If ACLs are enabled, your kubectl context must be in the primary datacenter
to retrieve the bootstrap token mentioned in the UI documentation.
With the UI open, you'll be able to switch between datacenters via the dropdown
in the top left:
![Consul Datacenter Dropdown](/img/consul-datacenter-dropdown.png 'Consul Datacenter Dropdown')
## Next Steps
With your Kubernetes clusters federated, try out using Consul service mesh to
route between services deployed on each cluster by following our Learn Guide: [Secure and Route Service Mesh Communication Across Kubernetes](https://learn.hashicorp.com/consul/kubernetes/mesh-gateways#deploy-microservices).
You can also read our in-depth documentation on [Consul Service Mesh In Kubernetes](/docs/k8s/connect).

View File

@ -0,0 +1,73 @@
---
layout: docs
page_title: Multi-Cluster Federation Overview
sidebar_title: Overview
description: >-
Installing on multiple Kubernetes clusters.
---
# Multi-Cluster Federation Overview
In Consul, federation is the act of joining two or more Consul datacenters.
When datacenters are joined, Consul servers in each datacenter can communicate
with one another. This enables the following features:
- Services on all clusters can make calls to each other through Consul Service Mesh.
- [Intentions](/docs/connect/intentions) can be used to enforce rules about which services can communicate across all clusters.
- [L7 Routing Rules](/docs/connect/l7-traffic-management) can enable multi-cluster failover
and traffic splitting.
- The Consul UI has a drop-down menu that lets you navigate between datacenters.
## Traditional WAN Federation vs. WAN Federation Via Mesh Gateways
Consul provides two mechanisms for WAN (Wide Area Network) federation:
1. Traditional WAN Federation
1. WAN Federation Via Mesh Gateways (newly available in Consul 1.8.0)
### Traditional WAN Federation
With traditional WAN federation, all Consul servers must be exposed on the wide area
network. In the Kubernetes context this is often difficult to set up. It would require that
each Consul server pod is running on a Kubernetes node with an IP address that is routable from
all other Kubernetes clusters. Often Kubernetes clusters are deployed into private
subnets that other clusters cannot route to without additional network devices and configuration.
The Kubernetes solution to the problem of exposing pods is load balancer services but these can't be used
with traditional WAN federation because it requires proxying both UDP and TCP and Kubernetes load balancers only proxy TCP.
In addition, each Consul server would need its own load balancer because each
server needs a unique address. This would increase cost and complexity.
![Traditional WAN Federation](/img/traditional-wan-federation.png 'Traditional WAN Federation')
### WAN Federation Via Mesh Gateways
To solve the problems that occurred with traditional WAN federation,
Consul 1.8.0 now supports WAN federation **via mesh gateways**. This mechanism
only requires that mesh gateways are exposed with routable addresses, not Consul servers. We can front
the mesh gateway pods with a single Kubernetes service and all traffic flows between
datacenters through the mesh gateways.
![WAN Federation Via Mesh Gateway](/img/mesh-gateway-wan-federation.png 'WAN Federation Via Mesh Gateway')
## Network Requirements
Clusters/datacenters can be federated even if they have overlapping pod IP spaces or if they're
on different cloud providers or platforms. Kubernetes clusters can even be
federated with Consul datacenters running on virtual machines (and vice versa).
Because the communication between clusters is end-to-end encrypted, mesh gateways
can even be exposed on the public internet.
The only requirement is that the mesh gateways for each cluster can route to
one another. For example, if using a load balancer service in front of each cluster's
mesh gateway, the load balancer IP must be routable from the other mesh gateway pods.
If using a public load balancer, this is guaranteed. If using a private load balancer
then you'll need to make sure that its IP is routable from your other clusters.
## Next Steps
Now that you have an overview of federation, proceed to either the
[Federation Between Kubernetes Clusters](/docs/k8s/installation/multi-cluster/kubernetes)
or [Federation Between VMs and Kubernetes](/docs/k8s/installation/multi-cluster/vms-and-kubernetes)
pages depending on your use case.

View File

@ -0,0 +1,295 @@
---
layout: docs
page_title: Federation Between VMs and Kubernetes
sidebar_title: Federation Between VMs and Kubernetes
description: >-
Federating Kubernetes clusters and VMs.
---
# Federation Between VMs and Kubernetes
-> **1.8.0+:** This feature is available in Consul versions 1.8.0 and higher
~> This topic requires familiarity with [Mesh Gateways](/docs/connect/mesh_gateway) and [WAN Federation Via Mesh Gateways](/docs/connect/wan-federation-via-mesh-gateways).
Consul datacenters running on non-kubernetes platforms like VMs or bare metal can
be federated with Kubernetes datacenters. Just like with Kubernetes, one datacenter
must be the [primary](/docs/k8s/installation/multi-cluster/kubernetes#primary-datacenter).
## Kubernetes as the Primary
If your primary datacenter is running on Kubernetes, use the Helm config from the
[Primary Datacenter](/docs/k8s/installation/multi-cluster/kubernetes#primary-datacenter) section to install Consul.
Once installed, you'll need to export the following information from the primary Kubernetes
cluster:
1. The certificate authority cert:
```sh
kubectl get secrets/consul-ca-cert --template='{{index .data "tls.crt" }}' |
base64 -D > consul-agent-ca.pem
```
and the certificate authority signing key:
```sh
kubectl get secrets/consul-ca-key --template='{{index .data "tls.key" }}' |
base64 -D > consul-agent-ca-key.pem
```
With the `consul-agent-ca.pem` and `consul-agent-ca-key.pem` files you can
create certificates for your servers and clients running on VMs that share the
same certificate authority as your Kubernetes servers.
You can use the `consul tls` commands to generate those certificates:
```sh
# NOTE: consul-agent-ca.pem and consul-agent-ca-key.pem must be in the current
# directory.
$ consul tls cert create -server -dc=vm-dc
==> WARNING: Server Certificates grants authority to become a
server and access all state in the cluster including root keys
and all ACL tokens. Do not distribute them to production hosts
that are not server nodes. Store them as securely as CA keys.
==> Using consul-agent-ca.pem and consul-agent-ca-key.pem
==> Saved vm-dc-server-consul-0.pem
==> Saved vm-dc-server-consul-0-key.pem
```
See the help for output of `consul tls cert create -h` to see more options
for generating server certificates.
These certificates can be used in your server config file:
```hcl
# server.hcl
cert_file = "vm-dc-server-consul-0.pem"
key_file = "vm-dc-server-consul-0-key.pem"
ca_file = "consul-agent-ca.pem"
```
For clients, you can generate TLS certs with:
```sh
$ consul tls cert create -client
==> Using consul-agent-ca.pem and consul-agent-ca-key.pem
==> Saved dc1-client-consul-0.pem
==> Saved dc1-client-consul-0-key.pem
```
Or use the [auto_encrypt](/docs/agent/options.html#auto_encrypt) feature.
1. The WAN addresses of the mesh gateways:
```sh
$ kubectl exec statefulset/consul-server -- sh -c \
'curl -sk https://localhost:8501/v1/catalog/service/mesh-gateway | jq ".[].ServiceTaggedAddresses.wan"'
{
"Address": "1.2.3.4",
"Port": 443
}
{
"Address": "1.2.3.4",
"Port": 443
}
```
In this example, the addresses are the same because both mesh gateway pods are
fronted by the same Kubernetes load balancer.
These addresses will be used in the server config for the `primary_gateways`
setting:
```hcl
primary_gateways = ["1.2.3.4:443"]
```
1. If ACLs are enabled, you'll also need the replication ACL token:
```sh
$ kubectl get secrets/consul-acl-replication-acl-token --template='{{.data.token}}' | base64 -D
e7924dd1-dc3f-f644-da54-81a73ba0a178
```
This token will be used in the server config for the replication token.
You must also create your own agent policy and token.
```hcl
acls {
tokens {
agent = "<your agent token>"
replication = "e7924dd1-dc3f-f644-da54-81a73ba0a178"
}
}
```
1. If gossip encryption is enabled, you'll need the key as well. The command
to retrieve the key will depend on which Kubernetes secret you've stored it in.
This key will be used in server and client configs for the `encrypt` setting:
```hcl
encrypt = "uF+GsbI66cuWU21kiXLze5JLEX5j4iDFlDTb0ZWNpDI="
```
A final example server config file might look like:
```hcl
# From above
cert_file = "vm-dc-server-consul-0.pem"
key_file = "vm-dc-server-consul-0-key.pem"
ca_file = "consul-agent-ca.pem"
primary_gateways = ["1.2.3.4:443"]
acl {
enabled = true
default_policy = "deny"
down_policy = "extend-cache"
tokens {
agent = "e7924dd1-dc3f-f644-da54-81a73ba0a178"
replication = "e7924dd1-dc3f-f644-da54-81a73ba0a178"
}
}
encrypt = "uF+GsbI66cuWU21kiXLze5JLEX5j4iDFlDTb0ZWNpDI="
# Other server settings
server = true
datacenter = "vm-dc"
data_dir = "/opt/consul"
enable_central_service_config = true
primary_datacenter = "dc1"
connect {
enabled = true
enable_mesh_gateway_wan_federation = true
}
verify_incoming_rpc = true
verify_outgoing = true
verify_server_hostname = true
ports {
https = 8501
http = -1
grpc = 8502
}
```
## Kubernetes as the Secondary
If you're running your primary datacenter on VMs then you'll need to manually
construct the [Federation Secret](/docs/k8s/installation/multi-cluster/kubernetes#federation-secret) in order to federate
Kubernetes clusters as secondaries.
-> Your VM cluster must be running mesh gateways, and have mesh gateway WAN
federation enabled. See [WAN Federation via Mesh Gateways](/docs/connect/wan-federation-via-mesh-gateways).
You'll need:
1. The root certificate authority cert placed in `consul-agent-ca.pem`.
1. The root certificate authority key placed in `consul-agent-ca-key.pem`.
1. The IP addresses of the mesh gateways running in your VM datacenter. These must
be routable from the Kubernetes cluster.
1. If ACLs are enabled you must create an ACL replication token with the following rules:
```hcl
acl = "write"
operator = "write"
agent_prefix "" {
policy = "read"
}
node_prefix "" {
policy = "write"
}
service_prefix "" {
policy = "read"
intentions = "read"
}
```
This token is used for ACL replication and for automatic ACL management in Kubernetes.
If you're running Consul Enterprise you'll need the rules:
```hcl
acl = "write"
operator = "write"
agent_prefix "" {
policy = "read"
}
node_prefix "" {
policy = "write"
}
namespace_prefix "" {
service_prefix "" {
policy = "read"
intentions = "read"
}
}
```
1. If gossip encryption is enabled, you'll need the key.
With that data ready, you can create the Kubernetes federation secret:
```sh
kubectl create secret generic consul-federation \
  --from-literal=caCert="$(cat consul-agent-ca.pem)" \
  --from-literal=caKey="$(cat consul-agent-ca-key.pem)"
# If ACLs are enabled uncomment.
# --from-literal=replicationToken="<your acl replication token>" \
# If using gossip encryption uncomment.
# --from-literal=gossipEncryptionKey="<your gossip encryption key>"
```
Then use the following Helm config file:
```yaml
global:
name: consul
image: consul:1.8.0-beta1
datacenter: dc2
tls:
enabled: true
caCert:
secretName: consul-federation
secretKey: caCert
caKey:
secretName: consul-federation
secretKey: caKey
# Delete this acls section if ACLs are disabled.
acls:
manageSystemACLs: true
replicationToken:
secretName: consul-federation
secretKey: replicationToken
federation:
enabled: true
# Delete this gossipEncryption section if gossip encryption is disabled.
gossipEncryption:
secretName: consul-federation
secretKey: gossipEncryptionKey
connectInject:
enabled: true
meshGateway:
enabled: true
server:
extraConfig: |
{
"primary_datacenter": "<your VM datacenter name>",
"primary_gateways": ["<ip of your VM mesh gateway>", "<other ip>", ...]
}
```
-> **NOTE:** You must fill out the `server.extraConfig` section with the datacenter
name of your primary datacenter running on VMs and with the IPs of your mesh
gateways running on VMs.
With your config file ready to go, follow our [Installation Guide](/docs/k8s/installation/overview)
to install Consul on your secondary cluster(s).
## Next Steps
Read the [Verifying Federation](/docs/k8s/installation/multi-cluster/kubernetes#verifying-federation)
section to verify that federation is working as expected.

View File

@ -1,7 +1,7 @@
---
layout: docs
page_title: Installing Consul on Kubernetes - Kubernetes
sidebar_title: Installation
sidebar_title: Overview
description: >-
Consul can run directly on Kubernetes, both in server or client mode. For
pure-Kubernetes workloads, this enables Consul to also exist purely within
@ -18,7 +18,7 @@ a server running inside or outside of Kubernetes.
This page starts with a large how-to section for various specific tasks.
To learn more about the general architecture of Consul on Kubernetes, scroll
down to the [architecture](/docs/platform/k8s/run.html#architecture) section.
down to the [architecture](/docs/k8s/installation/overview.html#architecture) section.
If you would like to get hands-on experience testing Consul as a service mesh
for Kubernetes, check the guides in the [Getting Started with Consul service
mesh](https://learn.hashicorp.com/consul/gs-consul-service-mesh/understand-consul-service-mesh?utm_source=WEBSITE&utm_medium=WEB_IO&utm_offer=ARTICLE_PAGE&utm_content=DOCS) track.
@ -26,7 +26,7 @@ mesh](https://learn.hashicorp.com/consul/gs-consul-service-mesh/understand-consu
## Helm Chart Installation
The recommended way to run Consul on Kubernetes is via the
[Helm chart](/docs/platform/k8s/helm). This will install and configure
[Helm chart](/docs/k8s/helm). This will install and configure
all the necessary components to run Consul. The configuration enables you
to run just a server cluster, just a client cluster, or both. Using the Helm
chart, you can have a full Consul deployment up and running in minutes.
@ -42,7 +42,7 @@ upgrade, etc. the Consul cluster.
The Helm chart has no required configuration and will install a Consul
cluster with sane defaults out of the box. Prior to going to production,
it is highly recommended that you
[learn about the configuration options](/docs/platform/k8s/helm#configuration-values).
[learn about the configuration options](/docs/k8s/helm#configuration-values).
~> **Security Warning:** By default, the chart will install an insecure configuration
of Consul. This provides a less complicated out-of-box experience for new users,
@ -96,7 +96,7 @@ elected and every node will have a running Consul agent.
If you want to customize your installation,
create a `config.yaml` file to override the default settings.
You can learn what settings are available by running `helm inspect values hashicorp/consul`
or by reading the [Helm Chart Reference](/docs/platform/k8s/helm).
or by reading the [Helm Chart Reference](/docs/k8s/helm).
For example, if you want to enable the [Consul Connect](/docs/k8s/connect) feature,
use the following config file:
@ -118,14 +118,17 @@ NAME: consul
```
If you've already installed Consul and want to make changes, you'll need to run
`helm upgrade`. See the [Upgrading Consul on Kubernetes](/docs/platform/k8s/run#upgrading-consul-on-kubernetes)
section for more details.
`helm upgrade`. See [Upgrading](/docs/k8s/operations/upgrading) for more details.
## Viewing the Consul UI
The Consul UI is enabled by default when using the Helm chart.
For security reasons, it isn't exposed via a `LoadBalancer` Service by default so you must
use `kubectl port-forward` to visit the UI:
use `kubectl port-forward` to visit the UI.
#### TLS Disabled
If running with TLS disabled, the Consul UI will be accessible via http on port 8500:
```
$ kubectl port-forward service/consul-server 8500:8500
@ -134,8 +137,42 @@ $ kubectl port-forward service/consul-server 8500:8500
Once the port is forwarded navigate to [http://localhost:8500](http://localhost:8500).
#### TLS Enabled
If running with TLS enabled, the Consul UI will be accessible via https on port 8501:
```
$ kubectl port-forward service/consul-server 8501:8501
...
```
Once the port is forwarded navigate to [https://localhost:8501](https://localhost:8501).
~> You'll need to click through an SSL warning from your browser because the
Consul certificate authority is self-signed and not in the browser's trust store.
#### ACLs Enabled
If ACLs are enabled, you will need to input an ACL token into the UI in order
to see all resources and make modifications.
To retrieve the bootstrap token that has full permissions, run:
```sh
$ kubectl get secrets/consul-bootstrap-acl-token --template={{.data.token}} | base64 -D
e7924dd1-dc3f-f644-da54-81a73ba0a178%
```
Then paste the token into the UI under the ACLs tab (without the `%`).
~> NOTE: If using multi-cluster federation, your kubectl context must be in the primary datacenter
to retrieve the bootstrap token since secondary datacenters use a separate token
with less permissions.
### Exposing the UI via a service
If you want to expose the UI via a Kubernetes Service, configure
the [`ui.service` chart values](/docs/platform/k8s/helm#v-ui-service).
the [`ui.service` chart values](/docs/k8s/helm#v-ui-service).
This service will allow requests to the Consul servers so it should
not be open to the world.
@ -148,7 +185,7 @@ has important caching behavior, and allows you to use the simpler
[`/agent` endpoints for services and checks](/api/agent).
For Consul installed via the Helm chart, a client agent is installed on
each Kubernetes node. This is explained in the [architecture](/docs/platform/k8s/run#client-agents)
each Kubernetes node. This is explained in the [architecture](/docs/k8s/installation/overview#client-agents)
section. To access the agent, you may use the
[downward API](https://kubernetes.io/docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/).
@ -260,7 +297,7 @@ The clients expose the Consul HTTP API via a static port (default 8500)
bound to the host port. This enables all other pods on the node to connect
to the node-local agent using the host IP that can be retrieved via the
Kubernetes downward API. See
[accessing the Consul HTTP API](/docs/platform/k8s/run#accessing-the-consul-http-api)
[accessing the Consul HTTP API](/docs/k8s/installation/overview#accessing-the-consul-http-api)
for an example.
There is a major limitation to this: there is no way to bind to a local-only

View File

@ -1,11 +1,21 @@
---
layout: docs
page_title: Predefined PVCs
sidebar_title: Predefined Persistent Volume Claims
description: Using predefined Persistent Volume Claims
page_title: Self Hosted Kubernetes
sidebar_title: Self Hosted Kubernetes
description: Installing Consul on Self Hosted Kubernetes
---
# Predefined Persistent Volume Claims (PVCs)
# Self Hosted Kubernetes
Except for creating persistent volumes (see below), installing Consul on your
self-hosted Kubernetes cluster is the same process as installing Consul on a
cloud-hosted Kubernetes cluster. See the [Installation Overview](/docs/k8s/installation/overview)
for install instructions.
## Predefined Persistent Volume Claims (PVCs)
If running a self-hosted Kubernetes installation, you may need to pre-create
the persistent volumes for the stateful set that the Consul servers run in.
The only way to use pre-created PVCs is to name them in the format Kubernetes expects:
@ -26,6 +36,3 @@ data-vault-consul-consul-server-2
data-vault-consul-consul-server-3
data-vault-consul-consul-server-4
```
If you are using your own storage, you'll need to configure a storage class. See the
documentation for configuring storage classes [here](https://kubernetes.io/docs/concepts/storage/storage-classes/).

View File

@ -35,7 +35,7 @@ This upgrade will trigger a rolling update of the clients, as well as any
other `consul-k8s` components, such as sync catalog or client snapshot deployments.
1. Perform a rolling upgrade of the servers, as described in
[Upgrade Consul Servers](/docs/platform/k8s/upgrading#upgrading-consul-servers).
[Upgrade Consul Servers](/docs/k8s/upgrading#upgrading-consul-servers).
1. Repeat steps 1 and 2, turning on TLS verification by setting `global.tls.verify`
to `true`.
@ -72,7 +72,7 @@ applications to it.
```
In this configuration, we're setting `server.updatePartition` to the number of
server replicas as described in [Upgrade Consul Servers](/docs/platform/k8s/upgrading#upgrading-consul-servers)
server replicas as described in [Upgrade Consul Servers](/docs/k8s/upgrading#upgrading-consul-servers)
and `client.updateStrategy` to `OnDelete` to manually trigger an upgrade of the clients.
1. Run `helm upgrade` with the above config file. The upgrade will trigger an update of all
@ -95,7 +95,7 @@ applications to it.
the sidecar proxy. Also, Kubernetes should schedule these applications on the new node pool.
1. Perform a rolling upgrade of the servers described in
[Upgrade Consul Servers](/docs/platform/k8s/upgrading#upgrading-consul-servers).
[Upgrade Consul Servers](/docs/k8s/upgrading#upgrading-consul-servers).
1. If everything is healthy, delete the old node pool.

View File

@ -15,7 +15,7 @@ services are available to Consul agents and services in Consul can be available
as first-class Kubernetes services. This functionality is provided by the
[consul-k8s project](https://github.com/hashicorp/consul-k8s) and can be
automatically installed and configured using the
[Consul Helm chart](/docs/platform/k8s/run).
[Consul Helm chart](/docs/k8s/installation/overview).
**Why sync Kubernetes services to Consul?** Kubernetes services synced to the
Consul catalog enable Kubernetes services to be accessed by any node that
@ -35,7 +35,7 @@ The service sync is done using an external long-running process in the
[consul-k8s project](https://github.com/hashicorp/consul-k8s). This process
can run either in or out of a Kubernetes cluster. However, running this within
the Kubernetes cluster is generally easier since it is automated using the
[Helm chart](/docs/platform/k8s/helm).
[Helm chart](/docs/k8s/helm).
The Consul server cluster can run either in or out of a Kubernetes cluster.
The Consul server cluster does not need to be running on the same machine
@ -44,7 +44,7 @@ with the address to a Consul agent as well as any additional access
information such as ACL tokens.
To install the sync process, enable the catalog sync feature using
[Helm values](/docs/platform/k8s/helm#configuration-values) and
[Helm values](/docs/k8s/helm#configuration-values) and
upgrade the installation using `helm upgrade` for existing installs or
`helm install` for a fresh install.
@ -74,7 +74,7 @@ syncCatalog:
toK8S: false
```
See the [Helm configuration](/docs/platform/k8s/helm#v-synccatalog)
See the [Helm configuration](/docs/k8s/helm#v-synccatalog)
for more information.
### Authentication
@ -156,7 +156,7 @@ is routable and configured by some other system.
ClusterIP services are synced by default as of `consul-k8s` version 0.3.0. In
many Kubernetes clusters, ClusterIPs may not be accessible outside of the cluster,
so you may end up with services registered in Consul that are not routable. To
skip syncing ClusterIP services, set [`syncClusterIPServices`](/docs/platform/k8s/helm#v-synccatalog-syncclusteripservices)
skip syncing ClusterIP services, set [`syncClusterIPServices`](/docs/k8s/helm#v-synccatalog-syncclusteripservices)
to `false` in the Helm chart values file.
### Sync Enable/Disable
@ -398,11 +398,11 @@ With Consul To Kubernetes syncing enabled, DNS requests of the form `<consul-ser
will be serviced by Consul DNS. From a different Kubernetes namespace than where Consul
is deployed, the DNS request would need to be `<consul-service-name>.<consul-namespace>`.
-> **Note:** Consul to Kubernetes syncing **isn't required** if you've enabled [Consul DNS on Kubernetes](/docs/platform/k8s/dns)
-> **Note:** Consul to Kubernetes syncing **isn't required** if you've enabled [Consul DNS on Kubernetes](/docs/k8s/dns)
_and_ all you need to do is address services in the form `<consul-service-name>.service.consul`, i.e. you don't need Kubernetes `Service` objects created.
~> **Requires Consul DNS via CoreDNS in Kubernetes:** This feature requires that
[Consul DNS](/docs/platform/k8s/dns) is configured within Kubernetes.
[Consul DNS](/docs/k8s/dns) is configured within Kubernetes.
Additionally, **[CoreDNS](https://kubernetes.io/docs/tasks/administer-cluster/dns-custom-nameservers/#config-coredns)
is required (instead of kube-dns)** to resolve an
issue with resolving `externalName` services pointing to custom domains.

BIN
website/public/img/consul-datacenter-dropdown.png (Stored with Git LFS) Normal file

Binary file not shown.

BIN
website/public/img/mesh-gateway-wan-federation.png (Stored with Git LFS) Normal file

Binary file not shown.

BIN
website/public/img/traditional-wan-federation.png (Stored with Git LFS) Normal file

Binary file not shown.