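# IstioOperator configuration for Istio 1.6.3 (see hub/tag below).
# Assuming a matching istioctl 1.6.x binary, a file like this is typically
# applied with, e.g.:
#   istioctl install -f <path-to-this-file>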
apiVersion: install.istio.io/v1alpha1
kind: IstioOperator
spec:
addonComponents:
grafana:
enabled: false
k8s:
replicaCount: 1
istiocoredns:
enabled: false
    # Kiali dashboard for monitoring the service mesh. Requires at least the
    # Prometheus addon to be enabled (see below) and a secret providing the
    # user credentials.
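    # A minimal sketch of creating that secret, assuming the defaults used in
    # the kiali values further down (secret "kiali" in istio-system with the
    # keys "username" and "passphrase"); <user> and <password> are
    # placeholders:
    #   kubectl create secret generic kiali -n istio-system \
    #     --from-literal=username=<user> \
    #     --from-literal=passphrase=<password>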
kiali:
enabled: true
k8s:
replicaCount: 1
    # Required for the Kiali dashboard.
    # TODO: Try to use the Prometheus instance from the gerrit-monitoring setup.
prometheus:
enabled: true
k8s:
replicaCount: 1
tracing:
enabled: false
components:
base:
enabled: true
citadel:
enabled: false
k8s:
strategy:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 25%
cni:
enabled: false
egressGateways:
- enabled: false
k8s:
env:
- name: ISTIO_META_ROUTER_MODE
value: sni-dnat
hpaSpec:
maxReplicas: 5
metrics:
- resource:
name: cpu
targetAverageUtilization: 80
type: Resource
minReplicas: 1
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: istio-egressgateway
resources:
limits:
cpu: 2000m
memory: 1024Mi
requests:
cpu: 100m
memory: 128Mi
service:
ports:
- name: http2
port: 80
- name: https
port: 443
- name: tls
port: 15443
targetPort: 15443
strategy:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 25%
name: istio-egressgateway
ingressGateways:
- enabled: true
k8s:
env:
- name: ISTIO_META_ROUTER_MODE
value: sni-dnat
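        # Autoscale the ingress gateway between 1 and 5 replicas, targeting
        # 80% average CPU utilization (see hpaSpec below).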
hpaSpec:
maxReplicas: 5
metrics:
- resource:
name: cpu
targetAverageUtilization: 80
type: Resource
minReplicas: 1
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: istio-ingressgateway
resources:
limits:
cpu: 2000m
memory: 1024Mi
requests:
cpu: 100m
memory: 128Mi
service:
ports:
- name: status-port
port: 15021
targetPort: 15021
- name: http2
port: 80
targetPort: 8080
- name: https
port: 443
targetPort: 8443
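          # Port 15443 (TLS passthrough, commonly used for cross-network /
          # multicluster traffic) is left disabled here: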
# - name: tls
# port: 15443
# targetPort: 15443
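          # 29418 is the default Gerrit SSH port.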
- name: ssh
port: 29418
strategy:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 25%
name: istio-ingressgateway
istiodRemote:
enabled: false
pilot:
enabled: true
k8s:
env:
- name: POD_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
readinessProbe:
httpGet:
path: /ready
port: 8080
initialDelaySeconds: 1
periodSeconds: 3
timeoutSeconds: 5
strategy:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 25%
policy:
enabled: false
k8s:
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
hpaSpec:
maxReplicas: 5
metrics:
- resource:
name: cpu
targetAverageUtilization: 80
type: Resource
minReplicas: 1
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: istio-policy
strategy:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 25%
telemetry:
enabled: false
k8s:
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: GOMAXPROCS
value: "6"
hpaSpec:
maxReplicas: 5
metrics:
- resource:
name: cpu
targetAverageUtilization: 80
type: Resource
minReplicas: 1
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: istio-telemetry
replicaCount: 1
resources:
limits:
cpu: 4800m
memory: 4G
requests:
cpu: 1000m
memory: 1G
strategy:
rollingUpdate:
maxSurge: 100%
maxUnavailable: 25%
hub: docker.io/istio
profile: default
meshConfig:
defaultConfig:
proxyMetadata: {}
enablePrometheusMerge: false
    # Write access logs to stdout in the istio-proxy sidecars.
    # TODO: Revisit this once a more production-ready logging solution is in
    # place.
accessLogFile: "/dev/stdout"
tag: 1.6.3
values:
base:
validationURL: ""
clusterResources: true
gateways:
istio-egressgateway:
autoscaleEnabled: true
env: {}
name: istio-egressgateway
secretVolumes:
- mountPath: /etc/istio/egressgateway-certs
name: egressgateway-certs
secretName: istio-egressgateway-certs
- mountPath: /etc/istio/egressgateway-ca-certs
name: egressgateway-ca-certs
secretName: istio-egressgateway-ca-certs
type: ClusterIP
zvpn: {}
istio-ingressgateway:
applicationPorts: ""
autoscaleEnabled: true
debug: info
domain: ""
env: {}
meshExpansionPorts:
- name: tcp-pilot-grpc-tls
port: 15011
targetPort: 15011
- name: tcp-istiod
port: 15012
targetPort: 15012
- name: tcp-citadel-grpc-tls
port: 8060
targetPort: 8060
- name: tcp-dns-tls
port: 853
targetPort: 8853
name: istio-ingressgateway
secretVolumes:
- mountPath: /etc/istio/ingressgateway-certs
name: ingressgateway-certs
secretName: istio-ingressgateway-certs
- mountPath: /etc/istio/ingressgateway-ca-certs
name: ingressgateway-ca-certs
secretName: istio-ingressgateway-ca-certs
type: LoadBalancer
zvpn: {}
global:
arch:
amd64: 2
ppc64le: 2
s390x: 2
configValidation: true
controlPlaneSecurityEnabled: true
defaultNodeSelector: {}
defaultPodDisruptionBudget:
enabled: true
defaultResources:
requests:
cpu: 10m
enableHelmTest: false
imagePullPolicy: ""
imagePullSecrets: []
istioNamespace: istio-system
istiod:
enableAnalysis: false
enabled: true
# Gardener does not support the third-party-jwt policy.
jwtPolicy: first-party-jwt
logAsJson: false
logging:
level: default:info
meshExpansion:
enabled: false
useILB: false
meshNetworks: {}
mountMtlsCerts: false
multiCluster:
clusterName: ""
enabled: false
network: ""
omitSidecarInjectorConfigMap: false
oneNamespace: false
operatorManageWebhooks: false
pilotCertProvider: istiod
priorityClassName: ""
proxy:
autoInject: enabled
clusterDomain: cluster.local
componentLogLevel: misc:error
enableCoreDump: false
envoyStatsd:
enabled: false
excludeIPRanges: ""
excludeInboundPorts: ""
excludeOutboundPorts: ""
image: proxyv2
includeIPRanges: '*'
        # Change this to e.g. "info" or "debug" if more detailed logging
        # output is needed.
logLevel: warning
privileged: false
readinessFailureThreshold: 30
readinessInitialDelaySeconds: 1
readinessPeriodSeconds: 2
resources:
limits:
cpu: 2000m
memory: 1024Mi
requests:
cpu: 100m
memory: 128Mi
statusPort: 15020
tracer: zipkin
proxy_init:
image: proxyv2
resources:
limits:
cpu: 100m
memory: 50Mi
requests:
cpu: 10m
memory: 10Mi
sds:
token:
aud: istio-ca
sts:
servicePort: 0
tracer:
datadog:
address: $(HOST_IP):8126
lightstep:
accessToken: ""
address: ""
stackdriver:
debug: false
maxNumberOfAnnotations: 200
maxNumberOfAttributes: 200
maxNumberOfMessageEvents: 200
zipkin:
address: ""
trustDomain: cluster.local
useMCP: false
grafana:
accessMode: ReadWriteMany
contextPath: /grafana
dashboardProviders:
dashboardproviders.yaml:
apiVersion: 1
providers:
- disableDeletion: false
folder: istio
name: istio
options:
path: /var/lib/grafana/dashboards/istio
orgId: 1
type: file
datasources:
datasources.yaml:
apiVersion: 1
env: {}
envSecrets: {}
image:
repository: grafana/grafana
tag: 6.7.4
nodeSelector: {}
persist: false
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
security:
enabled: false
passphraseKey: passphrase
secretName: grafana
usernameKey: username
service:
annotations: {}
externalPort: 3000
name: http
type: ClusterIP
storageClassName: ""
tolerations: []
istiocoredns:
coreDNSImage: coredns/coredns
coreDNSPluginImage: istio/coredns-plugin:0.2-istio-1.1
coreDNSTag: 1.6.2
istiodRemote:
injectionURL: ""
kiali:
contextPath: /kiali
createDemoSecret: false
dashboard:
auth:
strategy: login
grafanaInClusterURL: http://grafana:3000
jaegerInClusterURL: http://tracing/jaeger
passphraseKey: passphrase
secretName: kiali
usernameKey: username
viewOnlyMode: false
hub: quay.io/kiali
nodeSelector: {}
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
security:
cert_file: /kiali-cert/cert-chain.pem
enabled: false
private_key_file: /kiali-cert/key.pem
service:
annotations: {}
tag: v1.18
mixer:
adapters:
kubernetesenv:
enabled: true
prometheus:
enabled: true
metricsExpiryDuration: 10m
stackdriver:
auth:
apiKey: ""
appCredentials: false
serviceAccountPath: ""
enabled: false
tracer:
enabled: false
sampleProbability: 1
stdio:
enabled: false
outputAsJson: false
useAdapterCRDs: false
policy:
adapters:
kubernetesenv:
enabled: true
useAdapterCRDs: false
autoscaleEnabled: true
image: mixer
sessionAffinityEnabled: false
telemetry:
autoscaleEnabled: true
env:
GOMAXPROCS: "6"
image: mixer
loadshedding:
latencyThreshold: 100ms
mode: enforce
nodeSelector: {}
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
replicaCount: 1
sessionAffinityEnabled: false
tolerations: []
pilot:
appNamespaces: []
autoscaleEnabled: true
autoscaleMax: 5
autoscaleMin: 1
configMap: true
configNamespace: istio-config
cpu:
targetAverageUtilization: 80
enableProtocolSniffingForInbound: true
enableProtocolSniffingForOutbound: true
env: {}
image: pilot
keepaliveMaxServerConnectionAge: 30m
nodeSelector: {}
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
policy:
enabled: false
replicaCount: 1
tolerations: []
traceSampling: 1
prometheus:
contextPath: /prometheus
hub: docker.io/prom
nodeSelector: {}
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
provisionPrometheusCert: true
retention: 6h
scrapeInterval: 15s
security:
enabled: true
tag: v2.15.1
tolerations: []
sidecarInjectorWebhook:
enableNamespacesByDefault: false
injectLabel: istio-injection
objectSelector:
autoInject: true
enabled: false
rewriteAppHTTPProbe: false
telemetry:
enabled: true
v1:
enabled: false
v2:
enabled: true
metadataExchange: {}
prometheus:
enabled: true
stackdriver:
configOverride: {}
enabled: false
logging: false
monitoring: false
topology: false
tracing:
jaeger:
accessMode: ReadWriteMany
hub: docker.io/jaegertracing
memory:
max_traces: 50000
persist: false
spanStorageType: badger
storageClassName: ""
tag: "1.16"
nodeSelector: {}
opencensus:
exporters:
stackdriver:
enable_tracing: true
hub: docker.io/omnition
resources:
limits:
cpu: "1"
memory: 2Gi
requests:
cpu: 200m
memory: 400Mi
tag: 0.1.9
podAntiAffinityLabelSelector: []
podAntiAffinityTermLabelSelector: []
provider: jaeger
service:
annotations: {}
externalPort: 9411
name: http-query
type: ClusterIP
zipkin:
hub: docker.io/openzipkin
javaOptsHeap: 700
maxSpans: 500000
node:
cpus: 2
probeStartupDelay: 10
queryPort: 9411
resources:
limits:
cpu: 1000m
memory: 2048Mi
requests:
cpu: 150m
memory: 900Mi
tag: 2.20.0
version: ""