Update Prometheus chart to 12.0.0.

This also switches the Helm chart repository, since the old stable
repository was deprecated. In addition, the new chart version updates
its resources so they no longer use deprecated Kubernetes APIs.
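
For clusters that still track the deprecated stable repository, the
switch amounts to registering the new repository and upgrading the
release from it. A minimal sketch (the release name "prometheus" is an
assumption; the repository alias and chart name match the changes
below):

  helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
  helm repo update
  helm upgrade prometheus prometheus-community/prometheus --version 12.0.0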

Change-Id: Idd3f1ed48e22da303fd62d9c2ee63ccb959ed948
diff --git a/charts/prometheus/VERSION b/charts/prometheus/VERSION
index 4471145..4044f90 100644
--- a/charts/prometheus/VERSION
+++ b/charts/prometheus/VERSION
@@ -1 +1 @@
-9.5.4
+12.0.0
diff --git a/charts/prometheus/prometheus.yaml b/charts/prometheus/prometheus.yaml
index 6aa5823..7cda71c 100644
--- a/charts/prometheus/prometheus.yaml
+++ b/charts/prometheus/prometheus.yaml
@@ -15,24 +15,33 @@
   alertmanager:
     create: true
     name:
-  kubeStateMetrics:
-    create: false
-    name:
+    annotations: {}
   nodeExporter:
     create: false
     name:
+    annotations: {}
   pushgateway:
     create: false
     name:
+    annotations: {}
   server:
     create: true
     name:
+    annotations: {}
 
 alertmanager:
   ## If false, alertmanager will not be installed
   ##
   enabled: true
 
+  ## Use a ClusterRole (and ClusterRoleBinding)
+  ## - If set to false, we define a Role and RoleBinding in the defined namespaces ONLY
+  ## This allows alertmanager to work for users who do not have ClusterAdmin privileges
+  ## but want alertmanager to operate on their own namespaces instead of cluster-wide.
+  useClusterRole: true
+
+  ## Set to a role name to use an existing role, skipping role creation, but still
+  ## creating the ServiceAccount and RoleBinding to the role name set here.
+  useExistingRole: false
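+
+  ## Hypothetical example (the role name below is a placeholder, not part
+  ## of this change), reusing a role created out of band:
+  # useExistingRole: my-existing-alertmanager-role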
+
   ## alertmanager container name
   ##
   name: alertmanager
@@ -40,8 +49,8 @@
   ## alertmanager container image
   ##
   image:
-    repository: prom/alertmanager
-    tag: v0.18.0
+    repository: quay.io/prometheus/alertmanager
+    tag: v0.21.0
     pullPolicy: IfNotPresent
 
   ## alertmanager priorityClassName
@@ -52,14 +61,17 @@
   ##
   extraArgs: {}
 
+  ## Additional InitContainers to initialize the pod
+  ##
+  extraInitContainers: []
+
   ## The URL prefix at which the container can be accessed. Useful if the '-web.external-url' includes a slug
   ## so that the various internal URLs remain accessible as they are in the default case.
   ## (Optional)
   prefixURL: ""
 
   ## External URL which can access alertmanager
-  ## Maybe same with Ingress host name
-  baseURL: "/"
+  baseURL: "http://localhost:9093"
 
   ## Additional alertmanager container environment variable
   ## For instance to add a http_proxy
@@ -151,6 +163,13 @@
   ##
   affinity: {}
 
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
   ## Use an alternate scheduler, e.g. "stork".
   ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
   ##
@@ -195,14 +214,35 @@
     ##
     # storageClass: "-"
 
+    ## alertmanager data Persistent Volume Binding Mode
+    ## If defined, volumeBindingMode: <volumeBindingMode>
+    ## If undefined (the default) or set to null, no volumeBindingMode spec is
+    ##   set, choosing the default mode.
+    ##
+    # volumeBindingMode: ""
+
     ## Subdirectory of alertmanager data Persistent Volume to mount
     ## Useful if the volume's root directory is not empty
     ##
     subPath: ""
 
+  emptyDir:
+    ## alertmanager emptyDir volume size limit
+    ##
+    sizeLimit: ""
+
   ## Annotations to be added to alertmanager pods
   ##
   podAnnotations: {}
+    ## Tell prometheus to use a specific set of alertmanager pods
+    ## instead of all alertmanager pods found in the same namespace
+    ## Useful if you deploy multiple releases within the same namespace
+    ##
+    ## prometheus.io/probe: alertmanager-teamA
+
+  ## Labels to be added to Prometheus AlertManager pods
+  ##
+  podLabels: {}
 
   ## Specify if a Pod Security Policy for node-exporter must be created
   ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
@@ -222,12 +262,18 @@
   ##
   replicaCount: 1
 
+  ## Annotations to be added to deployment
+  ##
+  deploymentAnnotations: {}
+
   statefulSet:
     ## If true, use a statefulset instead of a deployment for pod management.
     ## This allows scaling replicas to more than 1 pod
     ##
     enabled: false
 
+    annotations: {}
+    labels: {}
     podManagementPolicy: OrderedReady
 
     ## Alertmanager headless service to use for the statefulset
@@ -238,7 +284,7 @@
 
       ## Enable peer mesh service endpoints to enable the HA alert manager
       ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md
-      # enableMeshPeer : true
+      enableMeshPeer: false
 
       servicePort: 80
 
@@ -286,136 +332,78 @@
 ## Ref: https://github.com/jimmidyson/configmap-reload
 ##
 configmapReload:
-  ## configmap-reload container name
-  ##
-  name: configmap-reload
-
-  ## configmap-reload container image
-  ##
-  image:
-    repository: jimmidyson/configmap-reload
-    tag: v0.2.2
-    pullPolicy: IfNotPresent
-
-  ## Additional configmap-reload container arguments
-  ##
-  extraArgs: {}
-  ## Additional configmap-reload volume directories
-  ##
-  extraVolumeDirs: []
-
-
-  ## Additional configmap-reload mounts
-  ##
-  extraConfigmapMounts: []
-    # - name: prometheus-alerts
-    #   mountPath: /etc/alerts.d
-    #   subPath: ""
-    #   configMap: prometheus-alerts
-    #   readOnly: true
-
-
-  ## configmap-reload resource requests and limits
-  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
-  ##
-  resources: {}
+  prometheus:
+    ## If false, the configmap-reload container will not be deployed
+    ##
+    enabled: true
+    ## configmap-reload container name
+    ##
+    name: configmap-reload
+    ## configmap-reload container image
+    ##
+    image:
+      repository: jimmidyson/configmap-reload
+      tag: v0.4.0
+      pullPolicy: IfNotPresent
+    ## Additional configmap-reload container arguments
+    ##
+    extraArgs: {}
+    ## Additional configmap-reload volume directories
+    ##
+    extraVolumeDirs: []
+    ## Additional configmap-reload mounts
+    ##
+    extraConfigmapMounts: []
+      # - name: prometheus-alerts
+      #   mountPath: /etc/alerts.d
+      #   subPath: ""
+      #   configMap: prometheus-alerts
+      #   readOnly: true
+    ## configmap-reload resource requests and limits
+    ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources: {}
+  alertmanager:
+    ## If false, the configmap-reload container will not be deployed
+    ##
+    enabled: true
+    ## configmap-reload container name
+    ##
+    name: configmap-reload
+    ## configmap-reload container image
+    ##
+    image:
+      repository: jimmidyson/configmap-reload
+      tag: v0.4.0
+      pullPolicy: IfNotPresent
+    ## Additional configmap-reload container arguments
+    ##
+    extraArgs: {}
+    ## Additional configmap-reload volume directories
+    ##
+    extraVolumeDirs: []
+    ## Additional configmap-reload mounts
+    ##
+    extraConfigmapMounts: []
+      # - name: prometheus-alerts
+      #   mountPath: /etc/alerts.d
+      #   subPath: ""
+      #   configMap: prometheus-alerts
+      #   readOnly: true
+    ## configmap-reload resource requests and limits
+    ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources: {}
 
 kubeStateMetrics:
-  ## If false, kube-state-metrics will not be installed
+  ## If false, kube-state-metrics sub-chart will not be installed
   ##
   enabled: false
 
-  ## kube-state-metrics container name
-  ##
-  name: kube-state-metrics
-
-  ## kube-state-metrics container image
-  ##
-  image:
-    repository: quay.io/coreos/kube-state-metrics
-    tag: v1.6.0
-    pullPolicy: IfNotPresent
-
-  ## kube-state-metrics priorityClassName
-  ##
-  priorityClassName: ""
-
-  ## kube-state-metrics container arguments
-  ##
-  args: {}
-
-  ## Node tolerations for kube-state-metrics scheduling to nodes with taints
-  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
-  ##
-  tolerations: []
-    # - key: "key"
-    #   operator: "Equal|Exists"
-    #   value: "value"
-    #   effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
-
-  ## Node labels for kube-state-metrics pod assignment
-  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
-  ##
-  nodeSelector: {}
-
-  ## Annotations to be added to kube-state-metrics pods
-  ##
-  podAnnotations: {}
-
-  ## Specify if a Pod Security Policy for node-exporter must be created
-  ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
-  ##
-  podSecurityPolicy:
-    annotations: {}
-      ## Specify pod annotations
-      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor
-      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
-      ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl
-      ##
-      # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*'
-      # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default'
-      # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default'
-
-  pod:
-    labels: {}
-
-  replicaCount: 1
-
-  ## kube-state-metrics resource requests and limits
-  ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
-  ##
-  resources: {}
-    # limits:
-    #   cpu: 10m
-    #   memory: 16Mi
-    # requests:
-    #   cpu: 10m
-    #   memory: 16Mi
-
-  ## Security context to be added to kube-state-metrics pods
-  ##
-  securityContext:
-    runAsUser: 65534
-    runAsNonRoot: true
-
-  service:
-    annotations:
-      prometheus.io/scrape: "true"
-    labels: {}
-
-    # Exposed as a headless service:
-    # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services
-    clusterIP: None
-
-    ## List of IP addresses at which the kube-state-metrics service is available
-    ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
-    ##
-    externalIPs: []
-
-    loadBalancerIP: ""
-    loadBalancerSourceRanges: []
-    servicePort: 80
-    type: ClusterIP
+## kube-state-metrics sub-chart configurable values
+## Please see https://github.com/helm/charts/tree/master/stable/kube-state-metrics
+##
+# kube-state-metrics:
 
 nodeExporter:
   ## If false, node-exporter will not be installed
@@ -437,8 +425,8 @@
   ## node-exporter container image
   ##
   image:
-    repository: prom/node-exporter
-    tag: v0.18.0
+    repository: quay.io/prometheus/node-exporter
+    tag: v1.0.1
     pullPolicy: IfNotPresent
 
   ## Specify if a Pod Security Policy for node-exporter must be created
@@ -468,6 +456,10 @@
   ##
   extraArgs: {}
 
+  ## Additional InitContainers to initialize the pod
+  ##
+  extraInitContainers: []
+
   ## Additional node-exporter hostPath mounts
   ##
   extraHostPathMounts: []
@@ -506,6 +498,13 @@
   pod:
     labels: {}
 
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
   ## node-exporter resource limits & requests
   ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/
   ##
@@ -546,26 +545,46 @@
   ## Prometheus server container name
   ##
   enabled: true
+
+  ## Use a ClusterRole (and ClusterRoleBinding)
+  ## - If set to false - we define a RoleBinding in the defined namespaces ONLY
+  ##
+  ## NB: because we need a Role with nonResourceURLs ("/metrics"), you must get someone with Cluster-admin
+  ##     privileges to define this role for you before running with this setting enabled.
+  ##     This allows prometheus to work for users who do not have ClusterAdmin privileges
+  ##     but want prometheus to operate on their own namespaces instead of cluster-wide.
+  ##
+  ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus.
+  ##
+  # useExistingClusterRoleName: nameofclusterrole
+  ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges.
+  # namespaces:
+  #   - yournamespace
+
   name: server
   sidecarContainers:
 
   ## Prometheus server container image
   ##
   image:
-    repository: prom/prometheus
-    tag: v2.13.1
+    repository: quay.io/prometheus/prometheus
+    tag: v2.22.1
     pullPolicy: IfNotPresent
 
   ## prometheus server priorityClassName
   ##
   priorityClassName: ""
 
+  ## EnableServiceLinks indicates whether information about services should be injected
+  ## into the pod's environment variables, matching the syntax of Docker links.
+  ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0.
+  ##
+  enableServiceLinks: true
+
   ## The URL prefix at which the container can be accessed. Useful if the '-web.external-url' includes a slug
   ## so that the various internal URLs remain accessible as they are in the default case.
   ## (Optional)
   prefixURL: ""
 
-  ## External URL which can access alertmanager
+  ## External URL which can access prometheus
   ## May be the same as the Ingress host name
   baseURL: ""
 
@@ -584,14 +603,19 @@
   ##     secretKeyRef:
   ##       name: mysecret
   ##       key: username
-  env: {}
+  env: []
 
-  ## This flag controls access to the administrative HTTP API which includes functionality such as deleting time
-  ## series. This is disabled by default.
-  enableAdminApi: false
-
-  ## This flag controls BD locking
-  skipTSDBLock: false
+  extraFlags:
+    - web.enable-lifecycle
+    ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as
+    ## deleting time series. This is disabled by default.
+    # - web.enable-admin-api
+    ##
+    ## storage.tsdb.no-lockfile flag controls TSDB locking
+    # - storage.tsdb.no-lockfile
+    ##
+    ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL)
+    # - storage.tsdb.wal-compression
 
   ## Path to a configuration file on prometheus server container FS
   configPath: /etc/config/prometheus.yml
@@ -606,6 +630,12 @@
     ## How frequently to evaluate rules
     ##
     evaluation_interval: 1m
+  ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write
+  ##
+  remoteWrite: []
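+  ## Hypothetical example (the endpoint URL is a placeholder, not part
+  ## of this change):
+  # remoteWrite:
+  #   - url: http://remote-storage.example.com/api/v1/write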
+  ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read
+  ##
+  remoteRead: []
 
   ## Additional Prometheus server container arguments
   ##
@@ -698,6 +728,12 @@
   # strategy:
   #   type: Recreate
 
+  ## hostAliases allows adding entries to /etc/hosts inside the containers
+  hostAliases: []
+  #   - ip: "127.0.0.1"
+  #     hostnames:
+  #       - "example.com"
+
   ## Node tolerations for server scheduling to nodes with taints
   ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
   ##
@@ -716,6 +752,13 @@
   ##
   affinity: {}
 
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
   ## Use an alternate scheduler, e.g. "stork".
   ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
   ##
@@ -760,12 +803,21 @@
     ##
     # storageClass: "-"
 
+    ## Prometheus server data Persistent Volume Binding Mode
+    ## If defined, volumeBindingMode: <volumeBindingMode>
+    ## If undefined (the default) or set to null, no volumeBindingMode spec is
+    ##   set, choosing the default mode.
+    ##
+    # volumeBindingMode: ""
+
     ## Subdirectory of Prometheus server data Persistent Volume to mount
     ## Useful if the volume's root directory is not empty
     ##
     subPath: ""
 
   emptyDir:
+    ## Prometheus server emptyDir volume size limit
+    ##
     sizeLimit: ""
 
   ## Annotations to be added to Prometheus server pods
@@ -799,6 +851,10 @@
   ##
   replicaCount: 1
 
+  ## Annotations to be added to deployment
+  ##
+  deploymentAnnotations: {}
+
   statefulSet:
     ## If true, use a statefulset instead of a deployment for pod management.
     ## This allows scaling replicas to more than 1 pod
@@ -815,15 +871,22 @@
       annotations: {}
       labels: {}
       servicePort: 80
+      ## Enable gRPC port on service to allow auto discovery with thanos-querier
+      gRPC:
+        enabled: false
+        servicePort: 10901
+        # nodePort: 10901
 
   ## Prometheus server readiness and liveness probe initial delay and timeout
   ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
   ##
   readinessProbeInitialDelay: 30
+  readinessProbePeriodSeconds: 5
   readinessProbeTimeout: 30
   readinessProbeFailureThreshold: 3
   readinessProbeSuccessThreshold: 1
   livenessProbeInitialDelay: 30
+  livenessProbePeriodSeconds: 15
   livenessProbeTimeout: 30
   livenessProbeFailureThreshold: 3
   livenessProbeSuccessThreshold: 1
@@ -839,6 +902,15 @@
       cpu: 500m
       memory: 512Mi
 
+  ## Vertical Pod Autoscaler config
+  ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
+  verticalAutoscaler:
+    ## If true, a VPA object will be created for the controller (either StatefulSet or Deployment, based on the configs above)
+    enabled: false
+    # updateMode: "Auto"
+    # containerPolicies:
+    # - containerName: 'prometheus-server'
+
   ## Security context to be added to server pods
   ##
   securityContext:
@@ -863,6 +935,18 @@
     sessionAffinity: None
     type: ClusterIP
 
+    ## Enable gRPC port on service to allow auto discovery with thanos-querier
+    gRPC:
+      enabled: false
+      servicePort: 10901
+      # nodePort: 10901
+    ## If using a statefulSet (statefulSet.enabled=true), configure the
+    ## service to connect to a specific replica to have a consistent view
+    ## of the data.
+    statefulsetReplica:
+      enabled: false
+      replica: 0
+
   ## Prometheus server pod termination grace period
   ##
   terminationGracePeriodSeconds: 300
@@ -889,7 +973,7 @@
   ##
   image:
     repository: prom/pushgateway
-    tag: v0.8.0
+    tag: v1.3.0
     pullPolicy: IfNotPresent
 
   ## pushgateway priorityClassName
@@ -901,6 +985,10 @@
   ## for example: persistence.file: /data/pushgateway.data
   extraArgs: {}
 
+  ## Additional InitContainers to initialize the pod
+  ##
+  extraInitContainers: []
+
   ingress:
     ## If true, pushgateway Ingress will be created
     ##
@@ -952,6 +1040,10 @@
   ##
   podAnnotations: {}
 
+  ## Labels to be added to pushgateway pods
+  ##
+  podLabels: {}
+
   ## Specify if a Pod Security Policy for node-exporter must be created
   ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
   ##
@@ -968,6 +1060,16 @@
 
   replicaCount: 1
 
+  ## Annotations to be added to deployment
+  ##
+  deploymentAnnotations: {}
+  ## PodDisruptionBudget settings
+  ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/
+  ##
+  podDisruptionBudget:
+    enabled: false
+    maxUnavailable: 1
+
   ## pushgateway resource requests and limits
   ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/
   ##
@@ -1007,7 +1109,6 @@
 
   persistentVolume:
     ## If true, pushgateway will create/use a Persistent Volume Claim
-    ## If false, use emptyDir
     ##
     enabled: false
 
@@ -1035,7 +1136,7 @@
     ##
     size: 2Gi
 
-    ## alertmanager data Persistent Volume Storage Class
+    ## pushgateway data Persistent Volume Storage Class
     ## If defined, storageClassName: <storageClass>
     ## If set to "-", storageClassName: "", which disables dynamic provisioning
     ## If undefined (the default) or set to null, no storageClassName spec is
@@ -1044,7 +1145,14 @@
     ##
     # storageClass: "-"
 
-    ## Subdirectory of alertmanager data Persistent Volume to mount
+    ## pushgateway data Persistent Volume Binding Mode
+    ## If defined, volumeBindingMode: <volumeBindingMode>
+    ## If undefined (the default) or set to null, no volumeBindingMode spec is
+    ##   set, choosing the default mode.
+    ##
+    # volumeBindingMode: ""
+
+    ## Subdirectory of pushgateway data Persistent Volume to mount
     ## Useful if the volume's root directory is not empty
     ##
     subPath: ""
@@ -1210,254 +1318,311 @@
           target_label: instance
           replacement: #@ "loki-{}".format(data.values.namespace)
 
-      # A scrape configuration for running Prometheus on a Kubernetes cluster.
-      # This uses separate scrape configs for cluster components (i.e. API server, node)
-      # and services to allow each to use different authentication configs.
-      #
-      # Kubernetes labels will be added as Prometheus labels on metrics via the
-      # `labelmap` relabeling action.
-
-      # Scrape config for API servers.
-      #
-      # Kubernetes exposes API servers as endpoints to the default/kubernetes
-      # service so this uses `endpoints` role and uses relabelling to only keep
-      # the endpoints associated with the default/kubernetes service using the
-      # default named port `https`. This works for single API server deployments as
-      # well as HA API server deployments.
-      # - job_name: 'kubernetes-apiservers'
-
-      #   kubernetes_sd_configs:
-      #     - role: endpoints
-
-      #   # Default to scraping over https. If required, just disable this or change to
-      #   # `http`.
-      #   scheme: https
-
-      #   # This TLS & bearer token file config is used to connect to the actual scrape
-      #   # endpoints for cluster components. This is separate to discovery auth
-      #   # configuration because discovery & scraping are two separate concerns in
-      #   # Prometheus. The discovery auth config is automatic if Prometheus runs inside
-      #   # the cluster. Otherwise, more config options have to be provided within the
-      #   # <kubernetes_sd_config>.
-      #   tls_config:
-      #     ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-      #     # If your node certificates are self-signed or use a different CA to the
-      #     # master CA, then disable certificate verification below. Note that
-      #     # certificate verification is an integral part of a secure infrastructure
-      #     # so this should only be disabled in a controlled environment. You can
-      #     # disable certificate verification by uncommenting the line below.
-      #     #
-      #     insecure_skip_verify: true
-      #   bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-      #   # Keep only the default/kubernetes service endpoints for the https port. This
-      #   # will add targets for each API server which Kubernetes adds an endpoint to
-      #   # the default/kubernetes service.
-      #   relabel_configs:
-      #     - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
-      #       action: keep
-      #       regex: default;kubernetes;https
-
-      # - job_name: 'kubernetes-nodes'
-
-      #   # Default to scraping over https. If required, just disable this or change to
-      #   # `http`.
-      #   scheme: https
-
-      #   # This TLS & bearer token file config is used to connect to the actual scrape
-      #   # endpoints for cluster components. This is separate to discovery auth
-      #   # configuration because discovery & scraping are two separate concerns in
-      #   # Prometheus. The discovery auth config is automatic if Prometheus runs inside
-      #   # the cluster. Otherwise, more config options have to be provided within the
-      #   # <kubernetes_sd_config>.
-      #   tls_config:
-      #     ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-      #     # If your node certificates are self-signed or use a different CA to the
-      #     # master CA, then disable certificate verification below. Note that
-      #     # certificate verification is an integral part of a secure infrastructure
-      #     # so this should only be disabled in a controlled environment. You can
-      #     # disable certificate verification by uncommenting the line below.
-      #     #
-      #     insecure_skip_verify: true
-      #   bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-      #   kubernetes_sd_configs:
-      #     - role: node
-
-      #   relabel_configs:
-      #     - action: labelmap
-      #       regex: __meta_kubernetes_node_label_(.+)
-      #     - target_label: __address__
-      #       replacement: kubernetes.default.svc:443
-      #     - source_labels: [__meta_kubernetes_node_name]
-      #       regex: (.+)
-      #       target_label: __metrics_path__
-      #       replacement: /api/v1/nodes/$1/proxy/metrics
-
-
-      # - job_name: 'kubernetes-nodes-cadvisor'
-
-      #   # Default to scraping over https. If required, just disable this or change to
-      #   # `http`.
-      #   scheme: https
-
-      #   # This TLS & bearer token file config is used to connect to the actual scrape
-      #   # endpoints for cluster components. This is separate to discovery auth
-      #   # configuration because discovery & scraping are two separate concerns in
-      #   # Prometheus. The discovery auth config is automatic if Prometheus runs inside
-      #   # the cluster. Otherwise, more config options have to be provided within the
-      #   # <kubernetes_sd_config>.
-      #   tls_config:
-      #     ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-      #     # If your node certificates are self-signed or use a different CA to the
-      #     # master CA, then disable certificate verification below. Note that
-      #     # certificate verification is an integral part of a secure infrastructure
-      #     # so this should only be disabled in a controlled environment. You can
-      #     # disable certificate verification by uncommenting the line below.
-      #     #
-      #     insecure_skip_verify: true
-      #   bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-
-      #   kubernetes_sd_configs:
-      #     - role: node
-
-      #   # This configuration will work only on kubelet 1.7.3+
-      #   # As the scrape endpoints for cAdvisor have changed
-      #   # if you are using older version you need to change the replacement to
-      #   # replacement: /api/v1/nodes/$1:4194/proxy/metrics
-      #   # more info here https://github.com/coreos/prometheus-operator/issues/633
-      #   relabel_configs:
-      #     - action: labelmap
-      #       regex: __meta_kubernetes_node_label_(.+)
-      #     - target_label: __address__
-      #       replacement: kubernetes.default.svc:443
-      #     - source_labels: [__meta_kubernetes_node_name]
-      #       regex: (.+)
-      #       target_label: __metrics_path__
-      #       replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor
-
-      # Scrape config for service endpoints.
-      #
-      # The relabeling allows the actual service scrape endpoint to be configured
-      # via the following annotations:
-      #
-      # * `prometheus.io/scrape`: Only scrape services that have a value of `true`
-      # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
-      # to set this to `https` & most likely set the `tls_config` of the scrape config.
-      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
-      # * `prometheus.io/port`: If the metrics are exposed on a different port to the
-      # service then set this appropriately.
-      # - job_name: 'kubernetes-service-endpoints'
-
-      #   kubernetes_sd_configs:
-      #     - role: endpoints
-
-      #   relabel_configs:
-      #     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
-      #       action: keep
-      #       regex: true
-      #     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
-      #       action: replace
-      #       target_label: __scheme__
-      #       regex: (https?)
-      #     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
-      #       action: replace
-      #       target_label: __metrics_path__
-      #       regex: (.+)
-      #     - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
-      #       action: replace
-      #       target_label: __address__
-      #       regex: ([^:]+)(?::\d+)?;(\d+)
-      #       replacement: $1:$2
-      #     - action: labelmap
-      #       regex: __meta_kubernetes_service_label_(.+)
-      #     - source_labels: [__meta_kubernetes_namespace]
-      #       action: replace
-      #       target_label: kubernetes_namespace
-      #     - source_labels: [__meta_kubernetes_service_name]
-      #       action: replace
-      #       target_label: kubernetes_name
-      #     - source_labels: [__meta_kubernetes_pod_node_name]
-      #       action: replace
-      #       target_label: kubernetes_node
-
-      # - job_name: 'prometheus-pushgateway'
-      #   honor_labels: true
-
-      #   kubernetes_sd_configs:
-      #     - role: service
-
-      #   relabel_configs:
-      #     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
-      #       action: keep
-      #       regex: pushgateway
-
-      # Example scrape config for probing services via the Blackbox Exporter.
-      #
-      # The relabeling allows the actual service scrape endpoint to be configured
-      # via the following annotations:
-      #
-      # * `prometheus.io/probe`: Only probe services that have a value of `true`
-      # - job_name: 'kubernetes-services'
-
-      #   metrics_path: /probe
-      #   params:
-      #     module: [http_2xx]
-
-      #   kubernetes_sd_configs:
-      #     - role: service
-
-      #   relabel_configs:
-      #     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
-      #       action: keep
-      #       regex: true
-      #     - source_labels: [__address__]
-      #       target_label: __param_target
-      #     - target_label: __address__
-      #       replacement: blackbox
-      #     - source_labels: [__param_target]
-      #       target_label: instance
-      #     - action: labelmap
-      #       regex: __meta_kubernetes_service_label_(.+)
-      #     - source_labels: [__meta_kubernetes_namespace]
-      #       target_label: kubernetes_namespace
-      #     - source_labels: [__meta_kubernetes_service_name]
-      #       target_label: kubernetes_name
-
-      # Example scrape config for pods
-      #
-      # The relabeling allows the actual pod scrape endpoint to be configured via the
-      # following annotations:
-      #
-      # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
-      # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
-      # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
-      # - job_name: 'kubernetes-pods'
-
-      #   kubernetes_sd_configs:
-      #     - role: pod
-
-      #   relabel_configs:
-      #     - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
-      #       action: keep
-      #       regex: true
-      #     - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
-      #       action: replace
-      #       target_label: __metrics_path__
-      #       regex: (.+)
-      #     - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
-      #       action: replace
-      #       regex: ([^:]+)(?::\d+)?;(\d+)
-      #       replacement: $1:$2
-      #       target_label: __address__
-      #     - action: labelmap
-      #       regex: __meta_kubernetes_pod_label_(.+)
-      #     - source_labels: [__meta_kubernetes_namespace]
-      #       action: replace
-      #       target_label: kubernetes_namespace
-      #     - source_labels: [__meta_kubernetes_pod_name]
-      #       action: replace
-      #       target_label: kubernetes_pod_name
+    # # A scrape configuration for running Prometheus on a Kubernetes cluster.
+    # # This uses separate scrape configs for cluster components (i.e. API server, node)
+    # # and services to allow each to use different authentication configs.
+    # #
+    # # Kubernetes labels will be added as Prometheus labels on metrics via the
+    # # `labelmap` relabeling action.
+    # # Scrape config for API servers.
+    # #
+    # # Kubernetes exposes API servers as endpoints to the default/kubernetes
+    # # service so this uses `endpoints` role and uses relabelling to only keep
+    # # the endpoints associated with the default/kubernetes service using the
+    # # default named port `https`. This works for single API server deployments as
+    # # well as HA API server deployments.
+    # - job_name: 'kubernetes-apiservers'
+    #   kubernetes_sd_configs:
+    #     - role: endpoints
+    #   # Default to scraping over https. If required, just disable this or change to
+    #   # `http`.
+    #   scheme: https
+    #   # This TLS & bearer token file config is used to connect to the actual scrape
+    #   # endpoints for cluster components. This is separate to discovery auth
+    #   # configuration because discovery & scraping are two separate concerns in
+    #   # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+    #   # the cluster. Otherwise, more config options have to be provided within the
+    #   # <kubernetes_sd_config>.
+    #   tls_config:
+    #     ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+    #     # If your node certificates are self-signed or use a different CA to the
+    #     # master CA, then disable certificate verification below. Note that
+    #     # certificate verification is an integral part of a secure infrastructure
+    #     # so this should only be disabled in a controlled environment. You can
+    #     # disable certificate verification by uncommenting the line below.
+    #     #
+    #     insecure_skip_verify: true
+    #   bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+    #   # Keep only the default/kubernetes service endpoints for the https port. This
+    #   # will add targets for each API server which Kubernetes adds an endpoint to
+    #   # the default/kubernetes service.
+    #   relabel_configs:
+    #     - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
+    #       action: keep
+    #       regex: default;kubernetes;https
+    # - job_name: 'kubernetes-nodes'
+    #   # Default to scraping over https. If required, just disable this or change to
+    #   # `http`.
+    #   scheme: https
+    #   # This TLS & bearer token file config is used to connect to the actual scrape
+    #   # endpoints for cluster components. This is separate to discovery auth
+    #   # configuration because discovery & scraping are two separate concerns in
+    #   # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+    #   # the cluster. Otherwise, more config options have to be provided within the
+    #   # <kubernetes_sd_config>.
+    #   tls_config:
+    #     ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+    #     # If your node certificates are self-signed or use a different CA to the
+    #     # master CA, then disable certificate verification below. Note that
+    #     # certificate verification is an integral part of a secure infrastructure
+    #     # so this should only be disabled in a controlled environment. You can
+    #     # disable certificate verification by uncommenting the line below.
+    #     #
+    #     insecure_skip_verify: true
+    #   bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+    #   kubernetes_sd_configs:
+    #     - role: node
+    #   relabel_configs:
+    #     - action: labelmap
+    #       regex: __meta_kubernetes_node_label_(.+)
+    #     - target_label: __address__
+    #       replacement: kubernetes.default.svc:443
+    #     - source_labels: [__meta_kubernetes_node_name]
+    #       regex: (.+)
+    #       target_label: __metrics_path__
+    #       replacement: /api/v1/nodes/$1/proxy/metrics
+    # - job_name: 'kubernetes-nodes-cadvisor'
+    #   # Default to scraping over https. If required, just disable this or change to
+    #   # `http`.
+    #   scheme: https
+    #   # This TLS & bearer token file config is used to connect to the actual scrape
+    #   # endpoints for cluster components. This is separate to discovery auth
+    #   # configuration because discovery & scraping are two separate concerns in
+    #   # Prometheus. The discovery auth config is automatic if Prometheus runs inside
+    #   # the cluster. Otherwise, more config options have to be provided within the
+    #   # <kubernetes_sd_config>.
+    #   tls_config:
+    #     ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+    #     # If your node certificates are self-signed or use a different CA to the
+    #     # master CA, then disable certificate verification below. Note that
+    #     # certificate verification is an integral part of a secure infrastructure
+    #     # so this should only be disabled in a controlled environment. You can
+    #     # disable certificate verification by uncommenting the line below.
+    #     #
+    #     insecure_skip_verify: true
+    #   bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
+    #   kubernetes_sd_configs:
+    #     - role: node
+    #   # This configuration will work only on kubelet 1.7.3+
+    #   # As the scrape endpoints for cAdvisor have changed
+    #   # if you are using an older version you need to change the replacement to
+    #   # replacement: /api/v1/nodes/$1:4194/proxy/metrics
+    #   # more info here https://github.com/coreos/prometheus-operator/issues/633
+    #   relabel_configs:
+    #     - action: labelmap
+    #       regex: __meta_kubernetes_node_label_(.+)
+    #     - target_label: __address__
+    #       replacement: kubernetes.default.svc:443
+    #     - source_labels: [__meta_kubernetes_node_name]
+    #       regex: (.+)
+    #       target_label: __metrics_path__
+    #       replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor
+    # # Scrape config for service endpoints.
+    # #
+    # # The relabeling allows the actual service scrape endpoint to be configured
+    # # via the following annotations:
+    # #
+    # # * `prometheus.io/scrape`: Only scrape services that have a value of `true`
+    # # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+    # # to set this to `https` & most likely set the `tls_config` of the scrape config.
+    # # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+    # # * `prometheus.io/port`: If the metrics are exposed on a different port to the
+    # # service then set this appropriately.
+    # - job_name: 'kubernetes-service-endpoints'
+    #   kubernetes_sd_configs:
+    #     - role: endpoints
+    #   relabel_configs:
+    #     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape]
+    #       action: keep
+    #       regex: true
+    #     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+    #       action: replace
+    #       target_label: __scheme__
+    #       regex: (https?)
+    #     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+    #       action: replace
+    #       target_label: __metrics_path__
+    #       regex: (.+)
+    #     - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+    #       action: replace
+    #       target_label: __address__
+    #       regex: ([^:]+)(?::\d+)?;(\d+)
+    #       replacement: $1:$2
+    #     - action: labelmap
+    #       regex: __meta_kubernetes_service_label_(.+)
+    #     - source_labels: [__meta_kubernetes_namespace]
+    #       action: replace
+    #       target_label: kubernetes_namespace
+    #     - source_labels: [__meta_kubernetes_service_name]
+    #       action: replace
+    #       target_label: kubernetes_name
+    #     - source_labels: [__meta_kubernetes_pod_node_name]
+    #       action: replace
+    #       target_label: kubernetes_node
+    # # Scrape config for slow service endpoints; same as above, but with a larger
+    # # timeout and a larger interval
+    # #
+    # # The relabeling allows the actual service scrape endpoint to be configured
+    # # via the following annotations:
+    # #
+    # # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true`
+    # # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need
+    # # to set this to `https` & most likely set the `tls_config` of the scrape config.
+    # # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+    # # * `prometheus.io/port`: If the metrics are exposed on a different port to the
+    # # service then set this appropriately.
+    # - job_name: 'kubernetes-service-endpoints-slow'
+    #   scrape_interval: 5m
+    #   scrape_timeout: 30s
+    #   kubernetes_sd_configs:
+    #     - role: endpoints
+    #   relabel_configs:
+    #     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow]
+    #       action: keep
+    #       regex: true
+    #     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme]
+    #       action: replace
+    #       target_label: __scheme__
+    #       regex: (https?)
+    #     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path]
+    #       action: replace
+    #       target_label: __metrics_path__
+    #       regex: (.+)
+    #     - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port]
+    #       action: replace
+    #       target_label: __address__
+    #       regex: ([^:]+)(?::\d+)?;(\d+)
+    #       replacement: $1:$2
+    #     - action: labelmap
+    #       regex: __meta_kubernetes_service_label_(.+)
+    #     - source_labels: [__meta_kubernetes_namespace]
+    #       action: replace
+    #       target_label: kubernetes_namespace
+    #     - source_labels: [__meta_kubernetes_service_name]
+    #       action: replace
+    #       target_label: kubernetes_name
+    #     - source_labels: [__meta_kubernetes_pod_node_name]
+    #       action: replace
+    #       target_label: kubernetes_node
+    # - job_name: 'prometheus-pushgateway'
+    #   honor_labels: true
+    #   kubernetes_sd_configs:
+    #     - role: service
+    #   relabel_configs:
+    #     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
+    #       action: keep
+    #       regex: pushgateway
+    # # Example scrape config for probing services via the Blackbox Exporter.
+    # #
+    # # The relabeling allows the actual service scrape endpoint to be configured
+    # # via the following annotations:
+    # #
+    # # * `prometheus.io/probe`: Only probe services that have a value of `true`
+    # - job_name: 'kubernetes-services'
+    #   metrics_path: /probe
+    #   params:
+    #     module: [http_2xx]
+    #   kubernetes_sd_configs:
+    #     - role: service
+    #   relabel_configs:
+    #     - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe]
+    #       action: keep
+    #       regex: true
+    #     - source_labels: [__address__]
+    #       target_label: __param_target
+    #     - target_label: __address__
+    #       replacement: blackbox
+    #     - source_labels: [__param_target]
+    #       target_label: instance
+    #     - action: labelmap
+    #       regex: __meta_kubernetes_service_label_(.+)
+    #     - source_labels: [__meta_kubernetes_namespace]
+    #       target_label: kubernetes_namespace
+    #     - source_labels: [__meta_kubernetes_service_name]
+    #       target_label: kubernetes_name
+    # # Example scrape config for pods
+    # #
+    # # The relabeling allows the actual pod scrape endpoint to be configured via the
+    # # following annotations:
+    # #
+    # # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`
+    # # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+    # # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
+    # - job_name: 'kubernetes-pods'
+    #   kubernetes_sd_configs:
+    #     - role: pod
+    #   relabel_configs:
+    #     - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
+    #       action: keep
+    #       regex: true
+    #     - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+    #       action: replace
+    #       target_label: __metrics_path__
+    #       regex: (.+)
+    #     - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+    #       action: replace
+    #       regex: ([^:]+)(?::\d+)?;(\d+)
+    #       replacement: $1:$2
+    #       target_label: __address__
+    #     - action: labelmap
+    #       regex: __meta_kubernetes_pod_label_(.+)
+    #     - source_labels: [__meta_kubernetes_namespace]
+    #       action: replace
+    #       target_label: kubernetes_namespace
+    #     - source_labels: [__meta_kubernetes_pod_name]
+    #       action: replace
+    #       target_label: kubernetes_pod_name
+    #     - source_labels: [__meta_kubernetes_pod_phase]
+    #       regex: Pending|Succeeded|Failed
+    #       action: drop
+    # # Example scrape config for pods which should be scraped slower. A useful example
+    # # would be stackdriver-exporter, which queries an API on every scrape of the pod
+    # #
+    # # The relabeling allows the actual pod scrape endpoint to be configured via the
+    # # following annotations:
+    # #
+    # # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true`
+    # # * `prometheus.io/path`: If the metrics path is not `/metrics` override this.
+    # # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`.
+    # - job_name: 'kubernetes-pods-slow'
+    #   scrape_interval: 5m
+    #   scrape_timeout: 30s
+    #   kubernetes_sd_configs:
+    #     - role: pod
+    #   relabel_configs:
+    #     - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow]
+    #       action: keep
+    #       regex: true
+    #     - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
+    #       action: replace
+    #       target_label: __metrics_path__
+    #       regex: (.+)
+    #     - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port]
+    #       action: replace
+    #       regex: ([^:]+)(?::\d+)?;(\d+)
+    #       replacement: $1:$2
+    #       target_label: __address__
+    #     - action: labelmap
+    #       regex: __meta_kubernetes_pod_label_(.+)
+    #     - source_labels: [__meta_kubernetes_namespace]
+    #       action: replace
+    #       target_label: kubernetes_namespace
+    #     - source_labels: [__meta_kubernetes_pod_name]
+    #       action: replace
+    #       target_label: kubernetes_pod_name
+    #     - source_labels: [__meta_kubernetes_pod_phase]
+    #       regex: Pending|Succeeded|Failed
+    #       action: drop
 
 # adds additional scrape configs to prometheus.yml
 # must be a string so you have to add a | after extraScrapeConfigs:
@@ -1490,3 +1655,6 @@
   ## Enable creation of NetworkPolicy resources.
   ##
   enabled: true
+
+# Force namespace of namespaced resources
+forceNamespace: null
diff --git a/subcommands/_globals.py b/subcommands/_globals.py
index 987b5bd..1b6d7d8 100644
--- a/subcommands/_globals.py
+++ b/subcommands/_globals.py
@@ -15,6 +15,6 @@
 HELM_CHARTS = {
     "grafana": "stable/grafana",
     "loki": "loki/loki",
-    "prometheus": "stable/prometheus",
+    "prometheus": "prometheus-community/prometheus",
     "promtail": "loki/promtail",
 }
diff --git a/subcommands/install.py b/subcommands/install.py
index b4a91d6..424de72 100644
--- a/subcommands/install.py
+++ b/subcommands/install.py
@@ -37,6 +37,7 @@
 HELM_REPOS = {
     "stable": "https://charts.helm.sh/stable",
     "loki": "https://grafana.github.io/loki/charts",
+    "prometheus-community": "https://prometheus-community.github.io/helm-charts",
 }
 
 LOOSE_RESOURCES = [