1
0
Fork 0

fix grafana

This commit is contained in:
Massaki Archambault 2021-08-28 16:32:06 -04:00
parent 54e83287c9
commit 17d0a1508b
4 changed files with 83 additions and 5 deletions

View File

@@ -16,14 +16,19 @@ spec:
labels:
app.kubernetes.io/name: grafana
spec:
initContainers:
- name: init-ownership
image: bash:5
command: ['chown', '-R', '472:472', '/var/lib/grafana']
volumeMounts:
- mountPath: /var/lib/grafana
name: grafana-pv
containers:
- name: grafana
image: grafana/grafana
env:
- name: GF_AUTH_ANONYMOUS_ENABLED
value: "true"
- name: GF_AUTH_ANONYMOUS_ORG_ROLE
value: Viewer
value: "false"
# - name: GF_DATABASE_TYPE
# value: postgres
# - name: GF_DATABASE_HOST

View File

@@ -1,3 +1,6 @@
# Kustomization for the Longhorn stack: pulls the upstream v1.2.0 manifest,
# adds the local ingress, and patches the default-setting/storageclass
# ConfigMaps. The duplicated "- longhorn-ingress.yaml" entry was removed:
# kustomize refuses to build when the same resource id is registered twice.
resources:
  - https://raw.githubusercontent.com/longhorn/longhorn/v1.2.0/deploy/longhorn.yaml
  - longhorn-ingress.yaml
patchesStrategicMerge:
  - longhorn-configmap-patch.yaml

View File

@@ -0,0 +1,70 @@
# Longhorn default-setting ConfigMap (read once by longhorn-manager at first
# install). Indentation was lost in the diff scrape and is restored here:
# "metadata" children and "data" must be nested, and every settings line must
# be indented inside the |- block scalar, otherwise the document is invalid
# YAML. Keys left empty keep Longhorn's built-in defaults; only
# default-replica-count and default-data-locality are overridden.
apiVersion: v1
kind: ConfigMap
metadata:
  name: longhorn-default-setting
  namespace: longhorn-system
data:
  # |- keeps line breaks and strips the trailing newline.
  default-setting.yaml: |-
    backup-target:
    backup-target-credential-secret:
    allow-recurring-job-while-volume-detached:
    create-default-disk-labeled-nodes:
    default-data-path:
    replica-soft-anti-affinity:
    storage-over-provisioning-percentage:
    storage-minimal-available-percentage:
    upgrade-checker:
    default-replica-count: 2
    default-data-locality: best-effort
    guaranteed-engine-cpu:
    default-longhorn-static-storage-class:
    backupstore-poll-interval:
    taint-toleration:
    system-managed-components-node-selector:
    priority-class:
    auto-salvage:
    auto-delete-pod-when-volume-detached-unexpectedly:
    disable-scheduling-on-cordoned-node:
    replica-zone-soft-anti-affinity:
    volume-attachment-recovery-policy:
    node-down-pod-deletion-policy:
    allow-node-drain-with-last-healthy-replica:
    mkfs-ext4-parameters:
    disable-replica-rebuild:
    replica-replenishment-wait-interval:
    disable-revision-counter:
    system-managed-pods-image-pull-policy:
    allow-volume-creation-with-degraded-availability:
    auto-cleanup-system-generated-snapshot:
    concurrent-automatic-engine-upgrade-per-node-limit:
    backing-image-cleanup-wait-interval:
    guaranteed-engine-manager-cpu:
    guaranteed-replica-manager-cpu:
---
# Longhorn storageclass ConfigMap: the embedded StorageClass manifest that
# longhorn-driver-deployer applies to the cluster. Indentation was lost in the
# diff scrape and is restored here — the whole StorageClass must sit inside
# the | block scalar, and its "parameters" entries one level deeper.
# numberOfReplicas/staleReplicaTimeout stay quoted: StorageClass parameters
# are map[string]string, so bare integers would be rejected.
apiVersion: v1
kind: ConfigMap
metadata:
  name: longhorn-storageclass
  namespace: longhorn-system
data:
  storageclass.yaml: |
    kind: StorageClass
    apiVersion: storage.k8s.io/v1
    metadata:
      name: longhorn
    provisioner: driver.longhorn.io
    allowVolumeExpansion: true
    reclaimPolicy: Delete
    volumeBindingMode: Immediate
    parameters:
      numberOfReplicas: "2"
      staleReplicaTimeout: "2880"
      fromBackup: ""
      # backingImage: "bi-test"
      # backingImageDataSourceType: "download"
      # backingImageDataSourceParameters: '{"url": "https://backing-image-example.s3-region.amazonaws.com/test-backing-image"}'
      # backingImageChecksum: "SHA512 checksum of the backing image"
      # diskSelector: "ssd,fast"
      # nodeSelector: "storage,fast"
      # recurringJobSelector: '[{"name":"snap-group", "isGroup":true},
      #   {"name":"backup", "isGroup":false}]'

View File

@@ -12,7 +12,7 @@ spec:
monitor: prometheus
resources:
requests:
cpu: 7500m
cpu: 750m
memory: 2Gi
limits:
cpu: 1000m