跳转至

部署 Artifactory Pro

使用 Helm 部署

安装 helm

参考网络安装方式安装 helm

编写 values.yaml

# Default values for artifactory.
# This is a YAML-formatted file.

# Beware when changing values here. You should know what you are doing!
# Access the values with {{ .Values.key.subkey }}


# Chart-wide values shared with sub-charts. Left empty here; uncomment
# entries below to set global pull secrets, join/master keys, or custom
# containers/volumes for every component.
global: {}
  # imagePullSecrets:
  #   - myRegistryKeySecretName
  # joinKey:

  # masterKey:

  # joinKeySecretName:
  # masterKeySecretName:
  # customInitContainersBegin: |

  # customInitContainers: |

  # customVolumes: |

  # customVolumeMounts: |

  # customSidecarContainers: |


# Image used by the chart's init containers (upstream default kept as a
# comment for reference).
# initContainerImage: docker.bintray.io/alpine:3.12
initContainerImage: alpine:3.12

# Init containers
# Resource requests/limits applied to the chart's init containers.
# Indentation normalized to consistent 2-space steps (the original mixed
# 3- and 5-space indents, which yamllint's `indentation` rule rejects).
initContainers:
  resources:
    requests:
      memory: "64Mi"
      cpu: "10m"
    limits:
      memory: "128Mi"
      cpu: "250m"

# Installer metadata; `platform` feeds the templated installerInfo string below
# (empty values are valid Helm defaults).
installer:
  type:
  platform:

# Product/feature identification reported by the chart; rendered by Helm at
# install time ({{ .Chart.Version }} and the platform default "kubernetes").
installerInfo: '{"productId": "Helm_artifactory/{{ .Chart.Version }}", "features": [ { "featureId": "Platform/{{ default "kubernetes" .Values.installer.platform }}"}]}'

# For supporting pulling from private registries
# imagePullSecrets:
#   - myRegistryKeySecretName

## Role Based Access Control
## Ref: https://kubernetes.io/docs/admin/authorization/rbac/
## Sequence items indented under their parent key for consistency with the
## rest of this file (e.g. ingress.hosts below).
rbac:
  create: true
  role:
    ## Rules to create. It follows the role specification
    rules:
      - apiGroups:
          - ''
        resources:
          - services
          - endpoints
          - pods
        verbs:
          - get
          - watch
          - list

## Service Account
## Ref: https://kubernetes.io/docs/admin/service-accounts-admin/
##
serviceAccount:
  # When true the chart creates the ServiceAccount; otherwise an existing one
  # named below is used.
  create: true
  ## The name of the ServiceAccount to use.
  ## If not set and create is true, a name is generated using the fullname template
  name:
  ## Service Account annotations
  annotations: {}

ingress:
  enabled: true
  defaultBackend:
    enabled: true
  # Used to create an Ingress record.
  hosts:
    - repos.local-repo.ai
  routerPath: /
  artifactoryPath: /artifactory/
  annotations:
    # Annotation values are always strings in Kubernetes; quoting prevents
    # any YAML re-typing of values like "true".
    nginx.org/proxy-read-timeout: "60s"
    ingress.kubernetes.io/ssl-redirect: "true"
    kubernetes.io/ingress.class: nginx
    # NOTE(review): <artifactory-domain> below is a placeholder — replace it
    # with the real external URL (presumably https://repos.local-repo.ai).
    nginx.org/locations-snippets: |-
      proxy_pass_header   Server;
      proxy_set_header    X-JFrog-Override-Base-Url https://<artifactory-domain>;
    nginx.org/client-max-body-size: "10240000m"
  labels: {}
  # traffic-type: external
  # traffic-type: internal
  tls:
    # Secrets must be manually created in the namespace.
    - secretName: artifactory-https-secret
      hosts:
        - repos.local-repo.ai

  # Additional ingress rules
  additionalRules: []

# List of NetworkPolicy objects created by the chart.
networkpolicy:
  # Allows all ingress and egress
  - name: artifactory
    podSelector:
      matchLabels:
        app: artifactory
    egress:
    - {}
    ingress:
    - {}
  # Uncomment to allow only artifactory pods to communicate with postgresql (if postgresql.enabled is true)
  # - name: postgresql
  #   podSelector:
  #     matchLabels:
  #       app: postgresql
  #   ingress:
  #   - from:
  #     - podSelector:
  #         matchLabels:
  #           app: artifactory

# Image for the log-tailing sidecars; registry overridden to a private
# mirror (upstream defaults kept as comments).
logger:
  image:
    # registry: docker.bintray.io
    # repository: busybox
    # tag: 1.31.1
    registry: "dockerhub.local-repo.ai:5000"
    repository: devops/busybox
    tag: 1.31.1

# Artifactory
artifactory:
  name: artifactory
  # Note that by default we use appVersion to get image tag/version
  image:
    # registry: docker.bintray.io
    # repository: jfrog/artifactory-pro
    # Registry overridden to a private mirror; tag left unset so the chart's
    # appVersion is used.
    registry: "dockerhub.local-repo.ai:5000"
    repository: devops/artifactory-pro
    # tag:
    pullPolicy: IfNotPresent
  labels: {}

  # Create a priority class for the Artifactory pod or use an existing one
  # NOTE - Maximum allowed value of a user defined priority is 1000000000
  priorityClass:
    create: false
    value: 1000000000
    ## Override default name
    # name:
    ## Use an existing priority class
    # existingPriorityClass:

  # Delete the db.properties file in ARTIFACTORY_HOME/etc/db.properties
  deleteDBPropertiesOnStartup: true

  # Referenced by the systemYaml template below.
  database:
    maxOpenConnections: 80
  tomcat:
    connector:
      maxThreads: 200
      extraConfig: 'acceptCount="100"'

  # Files to copy to ARTIFACTORY_HOME/ on each Artifactory startup
  # (empty value: nothing is copied; uncomment entries below to enable)
  copyOnEveryStartup:
  #  # Absolute path
  #  - source: /artifactory_bootstrap/binarystore.xml
  #    # Relative to ARTIFACTORY_HOME/
  #    target: etc/artifactory/
  #  # Absolute path
  #  - source: /artifactory_bootstrap/artifactory.lic
  #    # Relative to ARTIFACTORY_HOME/
  #    target: etc/artifactory/

  # Sidecar containers for tailing Artifactory logs
  loggers: []
  # - access-audit.log
  # - access-request.log
  # - access-security-audit.log
  # - access-service.log
  # - artifactory-access.log
  # - artifactory-event.log
  # - artifactory-import-export.log
  # - artifactory-request.log
  # - artifactory-service.log
  # - frontend-request.log
  # - frontend-service.log
  # - metadata-request.log
  # - metadata-service.log
  # - router-request.log
  # - router-service.log
  # - router-traefik.log
  # - derby.log

  # Loggers containers resources
  # (indentation normalized from 3/5-space to consistent 2-space steps)
  loggersResources:
    requests:
      memory: "10Mi"
      cpu: "10m"
    limits:
      memory: "100Mi"
      cpu: "50m"

  # Sidecar containers for tailing Tomcat (catalina) logs
  catalinaLoggers: []
  # - tomcat-catalina.log
  # - tomcat-localhost.log

  # Tomcat (catalina) loggers resources
  catalinaLoggersResources:
    requests:
      memory: "10Mi"
      cpu: "10m"
    limits:
      memory: "100Mi"
      cpu: "50m"

  # Migration support from 6.x to 7.x
  migration:
    enabled: true
    timeoutSeconds: 3600
    ## Extra pre-start command in migration Init Container to install JDBC driver for MySql/MariaDb/Oracle
    # preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar"

  ## Add custom init containers execution before predefined init containers
  ## NOTE: the commented example lines below sit at the key's own indent, so
  ## they are YAML comments (not scalar content) and this value is empty.
  customInitContainersBegin: |
  #  - name: "custom-setup"
  #    image: "{{ .Values.initContainerImage }}"
  #    imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}"
  #    command:
  #      - 'sh'
  #      - '-c'
  #      - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup'
  #    volumeMounts:
  #      - mountPath: "{{ .Values.artifactory.persistence.mountPath }}"
  #        name: artifactory-volume

  ## Add custom init containers execution after predefined init containers
  ## NOTE: same as above — the value is currently an empty block scalar.
  customInitContainers: |
  #  - name: "custom-setup"
  #    image: "{{ .Values.initContainerImage }}"
  #    imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}"
  #    command:
  #      - 'sh'
  #      - '-c'
  #      - 'touch {{ .Values.artifactory.persistence.mountPath }}/example-custom-setup'
  #    volumeMounts:
  #      - mountPath: "{{ .Values.artifactory.persistence.mountPath }}"
  #        name: artifactory-volume

  ## Add custom sidecar containers
  # - The provided example uses a custom volume (customVolumes)
  # - The provided example shows running container as root (id 0)
  customSidecarContainers: |
  #  - name: "sidecar-list-etc"
  #    image: "{{ .Values.initContainerImage }}"
  #    imagePullPolicy: "{{ .Values.artifactory.image.pullPolicy }}"
  #    securityContext:
  #      runAsUser: 0
  #      fsGroup: 0
  #    command:
  #      - 'sh'
  #      - '-c'
  #      - 'sh /scripts/script.sh'
  #    volumeMounts:
  #      - mountPath: "{{ .Values.artifactory.persistence.mountPath }}"
  #        name: artifactory-volume
  #      - mountPath: "/scripts/script.sh"
  #        name: custom-script
  #        subPath: script.sh
  #    resources:
  #      requests:
  #        memory: "32Mi"
  #        cpu: "50m"
  #      limits:
  #        memory: "128Mi"
  #        cpu: "100m"

  ## Add custom volumes
  customVolumes: |
  #  - name: custom-script
  #    configMap:
  #      name: custom-script

  ## Add custom volumesMounts
  customVolumeMounts: |
  #  - name: custom-script
  #    mountPath: "/scripts/script.sh"
  #    subPath: script.sh
  #  - name: posthook-start
  #    mountPath: "/scripts/posthoook-start.sh"
  #    subPath: posthoook-start.sh
  #  - name: prehook-start
  #    mountPath: "/scripts/prehook-start.sh"
  #    subPath: prehook-start.sh

  # Add custom persistent volume mounts - Available for the pod
  # If skipPrepareContainer is set to true , this will skip the prepare-custom-persistent-volume init container
  customPersistentPodVolumeClaim: {}
  #  name:
  #  mountPath:
  #  accessModes:
  #   - "-"
  #  size:
  #  storageClassName:
  #  skipPrepareContainer: false

  # Add custom persistent volume mounts - Available to the entire namespace
  customPersistentVolumeClaim: {}
  #  name:
  #  mountPath:
  #  accessModes:
  #   - "-"
  #  size:
  #  storageClassName:

  ## Artifactory license.
  ## All three fields are empty here, so no license is injected by the chart.
  license:
    ## licenseKey is the license key in plain text. Use either this or the license.secret setting
    licenseKey:
    ## If artifactory.license.secret is passed, it will be mounted as
    ## ARTIFACTORY_HOME/etc/artifactory.lic and loaded at run time.
    secret:
    ## The dataKey should be the name of the secret data key created.
    dataKey:

  ## Create configMap with artifactory.config.import.xml and security.import.xml and pass name of configMap in following parameter
  configMapName:

  # Add any list of configmaps to Artifactory
  # NOTE: the commented examples below are YAML comments, so this block
  # scalar currently renders as an empty string.
  configMaps: |
  #  posthook-start.sh: |-
  #    echo "This is a post start script"
  #  posthook-end.sh: |-
  #    echo "This is a post end script"

  ## List of secrets for Artifactory user plugins.
  ## One Secret per plugin's files.
  userPluginSecrets:
  #  - archive-old-artifacts
  #  - build-cleanup
  #  - webhook
  #  - '{{ template "my-chart.fullname" . }}'

  ## Artifactory requires a unique master key.
  ## You can generate one with the command: "openssl rand -hex 32"
  ## An initial one is auto generated by Artifactory on first startup.
  # masterKey: FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
  ## Alternatively, you can use a pre-existing secret with a key called master-key by specifying masterKeySecretName
  # masterKeySecretName:

  ## Join Key to connect other services to Artifactory
  ## IMPORTANT: Setting this value overrides the existing joinKey
  ## IMPORTANT: You should NOT use the example joinKey for a production deployment!
  # joinKey: EEEEEEEEEEEEEEEEEEEEEEEEEEEEEEEE
  ## Alternatively, you can use a pre-existing secret with a key called join-key by specifying joinKeySecretName
  # joinKeySecretName:

  # Add custom secrets - secret per file
  customSecrets:
  #  - name: custom-secret
  #    key: custom-secret.yaml
  #    data: >
  #      custom_secret_config:
  #        parameter1: value1
  #        parameter2: value2
  #  - name: custom-secret2
  #    key: custom-secret2.config
  #    data: |
  #      here the custom secret 2 config

  ## If false, all service console logs will not redirect to a common console.log
  consoleLog: false

  binarystore:
    enabled: true

  ## admin allows to set the password for the default admin user.
  ## See: https://www.jfrog.com/confluence/display/JFROG/Users+and+Groups#UsersandGroups-RecreatingtheDefaultAdminUserrecreate
  ## NOTE(review): password/secret/dataKey are all empty here, so the default
  ## admin password is not being set by the chart — confirm that is intended.
  admin:
    ip: "127.0.0.1"
    username: "admin"
    password:
    secret:
    dataKey:

  ## Extra pre-start command to install JDBC driver for MySql/MariaDb/Oracle
  # preStartCommand: "mkdir -p /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib; cd /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib && wget -O /opt/jfrog/artifactory/var/bootstrap/artifactory/tomcat/lib/mysql-connector-java-5.1.41.jar https://jcenter.bintray.com/mysql/mysql-connector-java/5.1.41/mysql-connector-java-5.1.41.jar"
  ## Extra post-start command to run extra commands after container starts
  # postStartCommand:

  ## Extra environment variables that can be used to tune Artifactory to your needs.
  ## Uncomment and set value as needed
  ## (empty value: no extra env vars are injected)
  extraEnvironmentVariables:
  # - name: SERVER_XML_ARTIFACTORY_PORT
  #   value: "8081"
  # - name: SERVER_XML_ARTIFACTORY_MAX_THREADS
  #   value: "200"
  # - name: SERVER_XML_ACCESS_MAX_THREADS
  #   value: "50"
  # - name: SERVER_XML_ARTIFACTORY_EXTRA_CONFIG
  #   value: ""
  # - name: SERVER_XML_ACCESS_EXTRA_CONFIG
  #   value: ""
  # - name: SERVER_XML_EXTRA_CONNECTOR
  #   value: ""
  # - name: DB_POOL_MAX_ACTIVE
  #   value: "100"
  # - name: DB_POOL_MAX_IDLE
  #   value: "10"
  # - name: MY_SECRET_ENV_VAR
  #   valueFrom:
  #     secretKeyRef:
  #       name: my-secret-name
  #       key: my-secret-key

  ## Helm-templated content of Artifactory's system.yaml. Rendered as one
  ## string; comments cannot be added inside without changing the output.
  ## NOTE(review): references .Values.database.* and .Values.postgresql.*
  ## which are not visible in this chunk — confirm they exist in the full
  ## values file.
  systemYaml: |
    shared:
      logging:
        consoleLog:
          enabled: {{ .Values.artifactory.consoleLog }}
      extraJavaOpts: >
        -Dartifactory.access.client.max.connections={{ .Values.access.tomcat.connector.maxThreads }}
      {{- with .Values.artifactory.javaOpts }}
        -Dartifactory.async.corePoolSize={{ .corePoolSize }}
      {{- if .xms }}
        -Xms{{ .xms }}
      {{- end }}
      {{- if .xmx }}
        -Xmx{{ .xmx }}
      {{- end }}
      {{- if .jmx.enabled }}
        -Dcom.sun.management.jmxremote
        -Dcom.sun.management.jmxremote.port={{ .jmx.port }}
        -Dcom.sun.management.jmxremote.rmi.port={{ .jmx.port }}
        -Dcom.sun.management.jmxremote.ssl={{ .jmx.ssl }}
      {{- if .jmx.host }}
        -Djava.rmi.server.hostname={{ tpl .jmx.host $ }}
      {{- else }}
        -Djava.rmi.server.hostname={{ template "artifactory.fullname" $ }}
      {{- end }}
      {{- if .jmx.authenticate }}
        -Dcom.sun.management.jmxremote.authenticate=true
        -Dcom.sun.management.jmxremote.access.file={{ .jmx.accessFile }}
        -Dcom.sun.management.jmxremote.password.file={{ .jmx.passwordFile }}
      {{- else }}
        -Dcom.sun.management.jmxremote.authenticate=false
      {{- end }}
      {{- end }}
      {{- if .other }}
        {{ .other }}
      {{- end }}
      {{- end }}
      {{- if or .Values.database.type .Values.postgresql.enabled }}
      database:
        {{- if .Values.postgresql.enabled }}
        type: postgresql
        url: "jdbc:postgresql://{{ .Release.Name }}-postgresql:{{ .Values.postgresql.service.port }}/{{ .Values.postgresql.postgresqlDatabase }}"
        driver: org.postgresql.Driver
        username: "{{ .Values.postgresql.postgresqlUsername }}"
        {{- else }}
        type: "{{ .Values.database.type }}"
        driver: "{{ .Values.database.driver }}"
        {{- end }}
      {{- end }}
    artifactory:
      database:
        maxOpenConnections: {{ .Values.artifactory.database.maxOpenConnections }}
      tomcat:
        connector:
          maxThreads: {{ .Values.artifactory.tomcat.connector.maxThreads }}
          extraConfig: {{ .Values.artifactory.tomcat.connector.extraConfig }}
    frontend:
      session:
        timeMinutes: {{ .Values.frontend.session.timeoutMinutes | quote }}
    access:
      database:
        maxOpenConnections: {{ .Values.access.database.maxOpenConnections }}
      tomcat:
        connector:
          maxThreads: {{ .Values.access.tomcat.connector.maxThreads }}
          extraConfig: {{ .Values.access.tomcat.connector.extraConfig }}
    metadata:
      database:
        maxOpenConnections: {{ .Values.metadata.database.maxOpenConnections }}
    {{- if .Values.artifactory.replicator.enabled }}
    replicator:
      enabled: true
    {{- end }}

  # Annotations on the Artifactory pod.
  annotations: {}

  service:
    name: artifactory
    type: ClusterIP
    ## For supporting whitelist on the Artifactory service (useful if setting service.type=LoadBalancer)
    ## Set this to a list of IP CIDR ranges
    ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32']
    ## or pass from helm command line
    ## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}'
    loadBalancerSourceRanges: []
    annotations: {}

  ## The following setting are to configure a dedicated Ingress object for Replicator service
  replicator:
    enabled: false
    ingress:
      name:
      hosts: []
      annotations: {}
        # kubernetes.io/ingress.class: nginx
        # nginx.ingress.kubernetes.io/proxy-buffering: "off"
        # nginx.ingress.kubernetes.io/configuration-snippet: |
        #   chunked_transfer_encoding on;
      tls: []
        #  Secrets must be manually created in the namespace.
        # - hosts:
        #   - artifactory.domain.example
        #   secretName: chart-example-tls-secret

  ## IMPORTANT: If overriding artifactory.internalPort:
  ## DO NOT use port lower than 1024 as Artifactory runs as non-root and cannot bind to ports lower than 1024!
  externalPort: 8082
  internalPort: 8082
  externalArtifactoryPort: 8081
  internalArtifactoryPort: 8081
  uid: 1030
  terminationGracePeriodSeconds: 30

  ## The following settings are to configure the frequency of the liveness and readiness probes
  ## Both probes hit the router health endpoint; liveness waits longer
  ## (180s vs 90s) before the first check.
  livenessProbe:
    enabled: true
    path: /router/api/v1/system/health
    initialDelaySeconds: 180
    failureThreshold: 10
    timeoutSeconds: 10
    periodSeconds: 10
    successThreshold: 1

  readinessProbe:
    enabled: true
    path: /router/api/v1/system/health
    initialDelaySeconds: 90
    failureThreshold: 10
    timeoutSeconds: 10
    periodSeconds: 10
    successThreshold: 1

  persistence:
    mountPath: "/var/opt/jfrog/artifactory"
    enabled: true
    ## A manually managed Persistent Volume and Claim
    ## Requires persistence.enabled: true
    ## If defined, PVC must be created manually before volume will be bound
    # existingClaim:

    accessMode: ReadWriteOnce
    ## Storage default size. Should be increased for production deployments.
    size: 250Gi

    ## Use a custom Secret to be mounted as your binarystore.xml
    ## NOTE: This will ignore all settings below that make up binarystore.xml
    customBinarystoreXmlSecret:
    ## Cache default size. Should be increased for production deployments.
    ## (value is in bytes — 5000000000 is ~5 GB)
    maxCacheSize: 5000000000
    cacheProviderDir: cache

    ## Set the persistence storage type. This will apply the matching binarystore.xml to Artifactory config
    ## Supported types are:
    ## file-system (default)
    ## nfs
    ## google-storage
    ## aws-s3
    ## aws-s3-v3
    ## azure-blob
    type: file-system

    ## Use binarystoreXml to provide a custom binarystore.xml
    ## This can be a template or hardcoded.
    ## The active <config> section is selected at render time by the
    ## {{- if eq .Values.artifactory.persistence.type ... }} guards below;
    ## with type=file-system (set above) only the first section is emitted.
    binarystoreXml: |
      {{- if eq .Values.artifactory.persistence.type "file-system" -}}
      <!-- File system filestore -->
      <config version="v1">
          <chain>
            {{- if .Values.artifactory.persistence.fileSystem.cache.enabled }}
              <provider id="cache-fs" type="cache-fs">
            {{- end }}
                  <provider id="file-system" type="file-system"/>
            {{- if .Values.artifactory.persistence.fileSystem.cache.enabled }}
              </provider>
            {{- end }}
          </chain>

        {{- if .Values.artifactory.persistence.fileSystem.cache.enabled }}
          <provider id="cache-fs" type="cache-fs">
              <maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
              <cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
          </provider>
        {{- end }}
      </config>
      {{- end }}
      {{- if eq .Values.artifactory.persistence.type "google-storage" }}
      <!-- Google storage -->
      <config version="2">
          <chain>
              <provider id="cache-fs" type="cache-fs">
                  <provider id="eventual" type="eventual">
                      <provider id="retry" type="retry">
                          <provider id="google-storage" type="google-storage"/>
                      </provider>
                  </provider>
              </provider>
          </chain>

          <!-- Set max cache-fs size -->
          <provider id="cache-fs" type="cache-fs">
              <maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
              <cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
          </provider>

          <provider id="file-system" type="file-system">
              <fileStoreDir>{{ .Values.artifactory.persistence.mountPath }}/data/filestore</fileStoreDir>
              <tempDir>/tmp</tempDir>
          </provider>

          <provider id="google-storage" type="google-storage">
              <providerId>google-cloud-storage</providerId>
              <endpoint>{{ .Values.artifactory.persistence.googleStorage.endpoint }}</endpoint>
              <httpsOnly>{{ .Values.artifactory.persistence.googleStorage.httpsOnly }}</httpsOnly>
              <bucketName>{{ .Values.artifactory.persistence.googleStorage.bucketName }}</bucketName>
              <identity>{{ .Values.artifactory.persistence.googleStorage.identity }}</identity>
              <credential>{{ .Values.artifactory.persistence.googleStorage.credential }}</credential>
              <path>{{ .Values.artifactory.persistence.googleStorage.path }}</path>
              <bucketExists>{{ .Values.artifactory.persistence.googleStorage.bucketExists }}</bucketExists>
          </provider>
      </config>
      {{- end }}
      {{- if eq .Values.artifactory.persistence.type "aws-s3-v3" }}
      <!-- AWS S3 V3 -->
      <config version="2">
          <chain>
              <provider id="cache-fs" type="cache-fs">
                  <provider id="eventual" type="eventual">
                      <provider id="retry" type="retry">
                          <provider id="s3-storage-v3" type="s3-storage-v3"/>
                      </provider>
                  </provider>
              </provider>
          </chain>

          <!-- Set max cache-fs size -->
          <provider id="cache-fs" type="cache-fs">
              <maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
              <cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
          </provider>

        {{- with .Values.artifactory.persistence.awsS3V3 }}
          <provider id="s3-storage-v3" type="s3-storage-v3">
              <testConnection>{{ .testConnection }}</testConnection>
            {{- if .identity }}
              <identity>{{ .identity }}</identity>
            {{- end }}
            {{- if .credential }}
              <credential>{{ .credential }}</credential>
            {{- end }}
              <region>{{ .region }}</region>
              <bucketName>{{ .bucketName }}</bucketName>
              <path>{{ .path }}</path>
              <endpoint>{{ .endpoint }}</endpoint>
            {{- with .maxConnections }}
              <maxConnections>{{ . }}</maxConnections>
            {{- end }}
            {{- with .kmsServerSideEncryptionKeyId }}
              <kmsServerSideEncryptionKeyId>{{ . }}</kmsServerSideEncryptionKeyId>
            {{- end }}
            {{- with .kmsKeyRegion }}
              <kmsKeyRegion>{{ . }}</kmsKeyRegion>
            {{- end }}
            {{- with .kmsCryptoMode }}
              <kmsCryptoMode>{{ . }}</kmsCryptoMode>
            {{- end }}
            {{- if .useInstanceCredentials }}
              <useInstanceCredentials>true</useInstanceCredentials>
            {{- else }}
              <useInstanceCredentials>false</useInstanceCredentials>
            {{- end }}
              <usePresigning>{{ .usePresigning }}</usePresigning>
              <signatureExpirySeconds>{{ .signatureExpirySeconds }}</signatureExpirySeconds>
            {{- with .cloudFrontDomainName }}
              <cloudFrontDomainName>{{ . }}</cloudFrontDomainName>
            {{- end }}
            {{- with .cloudFrontKeyPairId }}
              <cloudFrontKeyPairId>{{ .cloudFrontKeyPairId }}</cloudFrontKeyPairId>
            {{- end }}
            {{- with .cloudFrontPrivateKey }}
              <cloudFrontPrivateKey>{{ . }}</cloudFrontPrivateKey>
            {{- end }}
            {{- with .enableSignedUrlRedirect }}
              <enableSignedUrlRedirect>{{ . }}</enableSignedUrlRedirect>
            {{- end }}
            {{- with .enablePathStyleAccess }}
              <enablePathStyleAccess>{{ . }}</enablePathStyleAccess>
            {{- end }}
          </provider>
        {{- end }}
      </config>
      {{- end }}

      {{- if eq .Values.artifactory.persistence.type "aws-s3" }}
      <!-- AWS S3 -->
      <config version="2">
          <chain> <!--template="s3"-->
              <provider id="cache-fs" type="cache-fs">
                  <provider id="eventual" type="eventual">
                      <provider id="retry-s3" type="retry">
                          <provider id="s3" type="s3"/>
                      </provider>
                  </provider>
              </provider>
          </chain>

          <!-- Set max cache-fs size -->
          <provider id="cache-fs" type="cache-fs">
              <maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
              <cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
          </provider>

          <provider id="s3" type="s3">
              <endpoint>{{ .Values.artifactory.persistence.awsS3.endpoint }}</endpoint>
          {{- if .Values.artifactory.persistence.awsS3.roleName }}
              <roleName>{{ .Values.artifactory.persistence.awsS3.roleName }}</roleName>
              <refreshCredentials>true</refreshCredentials>
          {{- else }}
              <refreshCredentials>{{ .Values.artifactory.persistence.awsS3.refreshCredentials }}</refreshCredentials>
          {{- end }}
              <s3AwsVersion>{{ .Values.artifactory.persistence.awsS3.s3AwsVersion }}</s3AwsVersion>
              <testConnection>{{ .Values.artifactory.persistence.awsS3.testConnection }}</testConnection>
              <httpsOnly>{{ .Values.artifactory.persistence.awsS3.httpsOnly }}</httpsOnly>
              <region>{{ .Values.artifactory.persistence.awsS3.region }}</region>
              <bucketName>{{ .Values.artifactory.persistence.awsS3.bucketName }}</bucketName>
          {{- if .Values.artifactory.persistence.awsS3.identity }}
              <identity>{{ .Values.artifactory.persistence.awsS3.identity }}</identity>
          {{- end }}
          {{- if .Values.artifactory.persistence.awsS3.credential }}
              <credential>{{ .Values.artifactory.persistence.awsS3.credential }}</credential>
          {{- end }}
              <path>{{ .Values.artifactory.persistence.awsS3.path }}</path>
          {{- range $key, $value := .Values.artifactory.persistence.awsS3.properties }}
              <property name="{{ $key }}" value="{{ $value }}"/>
          {{- end }}
          </provider>
      </config>
      {{- end }}
      {{- if eq .Values.artifactory.persistence.type "azure-blob" }}
      <!-- Azure Blob Storage -->
      <config version="2">
          <chain> <!--template="azure-blob-storage"-->
              <provider id="cache-fs" type="cache-fs">
                  <provider id="eventual" type="eventual">
                      <provider id="retry-azure-blob-storage" type="retry">
                          <provider id="azure-blob-storage" type="azure-blob-storage"/>
                      </provider>
                  </provider>
              </provider>
          </chain>

          <!-- Set max cache-fs size -->
          <provider id="cache-fs" type="cache-fs">
              <maxCacheSize>{{ .Values.artifactory.persistence.maxCacheSize }}</maxCacheSize>
              <cacheProviderDir>{{ .Values.artifactory.persistence.cacheProviderDir }}</cacheProviderDir>
          </provider>

          <provider id="azure-blob-storage" type="azure-blob-storage">
              <accountName>{{ .Values.artifactory.persistence.azureBlob.accountName }}</accountName>
              <accountKey>{{ .Values.artifactory.persistence.azureBlob.accountKey }}</accountKey>
              <endpoint>{{ .Values.artifactory.persistence.azureBlob.endpoint }}</endpoint>
              <containerName>{{ .Values.artifactory.persistence.azureBlob.containerName }}</containerName>
              <testConnection>{{ .Values.artifactory.persistence.azureBlob.testConnection }}</testConnection>
          </provider>
      </config>
      {{- end }}

    ## For artifactory.persistence.type nfs
    ## If using NFS as the shared storage, you must have a running NFS server that is accessible by your Kubernetes
    ## cluster nodes.
    ## Need to have the following set
    nfs:
      # Must pass actual IP of NFS server with '--set For artifactory.persistence.nfs.ip=${NFS_IP}'
      ip:
      haDataMount: "/data"
      haBackupMount: "/backup"
      dataDir: "/var/opt/jfrog/artifactory"
      backupDir: "/var/opt/jfrog/artifactory-backup"
      capacity: 200Gi

    ## For artifactory.persistence.type file-system
    ## (cache disabled, so the rendered binarystoreXml above omits cache-fs)
    fileSystem:
      cache:
        enabled: false

    ## For artifactory.persistence.type google-storage
    googleStorage:
      endpoint: storage.googleapis.com
      httpsOnly: false
      # Set a unique bucket name
      bucketName: "artifactory-gcp"
      identity:
      credential:
      path: "artifactory/filestore"
      bucketExists: false

    ## For artifactory.persistence.type aws-s3-v3
    awsS3V3:
      testConnection: false
      identity:
      credential:
      region:
      bucketName: artifactory-aws
      path: artifactory/filestore
      endpoint:
      maxConnections: 50
      kmsServerSideEncryptionKeyId:
      kmsKeyRegion:
      kmsCryptoMode:
      useInstanceCredentials: true
      usePresigning: false
      signatureExpirySeconds: 300
      cloudFrontDomainName:
      cloudFrontKeyPairId:
      cloudFrontPrivateKey:
      enableSignedUrlRedirect: false
      enablePathStyleAccess: false

    ## For artifactory.persistence.type aws-s3
    ## IMPORTANT: Make sure S3 `endpoint` and `region` match! See https://docs.aws.amazon.com/general/latest/gr/rande.html
    awsS3:
      # Set a unique bucket name
      bucketName: "artifactory-aws"
      endpoint:
      region:
      roleName:
      identity:
      credential:
      path: "artifactory/filestore"
      refreshCredentials: true
      httpsOnly: true
      testConnection: false
      s3AwsVersion: AWS4-HMAC-SHA256
      ## Additional properties to set on the s3 provider
      properties: {}
      #  httpclient.max-connections: 100
    ## For artifactory.persistence.type azure-blob
    azureBlob:
      accountName:
      accountKey:
      endpoint:
      containerName:
      testConnection: false
    ## artifactory data Persistent Volume Storage Class
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClassName: "-"
    ## Annotations for the Persistent Volume Claim
    annotations: {}
  ## Uncomment the following resources definitions or pass them from command line
  ## to control the cpu and memory resources allocated by the Kubernetes cluster
  # resources:
  #  requests:
  #    memory: "1Gi"
  #    cpu: "500m"
  #  limits:
  #    memory: "2Gi"
  #    cpu: "1"
  resources:
    requests:
      memory: "6Gi"
      cpu: "4"
    limits:
      memory: "10Gi"
      cpu: "8"
  ## The following Java options are passed to the java process running Artifactory.
  ## You should set them according to the resources set above
  # javaOpts:
  #   xms: "1g"
  #   xmx: "2g"
  javaOpts:
    ## Initial (xms) and maximum (xmx) JVM heap. xmx (8g) is kept below the
    ## container memory limit above (10Gi) so non-heap memory has headroom.
    xms: "6g"
    xmx: "8g"
    ## Remote JMX monitoring for the Artifactory JVM (disabled here).
    jmx:
      enabled: false
      port: 9010
      host:
      ssl: false
      # When authenticate is true, accessFile and passwordFile are required
      authenticate: false
      accessFile:
      passwordFile:
    corePoolSize: 8
    # other: ""

  ## Schedule the Artifactory pod only on nodes labeled local-repo_repo=true.
  nodeSelector:
    local-repo_repo: "true"

  tolerations: []

  affinity: {}

  ## SSH server for Artifactory; disabled here.
  ## internalPort/externalPort are also referenced by the nginx stream block in mainConf.
  ssh:
    enabled: false
    internalPort: 1339
    externalPort: 1339

frontend:
  ## Session settings
  session:
    ## Time in minutes after which the frontend token will need to be refreshed.
    ## Quoted so the value is passed as a string rather than a YAML integer.
    timeoutMinutes: '30'

access:
  ## Enable TLS by changing the tls entry (under the security section) in the access.config.yaml file.
  ## ref: https://www.jfrog.com/confluence/display/JFROG/Managing+TLS+Certificates#ManagingTLSCertificates
  ## When security.tls is set to true, JFrog Access will act as the Certificate Authority (CA) and sign the TLS certificates used by all the different JFrog Platform nodes.
  ## This ensures that the node to node communication is done over TLS.
  accessConfig:
    security:
      tls: false

  ## You can use a pre-existing secret by specifying customCertificatesSecretName
  ## Example : Create a tls secret using `kubectl create secret tls <secret-name> --cert=ca.crt --key=ca.private.key`
  # customCertificatesSecretName:

  ## When resetAccessCAKeys is true, Access will regenerate the CA certificate and matching private key
  # resetAccessCAKeys: false
  ## Max open connections to the Access service database
  ## (same value as metadata.database.maxOpenConnections below).
  database:
    maxOpenConnections: 80
  ## Embedded Tomcat connector tuning for the Access service.
  tomcat:
    connector:
      maxThreads: 50
      ## Extra attributes appended to the Tomcat connector configuration.
      extraConfig: 'acceptCount="100"'

metadata:
  ## Max open connections to the Metadata service database
  ## (kept in line with access.database.maxOpenConnections above).
  database:
    maxOpenConnections: 80

# Nginx
nginx:
  enabled: true
  kind: Deployment
  name: nginx
  labels: {}
  replicaCount: 1
  uid: 104
  gid: 107
  # Note that by default we use appVersion to get image tag/version
  image:
    # registry: docker.bintray.io
    # repository: jfrog/nginx-artifactory-pro
    registry: "dockerhub.local-repo.ai:5000"
    repository: devops/nginx-artifactory-pro
    # tag:
    pullPolicy: IfNotPresent

  # Priority Class name to be used in deployment if provided
  priorityClassName:

  # Sidecar containers for tailing Nginx logs
  loggers: []
  # - access.log
  # - error.log

  # Loggers containers resources
  # NOTE(review): re-indented to the file's 2-space step (was a 1-space step);
  # the resulting data is identical.
  loggersResources:
    requests:
      memory: "64Mi"
      cpu: "25m"
    limits:
      memory: "128Mi"
      cpu: "50m"

  # Logs options
  logs:
    stderr: false
    level: warn

  mainConf: |
    # Main Nginx configuration file
    worker_processes  4;

    {{ if .Values.nginx.logs.stderr }}
    error_log  stderr {{ .Values.nginx.logs.level }};
    {{- else -}}
    error_log  {{ .Values.nginx.persistence.mountPath }}/logs/error.log {{ .Values.nginx.logs.level }};
    {{- end }}
    pid        /tmp/nginx.pid;

    {{- if .Values.artifactory.ssh.enabled }}
    ## SSH Server Configuration
    stream {
      server {
        listen {{ .Values.nginx.ssh.internalPort }};
        proxy_pass {{ include "artifactory.fullname" . }}:{{ .Values.artifactory.ssh.externalPort }};
      }
    }
    {{- end }}

    events {
      worker_connections  1024;
    }


    http {
      include       /etc/nginx/mime.types;
      default_type  application/octet-stream;

      variables_hash_max_size 1024;
      variables_hash_bucket_size 64;
      server_names_hash_max_size 4096;
      server_names_hash_bucket_size 128;
      types_hash_max_size 2048;
      types_hash_bucket_size 64;
      proxy_read_timeout 2400s;
      client_header_timeout 2400s;
      client_body_timeout 2400s;
      proxy_connect_timeout 75s;
      proxy_send_timeout 2400s;
      proxy_buffer_size 128k;
      proxy_buffers 40 128k;
      proxy_busy_buffers_size 128k;
      proxy_temp_file_write_size 250m;
      proxy_http_version 1.1;
      client_body_buffer_size 128k;

      log_format  main  '$remote_addr - $remote_user [$time_local] "$request" '
      '$status $body_bytes_sent "$http_referer" '
      '"$http_user_agent" "$http_x_forwarded_for"';

      log_format timing 'ip = $remote_addr '
      'user = \"$remote_user\" '
      'local_time = \"$time_local\" '
      'host = $host '
      'request = \"$request\" '
      'status = $status '
      'bytes = $body_bytes_sent '
      'upstream = \"$upstream_addr\" '
      'upstream_time = $upstream_response_time '
      'request_time = $request_time '
      'referer = \"$http_referer\" '
      'UA = \"$http_user_agent\"';

      access_log  {{ .Values.nginx.persistence.mountPath }}/logs/access.log  timing;

      sendfile        on;
      #tcp_nopush     on;

      keepalive_timeout  65;

      #gzip  on;

      include /etc/nginx/conf.d/*.conf;

    }


  # NOTE(review): TLSv1 and TLSv1.1 were removed from ssl_protocols — they are
  # deprecated (RFC 8996) and flagged by security scanners; only TLS 1.2+ is offered.
  artifactoryConf: |
    {{- if .Values.nginx.https.enabled }}
    ssl_protocols TLSv1.2 TLSv1.3;
    ssl_certificate  {{ .Values.nginx.persistence.mountPath }}/ssl/tls.crt;
    ssl_certificate_key  {{ .Values.nginx.persistence.mountPath }}/ssl/tls.key;
    ssl_session_cache shared:SSL:1m;
    ssl_prefer_server_ciphers   on;
    {{- end }}
    ## server configuration
    server {
    {{- if .Values.nginx.internalPortHttps }}
      listen {{ .Values.nginx.internalPortHttps }} ssl;
    {{- else -}}
      {{- if .Values.nginx.https.enabled }}
      listen {{ .Values.nginx.https.internalPort }} ssl;
      {{- end }}
    {{- end }}
    {{- if .Values.nginx.internalPortHttp }}
      listen {{ .Values.nginx.internalPortHttp }};
    {{- else -}}
      {{- if .Values.nginx.http.enabled }}
      listen {{ .Values.nginx.http.internalPort }};
      {{- end }}
    {{- end }}
      server_name ~(?<repo>.+)\.{{ include "artifactory.fullname" . }} {{ include "artifactory.fullname" . }}
      {{- range .Values.ingress.hosts -}}
        {{- if contains "." . -}}
          {{ "" | indent 0 }} ~(?<repo>.+)\.{{ . }}
        {{- end -}}
      {{- end -}};

      if ($http_x_forwarded_proto = '') {
        set $http_x_forwarded_proto  $scheme;
      }
      ## Application specific logs
      ## access_log /var/log/nginx/artifactory-access.log timing;
      ## error_log /var/log/nginx/artifactory-error.log;
      rewrite ^/artifactory/?$ / redirect;
      if ( $repo != "" ) {
        rewrite ^/(v1|v2)/(.*) /artifactory/api/docker/$repo/$1/$2 break;
      }
      chunked_transfer_encoding on;
      client_max_body_size 0;

      location / {
        proxy_read_timeout  900;
        proxy_pass_header   Server;
        proxy_cookie_path   ~*^/.* /;
        proxy_pass          {{ include "artifactory.scheme" . }}://{{ include "artifactory.fullname" . }}:{{ .Values.artifactory.externalPort }}/;
        {{- if .Values.nginx.service.ssloffload}}
        proxy_set_header    X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host;
        {{- else }}
        proxy_set_header    X-JFrog-Override-Base-Url $http_x_forwarded_proto://$host:$server_port;
        proxy_set_header    X-Forwarded-Port  $server_port;
        {{- end }}
        proxy_set_header    X-Forwarded-Proto $http_x_forwarded_proto;
        proxy_set_header    Host              $http_host;
        proxy_set_header    X-Forwarded-For   $proxy_add_x_forwarded_for;

        location /artifactory/ {
          if ( $request_uri ~ ^/artifactory/(.*)$ ) {
            proxy_pass       {{ include "artifactory.scheme" . }}://{{ include "artifactory.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/$1;
          }
          proxy_pass         {{ include "artifactory.scheme" . }}://{{ include "artifactory.fullname" . }}:{{ .Values.artifactory.externalArtifactoryPort }}/artifactory/;
        }
      }
    }

  service:
    ## For minikube, set this to NodePort, elsewhere use LoadBalancer
    type: LoadBalancer
    ssloffload: false
    ## For supporting whitelist on the Nginx LoadBalancer service
    ## Set this to a list of IP CIDR ranges
    ## Example: loadBalancerSourceRanges: ['10.10.10.5/32', '10.11.10.5/32']
    ## or pass from helm command line
    ## Example: helm install ... --set nginx.service.loadBalancerSourceRanges='{10.10.10.5/32,10.11.10.5/32}'
    loadBalancerSourceRanges: []
    annotations: {}
    ## Provide static ip address
    loadBalancerIP:
    ## There are two available options: “Cluster” (default) and “Local”.
    externalTrafficPolicy: Cluster

  http:
    enabled: true
    externalPort: 80
    internalPort: 80
  https:
    enabled: true
    externalPort: 443
    internalPort: 443

  ssh:
    internalPort: 1339
    externalPort: 1339

  # DEPRECATED: The following will be removed in a future release
  # externalPortHttp: 80
  # internalPortHttp: 80
  # externalPortHttps: 443
  # internalPortHttps: 443

  ## The following settings are to configure the frequency of the liveness and readiness probes
  livenessProbe:
    enabled: true
    path: /router/api/v1/system/health
    initialDelaySeconds: 180
    failureThreshold: 10
    timeoutSeconds: 10
    periodSeconds: 10
    successThreshold: 1

  readinessProbe:
    enabled: true
    path: /router/api/v1/system/health
    initialDelaySeconds: 120
    failureThreshold: 10
    timeoutSeconds: 10
    periodSeconds: 10
    successThreshold: 1

  ## The SSL secret that will be used by the Nginx pod
  # tlsSecretName: chart-example-tls
  ## Custom ConfigMap for nginx.conf
  customConfigMap:
  ## Custom ConfigMap for artifactory-ha.conf
  customArtifactoryConfigMap:
  persistence:
    mountPath: "/var/opt/jfrog/nginx"
    enabled: false
    ## A manually managed Persistent Volume and Claim
    ## Requires persistence.enabled: true
    ## If defined, PVC must be created manually before volume will be bound
    # existingClaim:

    accessMode: ReadWriteOnce
    size: 5Gi
    ## nginx data Persistent Volume Storage Class
    ## If defined, storageClassName: <storageClass>
    ## If set to "-", storageClassName: "", which disables dynamic provisioning
    ## If undefined (the default) or set to null, no storageClassName spec is
    ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
    ##   GKE, AWS & OpenStack)
    ##
    # storageClassName: "-"
  ## Resources for the nginx pod itself.
  ## NOTE(review): re-indented to the file's 2-space step (was a 1-space step);
  ## the resulting data is identical.
  resources:
    requests:
      memory: "250Mi"
      cpu: "100m"
    limits:
      memory: "250Mi"
      cpu: "500m"
  nodeSelector:
    local-repo_repo: "true"

  tolerations: []

  affinity: {}

## Database configurations
## Use the wait-for-db init container. Set to false to skip
waitForDatabase: true

## Configuration values for the PostgreSQL dependency sub-chart
## ref: https://github.com/bitnami/charts/blob/master/bitnami/postgresql/README.md
postgresql:
  enabled: true
  ## Images are pulled from the private local registry instead of Docker Hub.
  image:
    # registry: docker.bintray.io
    # repository: bitnami/postgresql
    # tag: 12.3.0-debian-10-r71
    registry: "dockerhub.local-repo.ai:5000"
    repository: devops/postgresql
    tag: 12.3.0-debian-10-r71
  postgresqlUsername: artifactory
  ## NOTE(review): plaintext DB password in a values file ends up in version
  ## control; prefer referencing an existing Kubernetes secret if the sub-chart
  ## supports it — confirm against the bitnami/postgresql chart docs.
  postgresqlPassword: "local-repo123!"
  postgresqlDatabase: artifactory
  ## Values injected into postgresql.conf; kept as strings, with listenAddresses
  ## double-quoted so the rendered config contains the quoted literal '*'.
  postgresqlExtendedConf:
    listenAddresses: "'*'"
    maxConnections: "1500"
  persistence:
    enabled: true
    size: 50Gi
  service:
    port: 5432
  ## Pin both master and slave pods to the labeled repo nodes.
  master:
    nodeSelector:
      local-repo_repo: "true"
    affinity: {}
    tolerations: []
  slave:
    nodeSelector:
      local-repo_repo: "true"
    affinity: {}
    tolerations: []
  ## Resources for the PostgreSQL pods.
  ## NOTE(review): re-indented to the file's 2-space step (was a 1-space step);
  ## the resulting data is identical.
  resources:
    requests:
      memory: "512Mi"
      cpu: "100m"
    limits:
      memory: "1Gi"
      cpu: "500m"

## If NOT using the PostgreSQL in this chart (postgresql.enabled=false),
## specify custom database details here or leave empty and Artifactory will use embedded derby
## All keys below are intentionally left empty because the bundled PostgreSQL
## above is enabled (postgresql.enabled: true).
database:
  type:
  driver:
  ## If you set the url, leave host and port empty
  url:
  ## If you would like this chart to create the secret containing the db
  ## password, use these values
  user:
  password:
  ## If you have existing Kubernetes secrets containing db credentials, use
  ## these values
  secrets: {}
  #  user:
  #    name: "rds-artifactory"
  #    key: "db-user"
  #  password:
  #    name: "rds-artifactory"
  #    key: "db-password"
  #  url:
  #    name: "rds-artifactory"
  #    key: "db-url"

# Filebeat Sidecar container
## The provided filebeat configuration is for Artifactory logs. It assumes you have a logstash installed and configured properly.
filebeat:
  enabled: false
  name: artifactory-filebeat
  image:
    repository: "docker.elastic.co/beats/filebeat"
    version: 7.9.2
  logstashUrl: "logstash:5044"

  livenessProbe:
    exec:
      command:
        - sh
        - -c
        - |
          #!/usr/bin/env bash -e
          curl --fail 127.0.0.1:5066
    failureThreshold: 3
    initialDelaySeconds: 10
    periodSeconds: 10
    timeoutSeconds: 5

  readinessProbe:
    exec:
      command:
        - sh
        - -c
        - |
          #!/usr/bin/env bash -e
          filebeat test output
    failureThreshold: 3
    initialDelaySeconds: 10
    periodSeconds: 10
    timeoutSeconds: 5

  ## Resources for the filebeat sidecar container.
  ## NOTE(review): re-indented to the file's 2-space step (was a 1-space step);
  ## the resulting data is identical.
  resources:
    requests:
      memory: "100Mi"
      cpu: "100m"
    limits:
      memory: "100Mi"
      cpu: "100m"

  filebeatYml: |
    logging.level: info
    path.data: {{ .Values.artifactory.persistence.mountPath }}/log/filebeat
    name: artifactory-filebeat
    queue.spool: ~
    filebeat.inputs:
    - type: log
      enabled: true
      close_eof: ${CLOSE:false}
      paths:
         - {{ .Values.artifactory.persistence.mountPath }}/log/*.log
      fields:
        service: "jfrt"
        log_type: "artifactory"
    output:
      logstash:
         hosts: ["{{ .Values.filebeat.logstashUrl }}"]

## Prometheus Exporter / Metrics
##
metrics:
  enabled: false

  service:
    port: 24231
    type: ClusterIP

  ## Prometheus Operator ServiceMonitor configuration
  ##
  serviceMonitor:
    path: "/metrics"
    labels: {}

    ## Interval at which metrics should be scraped
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    # interval:

    ## Timeout after which the scrape is ended
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#endpoint
    ##
    # scrapeTimeout:

## Fluentd Sidecar Container
##
fluentd:
  enabled: false

  image:
    ## The Bitnami Fluentd image has the Prometheus plugin pre-installed.
    ##
    repository: docker.io/bitnami/fluentd
    tag: 1.11.2

  fluentdConf: |
    ## Prometheus Input Plugin Configuration

    # input plugin that exports metrics
    <source>
      @type prometheus
      port {{ .Values.metrics.service.port }}
      metrics_path {{ .Values.metrics.serviceMonitor.path }}
    </source>

    <source>
      @type monitor_agent
    </source>

    <source>
      @type forward
    </source>

    # input plugin that collects metrics from MonitorAgent
    <source>
      @type prometheus_monitor
      <labels>
        host ${hostname}
      </labels>
    </source>

    # input plugin that collects metrics for output plugin
    <source>
      @type prometheus_output_monitor
      <labels>
        host ${hostname}
      </labels>
    </source>

    # input plugin that collects metrics for in_tail plugin
    <source>
      @type prometheus_tail_monitor
      <labels>
        host ${hostname}
      </labels>
    </source>

    <source>
      @type tail
      @id access_service_tail
      path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-service.log"
      pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-service.log.pos"
      tag jfrog.rt.access.service
      <parse>
        @type none
      </parse>
    </source>
    <source>
      @type tail
      @id artifactory_service_tail
      path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-service.log"
      pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-service.log.pos"
      tag jfrog.rt.artifactory.service
      <parse>
        @type none
      </parse>
    </source>
    <source>
      @type tail
      @id frontend_service_tail
      path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/frontend-service.log"
      pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/frontend-service.log.pos"
      tag jfrog.rt.frontend.service
      <parse>
        @type none
      </parse>
    </source>
    <source>
      @type tail
      @id metadata_service_tail
      path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/metadata-service.log"
      pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/metadata-service.log.pos"
      tag jfrog.rt.metadata.service
      <parse>
        @type none
      </parse>
    </source>
    <source>
      @type tail
      @id router_service_tail
      path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log"
      pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-service.log.pos"
      tag jfrog.rt.router.service
      <parse>
        @type none
      </parse>
    </source>
    # Strip out color codes then field extract the service fields
    <filter jfrog.rt.**.service>
      @type record_transformer
      enable_ruby true
      <record>
        message ${record["message"].gsub(/\e\[([;\d]+)?m/, '')}
      </record>
    </filter>
    <filter jfrog.rt.**.service>
      @type parser
      key_name message
      <parse>
        @type multiline
        format_firstline /\d{4}-\d{1,2}-\d{1,2}/
        format1 /^(?<timestamp>[^ ]*) \[(?<service_type>[^\]]*)\] \[(?<log_level>[^\]]*)\] \[(?<trace_id>[^\]]*)\] \[(?<class_line_number>.*)\] \[(?<thread>.*)\] -(?<message>.*)$/
        time_key timestamp
        time_format %Y-%m-%dT%H:%M:%S.%LZ
      </parse>
    </filter>
    # End Service Fields Extraction
    <source>
      @type tail
      @id router_traefik_tail
      path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log"
      pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-traefik.log.pos"
      tag jfrog.rt.router.traefik
      <parse>
          @type multiline
          format_firstline /\d{4}-\d{1,2}-\d{1,2}/
          format1 /^(?<timestamp>[^ ]*) \[(?<service_type>[^\]]*)\] \[(?<log_level>[^\]]*)\] \[(?<trace_id>[^\]]*)\] \[(?<class_line_number>.*)\] \[(?<thread>.*)\] - (?<message>.+)$/
          time_key timestamp
          time_format %Y-%m-%dT%H:%M:%S.%LZ
      </parse>
    </source>
    <source>
      @type tail
      @id access_request_tail
      path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-request.log"
      pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-request.log.pos"
      tag jfrog.rt.access.request
      <parse>
        @type regexp
        expression ^(?<timestamp>[^ ]*)\|(?<trace_id>[^\|]*)\|(?<remote_address>[^\|]*)\|(?<username>[^\|]*)\|(?<request_method>[^\|]*)\|(?<request_url>[^\|]*)\|(?<return_status>[^\|]*)\|(?<request_content_length>[^\|]*)\|(?<response_content_length>[^\|]*)\|(?<request_duration>[^\|]*)\|(?<request_user_agent>.+)$
        time_key timestamp
        time_format %Y-%m-%dT%H:%M:%S.%LZ
      </parse>
    </source>
    <source>
      @type tail
      @id artifactory_request_tail
      path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-request.log"
      pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-request.log.pos"
      tag jfrog.rt.artifactory.request
      <parse>
        @type regexp
        expression ^(?<timestamp>[^ ]*)\|(?<trace_id>[^\|]*)\|(?<remote_address>[^\|]*)\|(?<username>[^\|]*)\|(?<request_method>[^\|]*)\|(?<request_url>[^\|]*)\|(?<return_status>[^\|]*)\|(?<request_content_length>[^\|]*)\|(?<response_content_length>[^\|]*)\|(?<request_duration>[^\|]*)\|(?<request_user_agent>.+)$
        time_key timestamp
        time_format %Y-%m-%dT%H:%M:%S.%LZ
        # fixed: the expression captures "return_status", not "return_status_code",
        # so the previous cast never applied
        types response_content_length:integer, request_content_length:integer, return_status:integer
      </parse>
    </source>
    <filter jfrog.rt.artifactory.request>
      @type record_transformer
      enable_ruby true
      <record>
        repo ${!record["request_url"].strip().start_with?("/api") ? (record["request_url"].strip().split('/')[1]) : ("")}
        artifact ${!record["request_url"].strip().start_with?("/api") ? (val = record["request_url"].strip().split('/'); val[val.length()-1]) : ("")}
        dockerRepo ${record["request_url"].strip().include?("/api/docker") && !record["request_url"].include?("/api/docker/null") && !record["request_url"].include?("/api/docker/v2") ? (record["request_url"].strip().split('/')[3]) : ("")}
        dockerImage ${record["request_url"].strip().include?("/api/docker") && !record["request_url"].include?("/api/docker/null") && !record["request_url"].include?("/api/docker/v2")  ? (record["request_url"].strip().split('/')[5]) : ("")}
        data_download ${record["response_content_length"] == -1 ? 0 : record["response_content_length"]}
        data_upload ${record["request_content_length"] == -1 ? 0 : record["request_content_length"]}
      </record>
    </filter>
    <source>
      @type tail
      @id frontend_request_tail
      path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/frontend-request.log"
      pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/frontend-request.log.pos"
      tag jfrog.rt.frontend.request
      <parse>
        @type regexp
        expression ^(?<timestamp>[^ ]*)\|(?<trace_id>[^\|]*)\|(?<remote_address>[^\|]*)\|(?<username>[^\|]*)\|(?<request_method>[^\|]*)\|(?<request_url>[^\|]*)\|(?<return_status>[^\|]*)\|(?<request_content_length>[^\|]*)\|(?<response_content_length>[^\|]*)\|(?<request_duration>[^\|]*)\|(?<request_user_agent>.+)$
        time_key timestamp
        time_format %Y-%m-%dT%H:%M:%S.%LZ
      </parse>
    </source>
    <source>
      @type tail
      @id metadata_request_tail
      path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/metadata-request.log"
      pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/metadata-request.log.pos"
      tag jfrog.rt.metadata.request
      <parse>
        @type regexp
        expression ^(?<timestamp>[^ ]*)\|(?<trace_id>[^\|]*)\|(?<remote_address>[^\|]*)\|(?<username>[^\|]*)\|(?<request_method>[^\|]*)\|(?<request_url>[^\|]*)\|(?<return_status>[^\|]*)\|(?<request_content_length>[^\|]*)\|(?<response_content_length>[^\|]*)\|(?<request_duration>[^\|]*)\|(?<request_user_agent>.+)$
        time_key timestamp
        time_format %Y-%m-%dT%H:%M:%S.%LZ
      </parse>
    </source>
    <source>
      @type tail
      @id router_request_tail
      path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log"
      pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/router-request.log.pos"
      tag jfrog.rt.router.request
      <parse>
        @type json
        time_key time
        time_format %Y-%m-%dT%H:%M:%SZ
      </parse>
    </source>
    <source>
      @type tail
      @id artifactory_access_tail
      path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-access.log"
      pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/artifactory-access.log.pos"
      tag jfrog.rt.artifactory.access
      <parse>
        @type regexp
        expression /^(?<timestamp>[^ ]*) \[(?<trace_id>[^\]]*)\] \[(?<action_response>[^\]]*)\] (?<repo_path>.*) for client : (?<username>.+)/(?<ip>.+)$/
        time_key timestamp
        time_format %Y-%m-%dT%H:%M:%S.%LZ
      </parse>
    </source>
    <source>
      @type tail
      @id access_security_audit_tail
      path "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-security-audit.log"
      pos_file "#{ENV['JF_PRODUCT_DATA_INTERNAL']}/log/access-security-audit.log.pos"
      tag jfrog.rt.access.audit
      <parse>
        @type regexp
        expression /^(?<timestamp>[^ ]*)\|(?<token_id>[^ ]*)\|(?<user_ip>[^ ]*)\|(?<user>[^ ]*)\|(?<logged_principal>[^ ]*)\|(?<entity_name>[^ ]*)\|(?<event_type>[^ ]*)\|(?<event>[^ ]*)\|(?<data_changed>.*)/
        time_key timestamp
        time_format %Y-%m-%dT%H:%M:%S.%LZ
      </parse>
    </source>

    # WHAT LOG IT WAS INTO THE JSON
    <filter jfrog.**>
      @type record_transformer
      <record>
        log_source ${tag}
      </record>
    </filter>

    <filter jfrog.rt.artifactory.request>
      @type prometheus

      <metric>
        name jfrog_rt_data_download
        type gauge
        desc artifactory data download
        key data_download
        <labels>
          host ${hostname}
          remote_address ${remote_address}
          repo ${repo}
          response_content_length ${response_content_length}
          data_download ${data_download}
        </labels>
      </metric>

      <metric>
        name jfrog_rt_data_upload
        type gauge
        desc artifactory data upload
        key data_upload
        <labels>
          host ${hostname}
          remote_address ${remote_address}
          repo ${repo}
          request_content_length ${request_content_length}
          data_upload ${data_upload}
        </labels>
      </metric>

      <metric>
        name jfrog_rt_req
        type counter
        desc artifactory requests
        <labels>
          host ${hostname}
          request_url ${request_url}
          return_status ${return_status}
          repo ${repo}
          artifact ${artifact}
          dockerRepo ${dockerRepo}
          dockerImage ${dockerImage}
          remote_address ${remote_address}
        </labels>
      </metric>
    </filter>

    <filter jfrog.rt.artifactory.service>
    @type prometheus
      <metric>
        name jfrog_rt_log_level
        type counter
        desc artifactory log_levels
        <labels>
          host ${hostname}
          log_level ${log_level}
        </labels>
      </metric>
    </filter>

    <filter jfrog.rt.artifactory.access>
    @type prometheus

    <metric>
      name jfrog_rt_access
      type counter
      desc artifactory access
      <labels>
        host ${hostname}
        username ${username}
        action_response ${action_response}
        ip ${ip}
      </labels>
    </metric>

    </filter>

    <filter jfrog.rt.access.audit>
    @type prometheus

    <metric>
      name jfrog_rt_access_audit
      type counter
      desc artifactory access audit
      <labels>
        host ${hostname}
        user ${user}
        event_type ${event_type}
        event ${event}
      </labels>
    </metric>

    </filter>

然后执行如下的部署命令

cd helm

helm install local-repo artifactory-11.4.5.tgz --version 11.4.5 --namespace artifactory -f ./values.yaml \
    --set artifactory.joinKeySecretName=local-repo-joinkey-secret \
    --set artifactory.masterKeySecretName=local-repo-masterkey-secret

如果你想 debug,可以使用下面的方式

# for debug: 
# helm install local-repo artifactory-11.4.5.tgz --version 11.4.5 --namespace artifactory -f ./values.yaml \
#     --set artifactory.joinKeySecretName=local-repo-joinkey-secret \
#     --set artifactory.masterKeySecretName=local-repo-masterkey-secret --dry-run

删除部署

helm delete local-repo -n artifactory