kind: ConfigMap
apiVersion: v1
metadata:
  name: ko-data
  namespace: openshift-operators
data:
  knative-serving-v0.12.1.yaml: |
    ---
    apiVersion: v1
    kind: Namespace
    metadata:
      name: knative-serving
      labels:
        istio-injection: enabled
        serving.knative.dev/release: devel
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: knative-serving-addressable-resolver
      labels:
        serving.knative.dev/release: devel
        duck.knative.dev/addressable: "true"
    rules:
      - apiGroups:
          - serving.knative.dev
        resources:
          - routes
          - routes/status
          - services
          - services/status
        verbs:
          - get
          - list
          - watch
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: knative-serving-istio
      labels:
        serving.knative.dev/release: devel
        serving.knative.dev/controller: "true"
        networking.knative.dev/ingress-provider: istio
    rules:
      - apiGroups: ["networking.istio.io"]
        resources: ["virtualservices", "gateways"]
        verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRole
    metadata:
      name: custom-metrics-server-resources
      labels:
        serving.knative.dev/release: devel
        autoscaling.knative.dev/metric-provider: custom-metrics
    rules:
      - apiGroups: ["custom.metrics.k8s.io"]
        resources: ["*"]
        verbs: ["*"]
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: knative-serving-namespaced-admin
      labels:
        rbac.authorization.k8s.io/aggregate-to-admin: "true"
        serving.knative.dev/release: devel
    rules:
      - apiGroups: ["serving.knative.dev", "networking.internal.knative.dev", "autoscaling.internal.knative.dev", "caching.internal.knative.dev"]
        resources: ["*"]
        verbs: ["*"]
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: knative-serving-namespaced-edit
      labels:
        rbac.authorization.k8s.io/aggregate-to-edit: "true"
        serving.knative.dev/release: devel
    rules:
      - apiGroups: ["serving.knative.dev", "networking.internal.knative.dev", "autoscaling.internal.knative.dev", "caching.internal.knative.dev"]
        resources: ["*"]
        verbs: ["create", "update", "patch", "delete"]
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: knative-serving-namespaced-view
      labels:
        rbac.authorization.k8s.io/aggregate-to-view: "true"
        serving.knative.dev/release: devel
    rules:
      - apiGroups: ["serving.knative.dev", "networking.internal.knative.dev", "autoscaling.internal.knative.dev", "caching.internal.knative.dev"]
        resources: ["*"]
        verbs: ["get", "list", "watch"]
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: knative-serving-admin
      labels:
        serving.knative.dev/release: devel
    aggregationRule:
      clusterRoleSelectors:
        - matchLabels:
            serving.knative.dev/controller: "true"
    rules: [] # Rules are automatically filled in by the controller manager.
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: knative-serving-core
      labels:
        serving.knative.dev/release: devel
        serving.knative.dev/controller: "true"
    rules:
      - apiGroups: [""]
        resources: ["pods", "namespaces", "secrets", "configmaps", "endpoints", "services", "events", "serviceaccounts"]
        verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
      - apiGroups: [""]
        resources: ["endpoints/restricted"] # Permission for RestrictedEndpointsAdmission
        verbs: ["create"]
      - apiGroups: ["apps"]
        resources: ["deployments", "deployments/finalizers"] # finalizers are needed for the owner reference of the webhook
        verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
      - apiGroups: ["admissionregistration.k8s.io"]
        resources: ["mutatingwebhookconfigurations", "validatingwebhookconfigurations"]
        verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
      - apiGroups: ["apiextensions.k8s.io"]
        resources: ["customresourcedefinitions"]
        verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
      - apiGroups: ["autoscaling"]
        resources: ["horizontalpodautoscalers"]
        verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
      - apiGroups: ["serving.knative.dev", "autoscaling.internal.knative.dev", "networking.internal.knative.dev"]
        resources: ["*", "*/status", "*/finalizers"]
        verbs: ["get", "list", "create", "update", "delete", "deletecollection", "patch", "watch"]
      - apiGroups: ["caching.internal.knative.dev"]
        resources: ["images"]
        verbs: ["get", "list", "create", "update", "delete", "patch", "watch"]
    ---
    kind: ClusterRole
    apiVersion: rbac.authorization.k8s.io/v1
    metadata:
      name: knative-serving-podspecable-binding
      labels:
        serving.knative.dev/release: devel
        duck.knative.dev/podspecable: "true"
    rules:
      - apiGroups:
          - serving.knative.dev
        resources:
          - configurations
          - services
        verbs:
          - list
          - watch
          - patch
    ---
    apiVersion: v1
    kind: ServiceAccount
    metadata:
      name: controller
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: custom-metrics:system:auth-delegator
      labels:
        serving.knative.dev/release: devel
        autoscaling.knative.dev/metric-provider: custom-metrics
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: system:auth-delegator
    subjects:
      - kind: ServiceAccount
        name: controller
        namespace: knative-serving
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: hpa-controller-custom-metrics
      labels:
        serving.knative.dev/release: devel
        autoscaling.knative.dev/metric-provider: custom-metrics
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: ClusterRole
      name: custom-metrics-server-resources
    subjects:
      - kind: ServiceAccount
        name: horizontal-pod-autoscaler
        namespace: kube-system
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: ClusterRoleBinding
    metadata:
      name: knative-serving-controller-admin
      labels:
        serving.knative.dev/release: devel
    subjects:
      - kind: ServiceAccount
        name: controller
        namespace: knative-serving
    roleRef:
      kind: ClusterRole
      name: knative-serving-admin
      apiGroup: rbac.authorization.k8s.io
    ---
    apiVersion: rbac.authorization.k8s.io/v1
    kind: RoleBinding
    metadata:
      name: custom-metrics-auth-reader
      namespace: kube-system
      labels:
        serving.knative.dev/release: devel
        autoscaling.knative.dev/metric-provider: custom-metrics
    roleRef:
      apiGroup: rbac.authorization.k8s.io
      kind: Role
      name: extension-apiserver-authentication-reader
    subjects:
      - kind: ServiceAccount
        name: controller
        namespace: knative-serving
    ---
    apiVersion: networking.istio.io/v1alpha3
    kind: Gateway
    metadata:
      name: knative-ingress-gateway
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
        networking.knative.dev/ingress-provider: istio
    spec:
      selector:
        istio: ingressgateway
      servers:
        - port:
            number: 80
            name: http
            protocol: HTTP
          hosts:
            - "*"
    ---
    apiVersion: networking.istio.io/v1alpha3
    kind: Gateway
    metadata:
      name: cluster-local-gateway
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
        networking.knative.dev/ingress-provider: istio
    spec:
      selector:
        istio: cluster-local-gateway
      servers:
        - port:
            number: 80
            name: http
            protocol: HTTP
          hosts:
            - "*"
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: certificates.networking.internal.knative.dev
      labels:
        serving.knative.dev/release: devel
        knative.dev/crd-install: "true"
    spec:
      group: networking.internal.knative.dev
      version: v1alpha1
      names:
        kind: Certificate
        plural: certificates
        singular: certificate
        categories:
          - knative-internal
          - networking
        shortNames:
          - kcert
      scope: Namespaced
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: Ready
          type: string
          JSONPath: ".status.conditions[?(@.type==\"Ready\")].status"
        - name: Reason
          type: string
          JSONPath: ".status.conditions[?(@.type==\"Ready\")].reason"
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: configurations.serving.knative.dev
      labels:
        serving.knative.dev/release: devel
        knative.dev/crd-install: "true"
        duck.knative.dev/podspecable: "true"
    spec:
      group: serving.knative.dev
      versions:
        - name: v1alpha1
          served: true
          storage: true
        - name: v1beta1
          served: true
          storage: false
        - name: v1
          served: true
          storage: false
      names:
        kind: Configuration
        plural: configurations
        singular: configuration
        categories:
          - all
          - knative
          - serving
        shortNames:
          - config
          - cfg
      scope: Namespaced
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: LatestCreated
          type: string
          JSONPath: .status.latestCreatedRevisionName
        - name: LatestReady
          type: string
          JSONPath: .status.latestReadyRevisionName
        - name: Ready
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].status"
        - name: Reason
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].reason"
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: images.caching.internal.knative.dev
      labels:
        knative.dev/crd-install: "true"
    spec:
      group: caching.internal.knative.dev
      version: v1alpha1
      names:
        kind: Image
        plural: images
        singular: image
        categories:
          - knative-internal
          - caching
        shortNames:
          - img
      scope: Namespaced
      subresources:
        status: {}
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: ingresses.networking.internal.knative.dev
      labels:
        serving.knative.dev/release: devel
        knative.dev/crd-install: "true"
    spec:
      group: networking.internal.knative.dev
      versions:
        - name: v1alpha1
          served: true
          storage: true
      names:
        kind: Ingress
        plural: ingresses
        singular: ingress
        categories:
          - knative-internal
          - networking
        shortNames:
          - kingress
      scope: Namespaced
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: Ready
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].status"
        - name: Reason
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].reason"
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: metrics.autoscaling.internal.knative.dev
      labels:
        serving.knative.dev/release: devel
        knative.dev/crd-install: "true"
    spec:
      group: autoscaling.internal.knative.dev
      version: v1alpha1
      names:
        kind: Metric
        plural: metrics
        singular: metric
        categories:
          - knative-internal
          - autoscaling
      scope: Namespaced
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: Ready
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].status"
        - name: Reason
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].reason"
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: podautoscalers.autoscaling.internal.knative.dev
      labels:
        serving.knative.dev/release: devel
        knative.dev/crd-install: "true"
    spec:
      group: autoscaling.internal.knative.dev
      versions:
        - name: v1alpha1
          served: true
          storage: true
      names:
        kind: PodAutoscaler
        plural: podautoscalers
        singular: podautoscaler
        categories:
          - knative-internal
          - autoscaling
        shortNames:
          - kpa
          - pa
      scope: Namespaced
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: DesiredScale
          type: integer
          JSONPath: ".status.desiredScale"
        - name: ActualScale
          type: integer
          JSONPath: ".status.actualScale"
        - name: Ready
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].status"
        - name: Reason
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].reason"
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: revisions.serving.knative.dev
      labels:
        serving.knative.dev/release: devel
        knative.dev/crd-install: "true"
    spec:
      group: serving.knative.dev
      versions:
        - name: v1alpha1
          served: true
          storage: true
        - name: v1beta1
          served: true
          storage: false
        - name: v1
          served: true
          storage: false
      names:
        kind: Revision
        plural: revisions
        singular: revision
        categories:
          - all
          - knative
          - serving
        shortNames:
          - rev
      scope: Namespaced
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: Config Name
          type: string
          JSONPath: ".metadata.labels['serving\\.knative\\.dev/configuration']"
        - name: K8s Service Name
          type: string
          JSONPath: ".status.serviceName"
        - name: Generation
          type: string # int in string form :(
          JSONPath: ".metadata.labels['serving\\.knative\\.dev/configurationGeneration']"
        - name: Ready
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].status"
        - name: Reason
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].reason"
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: routes.serving.knative.dev
      labels:
        serving.knative.dev/release: devel
        knative.dev/crd-install: "true"
        duck.knative.dev/addressable: "true"
    spec:
      group: serving.knative.dev
      versions:
        - name: v1alpha1
          served: true
          storage: true
        - name: v1beta1
          served: true
          storage: false
        - name: v1
          served: true
          storage: false
      names:
        kind: Route
        plural: routes
        singular: route
        categories:
          - all
          - knative
          - serving
        shortNames:
          - rt
      scope: Namespaced
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: URL
          type: string
          JSONPath: .status.url
        - name: Ready
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].status"
        - name: Reason
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].reason"
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: services.serving.knative.dev
      labels:
        serving.knative.dev/release: devel
        knative.dev/crd-install: "true"
        duck.knative.dev/addressable: "true"
        duck.knative.dev/podspecable: "true"
    spec:
      group: serving.knative.dev
      versions:
        - name: v1alpha1
          served: true
          storage: true
        - name: v1beta1
          served: true
          storage: false
        - name: v1
          served: true
          storage: false
      names:
        kind: Service
        plural: services
        singular: service
        categories:
          - all
          - knative
          - serving
        shortNames:
          - kservice
          - ksvc
      scope: Namespaced
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: URL
          type: string
          JSONPath: .status.url
        - name: LatestCreated
          type: string
          JSONPath: .status.latestCreatedRevisionName
        - name: LatestReady
          type: string
          JSONPath: .status.latestReadyRevisionName
        - name: Ready
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].status"
        - name: Reason
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].reason"
    ---
    apiVersion: apiextensions.k8s.io/v1beta1
    kind: CustomResourceDefinition
    metadata:
      name: serverlessservices.networking.internal.knative.dev
      labels:
        serving.knative.dev/release: devel
        knative.dev/crd-install: "true"
    spec:
      group: networking.internal.knative.dev
      versions:
        - name: v1alpha1
          served: true
          storage: true
      names:
        kind: ServerlessService
        plural: serverlessservices
        singular: serverlessservice
        categories:
          - knative-internal
          - networking
        shortNames:
          - sks
      scope: Namespaced
      subresources:
        status: {}
      additionalPrinterColumns:
        - name: Mode
          type: string
          JSONPath: ".spec.mode"
        - name: ServiceName
          type: string
          JSONPath: ".status.serviceName"
        - name: PrivateServiceName
          type: string
          JSONPath: ".status.privateServiceName"
        - name: Ready
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].status"
        - name: Reason
          type: string
          JSONPath: ".status.conditions[?(@.type=='Ready')].reason"
    ---
    apiVersion: admissionregistration.k8s.io/v1beta1
    kind: ValidatingWebhookConfiguration
    metadata:
      name: config.webhook.serving.knative.dev
      labels:
        serving.knative.dev/release: devel
    webhooks:
      - admissionReviewVersions:
          - v1beta1
        clientConfig:
          service:
            name: webhook
            namespace: knative-serving
        failurePolicy: Fail
        sideEffects: None
        name: config.webhook.serving.knative.dev
        namespaceSelector:
          matchExpressions:
            - key: serving.knative.dev/release
              operator: Exists
    ---
    apiVersion: admissionregistration.k8s.io/v1beta1
    kind: MutatingWebhookConfiguration
    metadata:
      name: webhook.serving.knative.dev
      labels:
        serving.knative.dev/release: devel
    webhooks:
      - admissionReviewVersions:
          - v1beta1
        clientConfig:
          service:
            name: webhook
            namespace: knative-serving
        failurePolicy: Fail
        sideEffects: None
        name: webhook.serving.knative.dev
    ---
    apiVersion: admissionregistration.k8s.io/v1beta1
    kind: ValidatingWebhookConfiguration
    metadata:
      name: validation.webhook.serving.knative.dev
      labels:
        serving.knative.dev/release: devel
    webhooks:
      - admissionReviewVersions:
          - v1beta1
        clientConfig:
          service:
            name: webhook
            namespace: knative-serving
        failurePolicy: Fail
        sideEffects: None
        name: validation.webhook.serving.knative.dev
    ---
    apiVersion: v1
    kind: Secret
    metadata:
      name: webhook-certs
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
    ---
    apiVersion: caching.internal.knative.dev/v1alpha1
    kind: Image
    metadata:
      name: queue-proxy
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
    spec:
      image: quay.io/openshift-knative/knative-serving-queue:v0.12.1
    ---
    apiVersion: autoscaling/v2beta1
    kind: HorizontalPodAutoscaler
    metadata:
      name: activator
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
    spec:
      minReplicas: 1
      maxReplicas: 20
      scaleTargetRef:
        apiVersion: apps/v1
        kind: Deployment
        name: activator
      metrics:
        - type: Resource
          resource:
            name: cpu
            targetAverageUtilization: 100
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: activator
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
    spec:
      selector:
        matchLabels:
          app: activator
          role: activator
      template:
        metadata:
          annotations:
            cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
          labels:
            app: activator
            role: activator
            serving.knative.dev/release: devel
        spec:
          serviceAccountName: controller
          containers:
            - name: activator
              image: quay.io/openshift-knative/knative-serving-activator:v0.12.1
              resources:
                requests:
                  cpu: 300m
                  memory: 60Mi
                limits:
                  cpu: 1000m
                  memory: 600Mi
              env:
                - name: GOGC
                  value: "500"
                - name: POD_NAME
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.name
                - name: POD_IP
                  valueFrom:
                    fieldRef:
                      fieldPath: status.podIP
                - name: SYSTEM_NAMESPACE
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.namespace
                - name: CONFIG_LOGGING_NAME
                  value: config-logging
                - name: CONFIG_OBSERVABILITY_NAME
                  value: config-observability
                - name: METRICS_DOMAIN
                  value: knative.dev/internal/serving
              securityContext:
                allowPrivilegeEscalation: false
              ports:
                - name: metrics
                  containerPort: 9090
                - name: profiling
                  containerPort: 8008
                - name: http1
                  containerPort: 8012
                - name: h2c
                  containerPort: 8013
              readinessProbe: &probe
                httpGet:
                  port: 8012
                  httpHeaders:
                    - name: k-kubelet-probe
                      value: "activator"
              livenessProbe: *probe
          terminationGracePeriodSeconds: 300
    ---
    apiVersion: v1
    kind: Service
    metadata:
      name: activator-service
      namespace: knative-serving
      labels:
        app: activator
        serving.knative.dev/release: devel
    spec:
      selector:
        app: activator
      ports:
        - name: http-metrics
          port: 9090
          targetPort: 9090
        - name: http-profiling
          port: 8008
          targetPort: 8008
        - name: http
          port: 80
          targetPort: 8012
        - name: http2
          port: 81
          targetPort: 8013
      type: ClusterIP
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: autoscaler-hpa
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
        autoscaling.knative.dev/autoscaler-provider: hpa
    spec:
      selector:
        matchLabels:
          app: autoscaler-hpa
      template:
        metadata:
          annotations:
            cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
          labels:
            app: autoscaler-hpa
            serving.knative.dev/release: devel
        spec:
          serviceAccountName: controller
          containers:
            - name: autoscaler-hpa
              image: quay.io/openshift-knative/knative-serving-autoscaler-hpa:v0.12.1
              resources:
                requests:
                  cpu: 30m
                  memory: 40Mi
                limits:
                  cpu: 300m
                  memory: 400Mi
              env:
                - name: SYSTEM_NAMESPACE
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.namespace
                - name: CONFIG_LOGGING_NAME
                  value: config-logging
                - name: CONFIG_OBSERVABILITY_NAME
                  value: config-observability
                - name: METRICS_DOMAIN
                  value: knative.dev/serving
              securityContext:
                allowPrivilegeEscalation: false
              ports:
                - name: metrics
                  containerPort: 9090
                - name: profiling
                  containerPort: 8008
    ---
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: autoscaler-hpa
        serving.knative.dev/release: devel
        autoscaling.knative.dev/autoscaler-provider: hpa
      name: autoscaler-hpa
      namespace: knative-serving
    spec:
      ports:
        - name: http-metrics
          port: 9090
          targetPort: 9090
        - name: http-profiling
          port: 8008
          targetPort: 8008
      selector:
        app: autoscaler-hpa
    ---
    apiVersion: apps/v1
    kind: Deployment
    metadata:
      name: autoscaler
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
    spec:
      replicas: 1
      selector:
        matchLabels:
          app: autoscaler
      template:
        metadata:
          annotations:
            cluster-autoscaler.kubernetes.io/safe-to-evict: "false"
          labels:
            app: autoscaler
            serving.knative.dev/release: devel
        spec:
          serviceAccountName: controller
          containers:
            - name: autoscaler
              image: quay.io/openshift-knative/knative-serving-autoscaler:v0.12.1
              resources:
                requests:
                  cpu: 30m
                  memory: 40Mi
                limits:
                  cpu: 300m
                  memory: 400Mi
              env:
                - name: SYSTEM_NAMESPACE
                  valueFrom:
                    fieldRef:
                      fieldPath: metadata.namespace
                - name: CONFIG_LOGGING_NAME
                  value: config-logging
                - name: CONFIG_OBSERVABILITY_NAME
                  value: config-observability
                - name: METRICS_DOMAIN
                  value: knative.dev/serving
              securityContext:
                allowPrivilegeEscalation: false
              ports:
                - name: metrics
                  containerPort: 9090
                - name: profiling
                  containerPort: 8008
                - name: websocket
                  containerPort: 8080
                - name: custom-metrics
                  containerPort: 8443
              readinessProbe: &probe
                httpGet:
                  port: 8080
                  httpHeaders:
                    - name: k-kubelet-probe
                      value: "autoscaler"
              livenessProbe: *probe
              args:
                - "--secure-port=8443"
                - "--cert-dir=/tmp"
    ---
    apiVersion: v1
    kind: Service
    metadata:
      labels:
        app: autoscaler
        serving.knative.dev/release: devel
      name: autoscaler
      namespace: knative-serving
    spec:
      ports:
        - name: http-metrics
          port: 9090
          targetPort: 9090
        - name: http-profiling
          port: 8008
          targetPort: 8008
        - name: http
          port: 8080
          targetPort: 8080
        - name: https-custom-metrics
          port: 443
          targetPort: 8443
      selector:
        app: autoscaler
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: config-autoscaler
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
    data:
      _example: |
        container-concurrency-target-percentage: "70"
        container-concurrency-target-default: "100"
        requests-per-second-target-default: "200"
        target-burst-capacity: "200"
        stable-window: "60s"
        panic-window-percentage: "10.0"
        panic-threshold-percentage: "200.0"
        max-scale-up-rate: "1000.0"
        max-scale-down-rate: "2.0"
        enable-scale-to-zero: "true"
        tick-interval: "2s"
        scale-to-zero-grace-period: "30s"
        enable-graceful-scaledown: "false"
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: config-defaults
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
    data:
      _example: |
        revision-timeout-seconds: "300" # 5 minutes
        max-revision-timeout-seconds: "600" # 10 minutes
        revision-cpu-request: "400m" # 0.4 of a CPU (aka 400 milli-CPU)
        revision-memory-request: "100M" # 100 megabytes of memory
        revision-cpu-limit: "1000m" # 1 CPU (aka 1000 milli-CPU)
        revision-memory-limit: "200M" # 200 megabytes of memory
        container-name-template: "user-container"
        container-concurrency: "0"
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: config-deployment
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
    data:
      queueSidecarImage: quay.io/openshift-knative/knative-serving-queue:v0.12.1
      _example: |
        registriesSkippingTagResolving: "ko.local,dev.local"
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: config-domain
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
    data:
      _example: |
        example.com: |
        example.org: |
          selector:
            app: nonprofit
        svc.cluster.local: |
          selector:
            app: secret
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: config-gc
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
    data:
      _example: |
        stale-revision-create-delay: "48h"
        stale-revision-timeout: "15h"
        stale-revision-minimum-generations: "20"
        stale-revision-lastpinned-debounce: "5h"
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: config-istio
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
        networking.knative.dev/ingress-provider: istio
    data:
      _example: |
        gateway.knative-serving.knative-ingress-gateway: "istio-ingressgateway.istio-system.svc.cluster.local"
        local-gateway.knative-serving.cluster-local-gateway: "cluster-local-gateway.istio-system.svc.cluster.local"
        local-gateway.mesh: "mesh"
    ---
    apiVersion: v1
    kind: ConfigMap
    metadata:
      name: config-logging
      namespace: knative-serving
      labels:
        serving.knative.dev/release: devel
    data:
      _example: |
        zap-logger-config: |
          {
            "level": "info",
            "development": false,
            "outputPaths": ["stdout"],
            "errorOutputPaths": ["stderr"],
            "encoding": "json",
            "encoderConfig": {
              "timeKey": "ts",
"levelKey": "level", "nameKey": "logger", "callerKey": "caller", "messageKey": "msg", "stacktraceKey": "stacktrace", "lineEnding": "", "levelEncoder": "", "timeEncoder": "iso8601", "durationEncoder": "", "callerEncoder": "" } } loglevel.controller: "info" loglevel.autoscaler: "info" loglevel.queueproxy: "info" loglevel.webhook: "info" loglevel.activator: "info" --- apiVersion: v1 kind: ConfigMap metadata: name: config-network namespace: knative-serving labels: serving.knative.dev/release: devel data: _example: | istio.sidecar.includeOutboundIPRanges: "*" ingress.class: "istio.ingress.networking.knative.dev" certificate.class: "cert-manager.certificate.networking.internal.knative.dev" domainTemplate: "{{.Name}}.{{.Namespace}}.{{.Domain}}" tagTemplate: "{{.Tag}}-{{.Name}}" autoTLS: "Disabled" httpProtocol: "Enabled" --- apiVersion: v1 kind: ConfigMap metadata: name: config-observability namespace: knative-serving labels: serving.knative.dev/release: devel data: _example: | logging.enable-var-log-collection: "false" logging.revision-url-template: | http://localhost:8001/api/v1/namespaces/knative-monitoring/services/kibana-logging/proxy/app/kibana#/discover?_a=(query:(match:(kubernetes.labels.serving-knative-dev%2FrevisionUID:(query:'${REVISION_UID}',type:phrase)))) logging.request-log-template: '{"httpRequest": {"requestMethod": "{{.Request.Method}}", "requestUrl": "{{js .Request.RequestURI}}", "requestSize": "{{.Request.ContentLength}}", "status": {{.Response.Code}}, "responseSize": "{{.Response.Size}}", "userAgent": "{{js .Request.UserAgent}}", "remoteIp": "{{js .Request.RemoteAddr}}", "serverIp": "{{.Revision.PodIP}}", "referer": "{{js .Request.Referer}}", "latency": "{{.Response.Latency}}s", "protocol": "{{.Request.Proto}}"}, "traceId": "{{index .Request.Header "X-B3-Traceid"}}"}' logging.enable-probe-request-log: "false" metrics.backend-destination: prometheus metrics.request-metrics-backend-destination: prometheus metrics.stackdriver-project-id: "" metrics.allow-stackdriver-custom-metrics: "false" profiling.enable: "false" --- apiVersion: v1 kind: ConfigMap metadata: name: config-tracing namespace: knative-serving labels: serving.knative.dev/release: devel data: _example: | backend: "none" zipkin-endpoint: "http://zipkin.istio-system.svc.cluster.local:9411/api/v2/spans" stackdriver-project-id: "my-project" debug: "false" sample-rate: "0.1" --- apiVersion: apps/v1 kind: Deployment metadata: name: controller namespace: knative-serving labels: serving.knative.dev/release: devel spec: selector: matchLabels: app: controller template: metadata: annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: "true" labels: app: controller serving.knative.dev/release: devel spec: serviceAccountName: controller containers: - name: controller image: quay.io/openshift-knative/knative-serving-controller:v0.12.1 resources: requests: cpu: 100m memory: 100Mi limits: cpu: 1000m memory: 1000Mi env: - name: SYSTEM_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: CONFIG_LOGGING_NAME value: config-logging - name: CONFIG_OBSERVABILITY_NAME value: config-observability - name: METRICS_DOMAIN value: knative.dev/internal/serving securityContext: allowPrivilegeEscalation: false ports: - name: metrics containerPort: 9090 - name: profiling containerPort: 8008 --- apiVersion: v1 kind: Service metadata: labels: app: controller serving.knative.dev/release: devel name: controller namespace: knative-serving spec: ports: - name: http-metrics port: 9090 targetPort: 9090 - name: http-profiling port: 8008 
targetPort: 8008 selector: app: controller --- apiVersion: apiregistration.k8s.io/v1beta1 kind: APIService metadata: name: v1beta1.custom.metrics.k8s.io labels: serving.knative.dev/release: devel autoscaling.knative.dev/metric-provider: custom-metrics spec: service: name: autoscaler namespace: knative-serving group: custom.metrics.k8s.io version: v1beta1 insecureSkipTLSVerify: true groupPriorityMinimum: 100 versionPriority: 100 --- apiVersion: apps/v1 kind: Deployment metadata: name: networking-istio namespace: knative-serving labels: serving.knative.dev/release: devel networking.knative.dev/ingress-provider: istio spec: selector: matchLabels: app: networking-istio template: metadata: annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: "true" sidecar.istio.io/inject: "false" labels: app: networking-istio serving.knative.dev/release: devel spec: serviceAccountName: controller containers: - name: networking-istio image: quay.io/openshift-knative/knative-serving-istio:v0.12.1 resources: requests: cpu: 30m memory: 40Mi limits: cpu: 300m memory: 400Mi env: - name: SYSTEM_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: CONFIG_LOGGING_NAME value: config-logging - name: CONFIG_OBSERVABILITY_NAME value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving securityContext: allowPrivilegeEscalation: false ports: - name: metrics containerPort: 9090 - name: profiling containerPort: 8008 --- apiVersion: apps/v1 kind: Deployment metadata: name: webhook namespace: knative-serving labels: serving.knative.dev/release: devel spec: selector: matchLabels: app: webhook role: webhook template: metadata: annotations: cluster-autoscaler.kubernetes.io/safe-to-evict: "false" labels: app: webhook role: webhook serving.knative.dev/release: devel spec: serviceAccountName: controller containers: - name: webhook image: quay.io/openshift-knative/knative-serving-webhook:v0.12.1 resources: requests: cpu: 20m memory: 20Mi limits: cpu: 200m memory: 2Gi env: - name: SYSTEM_NAMESPACE valueFrom: fieldRef: fieldPath: metadata.namespace - name: CONFIG_LOGGING_NAME value: config-logging - name: CONFIG_OBSERVABILITY_NAME value: config-observability - name: METRICS_DOMAIN value: knative.dev/serving securityContext: allowPrivilegeEscalation: false ports: - name: metrics containerPort: 9090 - name: profiling containerPort: 8008 --- apiVersion: v1 kind: Service metadata: labels: role: webhook serving.knative.dev/release: devel name: webhook namespace: knative-serving spec: ports: - name: http-metrics port: 9090 targetPort: 9090 - name: http-profiling port: 8008 targetPort: 8008 - name: https-webhook port: 443 targetPort: 8443 selector: role: webhook