
Kafka Notes

Server


Install Java

apt-get install default-jdk-headless

Download Kafka: https://kafka.apache.org/downloads

Apache Kafka depends on ZooKeeper for cluster management. Therefore, ZooKeeper must be started before launching Kafka. There is no need to install ZooKeeper explicitly, as it ships with Apache Kafka.

Start ZooKeeper

bin/zookeeper-server-start.sh config/zookeeper.properties

Start the Kafka server

bin/kafka-server-start.sh config/server.properties
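
A quick way to smoke-test the broker with the CLI tools shipped in bin/ (the topic name "test" is an example; note the producer flag is --broker-list on older releases such as 2.4 and --bootstrap-server on newer ones):

# Create a test topic, produce one message, read it back
bin/kafka-topics.sh --create --bootstrap-server localhost:9092 --replication-factor 1 --partitions 1 --topic test
echo hello | bin/kafka-console-producer.sh --broker-list localhost:9092 --topic test
bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic test --from-beginning --max-messages 1
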
Other settings

Kafka/config/server.properties

delete.topic.enable=true
# Note: acks=all is a producer setting; it pairs with the broker/topic setting min.insync.replicas
acks=all
min.insync.replicas=2

Client

Assorted notes

EnableSslCertificateVerification = false
security.protocol=SASL_SSL

# List broker metadata (brokers, topics, partitions)
kafkacat -b localhost:9092 -L
# Same, with broker debug output
kafkacat -b localhost:9092 -L -d broker
 
# List topics via ZooKeeper (legacy flag)
kafka-topics --list --zookeeper zkinfo
kafka-topics --zookeeper zk01.example.com:2181 --list
 
 
# kafka-consumer-groups --new-consumer --bootstrap-server broker01.example.com:9092 --describe --group flume
export KAFKA_HEAP_OPTS=-Xmx1024M
kafka-consumer-groups --bootstrap-server host:9092 --list
kafka-consumer-groups --bootstrap-server host:9092 --describe --group foo
 
kafka-consumer-groups --bootstrap-server host.server.com:9093 --describe --command-config client.properties
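
The client.properties file passed via --command-config could look like this for SASL_SSL (a sketch; the mechanism, credentials and truststore path are placeholder assumptions):

security.protocol=SASL_SSL
sasl.mechanism=PLAIN
# Hypothetical credentials and truststore
sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="user" password="secret";
ssl.truststore.location=/path/to/truststore.jks
ssl.truststore.password=changeit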

Dockerfile

FROM frolvlad/alpine-python2
COPY root/requirements.txt /
COPY root/opt /opt
 
#RUN apk add librdkafka
#RUN apk add gcc python2-dev libc-dev librdkafka-dev
 
RUN pip install --upgrade pip
RUN pip install --no-cache-dir -r /requirements.txt
WORKDIR /opt/app/
#CMD ["gunicorn", "-w 4", "main:app"]
CMD ["python", "/opt/app/run.py"]

requirements.txt

#librdkafka==0.11.6
simplejson
requests
six
#avro_json_serializer==0.5
avro_json_serializer
#avro==1.8.2
confluent_kafka
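
To build and run the image from this Dockerfile and requirements.txt (the tag kafka-client-py is an arbitrary example):

docker build -t kafka-client-py .
docker run --rm kafka-client-py
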
RedHat CentOS
# For librdkafka?
yum install gcc python-devel
 
# For the Oracle client
yum install libaio
 
# Kafka-cli
yum install java-latest-openjdk-headless
curl -O http://miroir.univ-lorraine.fr/apache/kafka/2.4.0/kafka_2.13-2.4.0.tgz
Docker

Docker CentOS

Dockerfile

#FROM centos:centos6.9
FROM centos:centos7
 
COPY root/requirements.txt /
COPY root/opt/app /opt/app
 
RUN yum install -y epel-release.noarch
RUN yum install -y python-pip
 
# For CentOS 6
#RUN curl https://bootstrap.pypa.io/2.6/get-pip.py -o get-pip.py
#RUN python get-pip.py
#RUN pip install --upgrade --ignore-installed pip setuptools
 
#RUN apk add librdkafka
#RUN apk add gcc python2-dev libc-dev librdkafka-dev
 
RUN pip install --upgrade pip
RUN pip install --no-cache-dir -r /requirements.txt
WORKDIR /opt/app
#CMD ["gunicorn", "-w 4", "main:app"]
CMD ["python", "/opt/app/run.py"]

FIXME


k8s notes - kubeadm init installation logs

root@vmdeb01:~# kubeadm init --config kubelet.yaml                                                        
W0912 12:20:41.002134    1433 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "ClusterConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
W0912 12:20:41.002461    1433 common.go:101] your configuration file uses a deprecated API spec: "kubeadm.k8s.io/v1beta3" (kind: "InitConfiguration"). Please use 'kubeadm config migrate --old-config old.yaml --new-config new.yaml', which will write the new, similar spec using a newer API version.
[init] Using Kubernetes version: v1.31.0                                                                 
[preflight] Running pre-flight checks
[preflight] Pulling images required for setting up a Kubernetes cluster
[preflight] This might take a minute or two, depending on the speed of your internet connection
[preflight] You can also perform this action beforehand using 'kubeadm config images pull'
W0912 12:20:41.069179    1433 checks.go:846] detected that the sandbox image "registry.k8s.io/pause:3.6" of the container runtime is inconsistent with that used by kubeadm. It is recommended to use "registry.k8s.io/pause:3.10" as the CRI sandbox image.
[certs] Using certificateDir folder "/etc/kubernetes/pki"
[certs] Generating "ca" certificate and key
[certs] Generating "apiserver" certificate and key
[certs] apiserver serving cert is signed for DNS names [k8s-master kubernetes kubernetes.default kubernetes.default.svc kubernetes.default.svc.cluster.local vmdeb01] and IPs [10.96.0.1 192.168.122.184]
[certs] Generating "apiserver-kubelet-client" certificate and key
[certs] Generating "front-proxy-ca" certificate and key
[certs] Generating "front-proxy-client" certificate and key
[certs] Generating "etcd/ca" certificate and key
[certs] Generating "etcd/server" certificate and key
[certs] etcd/server serving cert is signed for DNS names [localhost vmdeb01] and IPs [192.168.122.184 127.0.0.1 ::1]
[certs] Generating "etcd/peer" certificate and key
[certs] etcd/peer serving cert is signed for DNS names [localhost vmdeb01] and IPs [192.168.122.184 127.0.0.1 ::1]
[certs] Generating "etcd/healthcheck-client" certificate and key
[certs] Generating "apiserver-etcd-client" certificate and key
[certs] Generating "sa" key and public key
[kubeconfig] Using kubeconfig folder "/etc/kubernetes"
[kubeconfig] Writing "admin.conf" kubeconfig file
[kubeconfig] Writing "super-admin.conf" kubeconfig file
[kubeconfig] Writing "kubelet.conf" kubeconfig file
[kubeconfig] Writing "controller-manager.conf" kubeconfig file
[kubeconfig] Writing "scheduler.conf" kubeconfig file
[etcd] Creating static Pod manifest for local etcd in "/etc/kubernetes/manifests"
[control-plane] Using manifest folder "/etc/kubernetes/manifests"
[control-plane] Creating static Pod manifest for "kube-apiserver"
[control-plane] Creating static Pod manifest for "kube-controller-manager"
[control-plane] Creating static Pod manifest for "kube-scheduler"
[kubelet-start] Writing kubelet environment file with flags to file "/var/lib/kubelet/kubeadm-flags.env"
[kubelet-start] Writing kubelet configuration to file "/var/lib/kubelet/config.yaml"
[kubelet-start] Starting the kubelet
[wait-control-plane] Waiting for the kubelet to boot up the control plane as static Pods from directory "/etc/kubernetes/manifests"
[kubelet-check] Waiting for a healthy kubelet at http://127.0.0.1:10248/healthz. This can take up to 4m0s
[kubelet-check] The kubelet is healthy after 501.985567ms
[api-check] Waiting for a healthy API server. This can take up to 4m0s

[api-check] The API server is not healthy after 4m0.000872011s

Unfortunately, an error has occurred:
        context deadline exceeded

This error is likely caused by:
        - The kubelet is not running
        - The kubelet is unhealthy due to a misconfiguration of the node in some way (required cgroups disabled)

If you are on a systemd-powered system, you can try to troubleshoot the error with the following commands:
        - 'systemctl status kubelet'
        - 'journalctl -xeu kubelet'

Additionally, a control plane component may have crashed or exited when started by the container runtime. 
To troubleshoot, list all containers using your preferred container runtimes CLI.
Here is one example how you may list all running Kubernetes containers by using crictl:
        - 'crictl --runtime-endpoint unix:///var/run/containerd/containerd.sock ps -a | grep kube | grep -v pause'
        Once you have found the failing container, you can inspect its logs with:
        - 'crictl --runtime-endpoint unix:///var/run/containerd/containerd.sock logs CONTAINERID'
error execution phase wait-control-plane: could not initialize a Kubernetes cluster
To see the stack trace of this error execute with --v=5 or higher

How to Install a Kubernetes Cluster on Debian 11/12

1 Set Host Name and Update Hosts File

Run on the master node:

sudo hostnamectl hostname "k8s-master.local"      

/etc/hosts

192.168.1.23   k8s-master.local     k8s-master
192.168.1.24   k8s-worker01.local   k8s-worker01
192.168.1.25   k8s-worker02.local   k8s-worker02

2 Disable Swap on All Nodes

sudo swapoff -a
sudo sed -i '/ swap / s/^\(.*\)$/#\1/g' /etc/fstab
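
To verify that swap is really off (swapon should print nothing, free should show 0 swap):

sudo swapon --show
free -h
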
3 Add Firewall Rules for Kubernetes Cluster

On the master

sudo ufw allow 6443/tcp
sudo ufw allow 2379/tcp
sudo ufw allow 2380/tcp
sudo ufw allow 10250/tcp
sudo ufw allow 10251/tcp
sudo ufw allow 10252/tcp
sudo ufw allow 10255/tcp
sudo ufw reload

On the workers

sudo ufw allow 10250/tcp
sudo ufw allow 30000:32767/tcp
sudo ufw reload

4 Install the containerd runtime on all nodes

cat <<EOF | sudo tee /etc/modules-load.d/containerd.conf 
overlay 
br_netfilter
EOF
 
sudo modprobe overlay 
sudo modprobe br_netfilter
 
cat <<EOF | sudo tee /etc/sysctl.d/99-kubernetes-k8s.conf
net.bridge.bridge-nf-call-iptables = 1
net.ipv4.ip_forward = 1 
net.bridge.bridge-nf-call-ip6tables = 1 
EOF
 
sudo sysctl --system
sudo apt update
sudo apt -y install containerd
containerd config default | sudo tee /etc/containerd/config.toml >/dev/null
 
vim /etc/containerd/config.toml

/etc/containerd/config.toml

          [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.runc.options]
            BinaryName = ""
            CriuImagePath = ""
            CriuPath = ""
            CriuWorkPath = ""
            IoGid = 0
            IoUid = 0
            NoNewKeyring = false 
            NoPivotRoot = false
            Root = ""
            ShimCgroup = ""
            SystemdCgroup = true	# change from 'false' to 'true'
sudo systemctl restart containerd
sudo systemctl enable containerd
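
A quick sanity check that the SystemdCgroup change was picked up:

containerd config dump | grep SystemdCgroup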

5 Add Kubernetes Apt Repository

On Debian 11/12, Kubernetes packages are not available in the default package repositories. Add the Kubernetes apt repository on all the nodes:

echo "deb [signed-by=/etc/apt/keyrings/kubernetes-apt-keyring.gpg] https://pkgs.k8s.io/core:/stable:/v1.31/deb/ /" | sudo tee /etc/apt/sources.list.d/kubernetes.list
curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.31/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg
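
On a minimal install, /etc/apt/keyrings may not exist yet (Debian 12 usually ships it, Debian 11 may not); create it before fetching the key, and make sure the key is in place before running apt update:

sudo mkdir -p /etc/apt/keyrings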

6 Install Kubernetes Tools

Next, install the Kubernetes tools, including kubeadm, kubelet, and kubectl on all the nodes.

sudo apt update
sudo apt install kubelet kubeadm kubectl -y
sudo apt-mark hold kubelet kubeadm kubectl

7 Install Kubernetes Cluster with Kubeadm

kubelet doesn't appreciate command-line options anymore (they are deprecated). Instead, I suggest creating a configuration file, say 'kubelet.yaml', with the following content.

kubelet.yaml

apiVersion: kubeadm.k8s.io/v1beta3
kind: InitConfiguration
---
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
kubernetesVersion: "1.28.0"         # Replace with your desired version
controlPlaneEndpoint: "k8s-master"  # Replace with your controller node
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
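
As the init logs above show, the kubeadm.k8s.io/v1beta3 API is deprecated on recent kubeadm releases; the file can be converted with the command kubeadm itself suggests (the output file name here is an arbitrary example):

kubeadm config migrate --old-config kubelet.yaml --new-config kubelet-new.yaml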

Now we are all set to initialize the Kubernetes cluster. Run the following command on the master node only:

sudo kubeadm init --config kubelet.yaml
mkdir -p $HOME/.kube
sudo cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
sudo chown $(id -u):$(id -g) $HOME/.kube/config
kubectl get nodes
kubectl cluster-info
 
# On each worker node, run the join command printed by 'kubeadm init', e.g.:
sudo kubeadm join vmdeb01:6443 --token 6jse32.ca55n57wj1wq65xc --discovery-token-ca-cert-hash sha256:a05d9c4b67cbe61de75af6ac0e7d87d632f578348239edd03d76e961bf7a679a
kubectl get nodes

8 Setup Pod Network Using Calico

kubectl apply -f https://raw.githubusercontent.com/projectcalico/calico/refs/heads/release-v3.28/manifests/calico.yaml

Allow the Calico ports in the OS firewall by running the following ufw commands on all the nodes:

sudo ufw allow 179/tcp
sudo ufw allow 4789/udp
sudo ufw allow 51820/udp
sudo ufw allow 51821/udp
sudo ufw reload
 
Then wait until all kube-system pods are Running:
 
watch kubectl get pods -n kube-system

9 Test Kubernetes Cluster Installation

To validate the Kubernetes cluster installation, deploy an nginx-based application via a Deployment. Run the following commands:

kubectl create deployment nginx-app --image=nginx --replicas 2
kubectl expose deployment nginx-app --name=nginx-web-svc --type NodePort --port 80 --target-port 80
kubectl describe svc nginx-web-svc

Test

curl http://k8s-worker01:32283
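
The NodePort (32283 here) is allocated at expose time and will differ per cluster; it can be retrieved with kubectl, for example:

kubectl get svc nginx-web-svc -o jsonpath='{.spec.ports[0].nodePort}'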

Uninstall

Changes made to this host by 'kubeadm init' or 'kubeadm join' will be reverted

kubeadm reset
 
rm -f /etc/cni/net.d/*
rm ~/.kube/ -rf
 
ip link del cni0
ip link del flannel.1
systemctl restart network    # on Debian the unit is 'networking'

Other


Initializing the master

kubeadm init
kubeadm init --control-plane-endpoint=192.168.100.12:6443 --apiserver-advertise-address=192.168.100.12  # --pod-network-cidr=10.32.0.0/12
# kubeadm token create --print-join-command

Note:

  • --pod-network-cidr: Weave ignores this option (it overrides it with its own setting)

Setting up the network for Pod-to-Pod communication

kubectl apply -f https://github.com/weaveworks/weave/releases/download/v2.8.1/weave-daemonset-k8s.yaml
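
To check that the network came up, reuse the earlier verification commands:

kubectl get pods -n kube-system
kubectl get nodes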

JSON and YAML Notes

jc        JSON Convert - JSONifies the output of many CLI tools and file types
jq        command-line JSON processor
json_pp   JSON Pretty Printer
JMESPath  a query language for JSON (used by Ansible's json_query filter)
jp        command-line interface to JMESPath

See:

  • JSONPath and go_template
  • JSON RFC 6902 patch


sudo apt-get install jc
dig www.google.com | jc --dig -p
[
  {
    "id": 17598,
    "opcode": "QUERY",
    "status": "NOERROR",
    "flags": [
      "qr",
      "rd",
      "ra"
    ],
    "query_num": 1,
    "answer_num": 1,
    "authority_num": 0,
    "additional_num": 1,
    "opt_pseudosection": {
      "edns": {
        "version": 0,
        "flags": [],
        "udp": 4096
      }
    },
    "question": {
      "name": "www.google.com.",
      "class": "IN",
      "type": "A"
    },
    "answer": [
      {
        "name": "www.google.com.",
        "class": "IN",
        "type": "A",
        "ttl": 38,
        "data": "142.250.178.132"
      }
    ],
    "query_time": 8,
    "server": "208.67.222.123#53(208.67.222.123) (UDP)",
    "when": "Sun Oct 16 21:54:04 CEST 2022",
    "rcvd": 59,
    "when_epoch": 1665950044,
    "when_epoch_utc": null
  }
]
# Pretty-print JSON and convert \uXXXX escapes back to UTF-8 (native2ascii ships with the JDK)
cat fic.json |python3 -m json.tool |native2ascii -encoding UTF-8 -reverse

Firefox extension: http://jsonview.com/

sudo apt-get install yajl-tools

yajl-tools

  • json_reformat
  • json_verify
 
# Import a JSON array into MongoDB
mongoimport --host localhost --db database --collection collection <fichier.json --jsonArray

Convert JSON to YAML

yq -P sample.json

Or alternatively

python -c 'import sys, yaml, json; yaml.safe_dump(json.load(sys.stdin), sys.stdout, default_flow_style=False)' < /opt/stack/keystone/etc/policy.v3cloudsample.json > /etc/keystone/policy.yaml
pip install jsbeautifier
js-beautify file.js

Python dict to JSON?

cat plop.t |tr \' \" |sed -e 's/None/""/g' |jq .
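
A more robust alternative, assuming the file really contains a Python dict literal, is to let Python parse it instead of tr/sed:

python3 -c 'import ast, json, sys; print(json.dumps(ast.literal_eval(sys.stdin.read())))' < plop.t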

VIM - Formatting JSON

:%!python -m json.tool
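
Since jq is available anyway, it also works as a vim filter:

:%!jq .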

Queries

With curl

curl -k https://aap-controller.local/api/v2/users/27/roles/ -X POST -u user:password --data-raw '{"id":534}'

With jq, see: https://hyperpolyglot.org/json

Top level sections

cat single-instance.json |jq 'keys[]'

Wildcard example

 
$ curl -s -u "${AAP_USER}:${AAP_PASS}" "https://aap.acme.local/api/v2/job_templates/" | jq '.results[0].id'
1680

$ curl -s -u "${AAP_USER}:${AAP_PASS}" "https://aap.acme.local/api/v2/job_templates/" | jq '.results[].id' | tail -3
1572
1569
2343

jq output without quotes:

jq -r
docker inspect portainer/portainer-ce |jq '.[0].RepoTags'
ansible -i testsrv, -m setup all |sed -e 's/^.*=>//' |jq -r '.ansible_facts.ansible_mounts[] |.mount'
Filters

String-to-integer conversion with tonumber

Example

jq '.blockdevices[].size | tonumber'
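
The .blockdevices key suggests the input comes from lsblk; a full pipeline might look like this (assuming a util-linux version that emits sizes as JSON strings, hence the tonumber):

lsblk -Jb | jq '.blockdevices[].size | tonumber'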

Other

Ansible - Using booleans in extra vars via JSON
ansible-playbook playbook.yml -i inventory.ini -e '{ "remove_vhost": false }'

YAML

yq portable command-line YAML processor
yb YAML Parser in bash


What jq is to JSON, yq is to YAML.

yq can also convert JSON files to YAML

cat .kube/config |egrep -v "\-data"  |yq .
# pip install --user yq
sudo apt-get install yq
openstack image show IMAGE1 -c properties -f yaml |yq '.properties.direct_url'

Filtering on keys

cat meta/runtime.yml | yq '.action_groups | keys'
# Or
cat meta/runtime.yml | yq -r '.action_groups | keys[]'

Modifying a YAML file

With yq:
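
An in-place edit with mikefarah's yq v4 (the Go version) might look like this (key and value are arbitrary examples):

yq -i '.vars.type = "vm"' inv.yaml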

In Python and other languages:

With Ansible: https://github.com/kwoodson/ansible-role-yedit

Non-specific tag example

configuration: !include config.d/*.yaml

References (anchors and aliases)

vars:
  webapp:
      version: &my_version 1.0
      custom_name:
          - "ToDo_App"
          - *my_version
Python example
import yaml
 
with open("filter.yml", "r") as yamlfile:
    # filters = yaml.load(yamlfile, Loader=yaml.FullLoader)
    filters = yaml.safe_load(yamlfile)

Escaping certain characters

Passing a YAML file to a REST API that expects JSON
cat inv.yaml |sed -e 's/$/\\n/' -e 's/"/\\"/g' |tr -d '\n'
YAML-to-JSON example

inv.yaml

---
server1:
  hosts:
    server1.inf.acme.lan:
  vars:
    SET_LINGERING_LIST:
      - SET_LINGERING_USER: testplop
        SET_LINGERING_ENABLED: "false"
      - SET_LINGERING_USER: testplop
        SET_LINGERING_ENABLED: "true"

type_host:
  hosts:
    server1.inf.acme.lan: {}
  vars:
    type: host
#! /bin/bash
set -euo pipefail
 
YAML="$(cat inv.yaml |sed -e 's/$/\\n/' -e 's/"/\\"/g' |tr -d '\n')"
 
curl -v -k -u user1:'P@ssw0rd' -H 'Content-Type: application/json' -X POST https://awx.acme.fr/api/v2/job_templates/81/launch/ -d '{
"extra_vars": {
"foo1": "bar1",
"foo2": "bar2",
"inventory_content": "'"${YAML}"'"
 
}
}'

Or with a heredoc

YAML="$(cat <<'EOF' | sed -e 's/$/\\n/' -e 's/"/\\"/g' |tr -d '\n'
---
server1:
  hosts:
    server1.inf.acme.lan:
  vars:
    SET_LINGERING_LIST:
      - SET_LINGERING_USER: testplop
        SET_LINGERING_ENABLED: "false"
      - SET_LINGERING_USER: testplop
        SET_LINGERING_ENABLED: "true"
 
type_host:
  hosts:
    server1.inf.acme.lan: {}
  vars:
    type: host
EOF
)"
By converting the YAML to JSON

By passing JSON inside JSON as a string

#JS="$(ansible-inventory -i inv.yaml --list)"
JS="$(cat inv.js)"
 
JS="$(echo $JS |sed 's/"/\\"/g')"
URL=''
 
curl -v -k -u user1:'P@ssw0rd' -H 'Content-Type: application/json' -X POST https://awx.acme.fr/api/v2/job_templates/81/launch/ -d '{
"extra_vars": {
"foo1": "bar1",
"foo2": "bar2",
"inventory_content": "'"${JS}"'"
 
}
}'
YAML example
CRON_HOST_TARGETS:
  all:
    - CRON_FILENAME: crontab_app1
      CRON_USER: app1
      CRONS:
        - DESCRIPTION: "topmem.sh run every hour"
          SCHED: "0 * * * *"
          CMDLINE: "~/scripts/topmem.sh >> ~app1/logs/topmem.out 2>&1"

  sc01:
    - CRON_FILENAME: crontab_app1_sc01
      CRON_USER: app1
      CRONS:
        - DESCRIPTION: "archiving of some application files"
          SCHED: "06 03 * * *"
          CMDLINE: ". ~/scripts/archive_plopsc.sh -l${LINE} >> ~app1/logs/archive_sc_${LINE}.out 2>&1"

        - DESCRIPTION: Performance reports
          SCHED: "05 03 * * *"
          CMDLINE: ". ~/scripts/appexec.sh appenv.sh ${LINE} > /dev/null; ~/scripts/stat_report_run.sh ${LINE} > /dev/null"

Problem

Compile error - escaping certain characters

plop.yml

rec:plop:
  stage: plop
  variables:
    ENV: "RECETTE"

prd:plop:
  stage: plop
  variables:
    ENV: "PRODUCTION"
$ cat plop.yml |yq '.rec:plop'
jq: error: syntax error, unexpected ':', expecting $end (Unix shell quoting issues?) at <top-level>, line 1:
.rec:plop    
jq: 1 compile error
Solution
cat plop.yml |yq '.["rec:plop"]'