Compare commits

...

72 Commits

Author SHA1 Message Date
b86f643f82 fix other runners 2025-08-26 14:07:34 +02:00
598a53ad10 typo 2025-08-26 14:03:45 +02:00
403f632493 test 2025-08-26 14:02:59 +02:00
9cbb65d19d test 2025-08-26 14:01:01 +02:00
46492f751e add to build context 2025-08-26 13:58:28 +02:00
bee14d3b79 test 2025-08-26 13:57:11 +02:00
23949c175f oeps 2025-08-26 13:54:56 +02:00
c715a61237 test3 2025-08-26 13:54:08 +02:00
06bddf89e2 test2 2025-08-26 13:51:06 +02:00
9d73a18f38 test 2025-08-26 13:50:05 +02:00
1e88e10d87 add workflows 2025-08-26 13:46:15 +02:00
62e1cabf1a clean gitignore 2025-08-26 13:43:59 +02:00
root  660fa62df7  push gitignore  2024-10-28 11:05:33 +01:00
root  0ae6edebc1  push gitignore  2024-10-28 10:59:16 +01:00
    Checks: Build / Build and analyze (push) Failing after 1m15s
root  59c0e8b5cb  updated parakamiko to be above 3.4.1  2024-09-20 10:03:01 +02:00
    Checks: Build / Build and analyze (push) Failing after 1m17s
root  d24d0bae09  add dhcp servers  2024-09-17 13:31:48 +02:00
    Checks: Build / Build and analyze (push) Successful in 15s
root  fee1736436  add dhcp servers  2024-09-17 13:29:16 +02:00
    Checks: Build / Build and analyze (push) Successful in 13s
root  3a1993db91  add dhcp servers  2024-09-17 13:24:30 +02:00
    Checks: Build / Build and analyze (push) Successful in 12s
root  9d8a658a57  add dhcp servers  2024-09-17 13:22:08 +02:00
    Checks: Build / Build and analyze (push) Successful in 14s
root  8adf4af10b  add dhcp servers  2024-09-17 13:16:54 +02:00
    Checks: Build / Build and analyze (push) Successful in 13s
root  3988abbb98  add dhcp servers  2024-09-17 13:07:50 +02:00
    Checks: Build / Build and analyze (push) Successful in 15s
root  36a2d0d72d  add dhcp servers  2024-09-17 13:06:24 +02:00
    Checks: Build / Build and analyze (push) Failing after 7s
root  8f1e82e2bf  add dhcp servers  2024-09-17 13:06:10 +02:00
    Checks: Build / Build and analyze (push) Failing after 10s
root  68b7ec2b08  add dhcp servers  2024-09-17 13:01:33 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Failing after 11s
root  656bdcedae  add dhcp servers  2024-09-17 12:58:00 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Failing after 26s
root  e5a4fc8cf9  add dhcp servers  2024-09-17 12:56:48 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Failing after 3s
root  9941d188a8  add dhcp servers  2024-09-17 12:54:45 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Failing after 6s
root  08bb85eb62  add dhcp servers  2024-09-17 11:23:31 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Failing after 9s
root  27c462ca68  add dhcp servers  2024-09-17 11:20:14 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Failing after 7s
root  7c064e5d26  add dhcp servers  2024-09-17 11:19:17 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Failing after 3s
root  90e3a91982  runs on docker  2024-09-17 09:07:35 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Failing after 7s
root  23bd13f34a  runs on docker  2024-09-17 09:06:20 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Failing after 54s
root  cef367ddd4  runs on docker  2024-09-17 08:32:46 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Failing after 3s
root  7d2df517f1  runs on docker  2024-09-17 08:26:22 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Failing after 3s
root  f46256aebb  runs on docker  2024-09-17 08:23:54 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Failing after 9s
root  21511536b9  disable domain join  2024-09-17 08:21:14 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Waiting to run
root  f74df47fa7  disable domain join  2024-09-17 08:19:55 +02:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Waiting to run
4963812f67  add sonarqube  2024-09-16 16:22:57 +00:00
    Checks: SonarQube Scan / SonarQube Trigger (push) Waiting to run
root  caf2583c56  add playbook import hosts from list  2024-08-27 16:57:13 +02:00
    Checks: Gitea Actions Minimal EE / Bootstrap-runner (push) Successful in 19s; Gitea Actions Minimal EE / Create-dockerfile-with-ansible-builder (push) Successful in 14m3s
root  89ee036c84  update  2024-07-18 14:00:14 +02:00
    Checks: Gitea Actions Minimal EE / Bootstrap-runner (push) Successful in 6s; Gitea Actions Minimal EE / Create-dockerfile-with-ansible-builder (push) Successful in 17m2s
root  15fe8d4862  update  2024-07-18 13:59:23 +02:00
    Checks: Gitea Actions Minimal EE / Bootstrap-runner (push) Successful in 8s; Gitea Actions Minimal EE / Create-dockerfile-with-ansible-builder (push) Failing after 10s
root  ccd7795a02  update  2024-07-18 13:49:03 +02:00
    Checks: Gitea Actions Minimal EE / Bootstrap-runner (push) Successful in 7s; Gitea Actions Minimal EE / Create-dockerfile-with-ansible-builder (push) Failing after 17s
root  d30037eeb1  update  2024-07-18 13:39:01 +02:00
    Checks: Gitea Actions Minimal EE / Bootstrap-runner (push) Successful in 8s; Gitea Actions Minimal EE / Create-dockerfile-with-ansible-builder (push) Failing after 19s
root  934107b63a  update  2024-07-18 13:32:49 +02:00
    Checks: Gitea Actions Minimal EE / Bootstrap-runner (push) Successful in 5s; Gitea Actions Minimal EE / Create-dockerfile-with-ansible-builder (push) Failing after 9s
root  268df6a503  update  2024-07-18 13:32:03 +02:00
    Checks: Gitea Actions Minimal EE / Bootstrap-runner (push) Successful in 9s; Gitea Actions Minimal EE / Create-dockerfile-with-ansible-builder (push) Failing after 17s
root  78f4027b6a  update  2024-07-18 13:03:33 +02:00
    Checks: Gitea Actions Minimal EE / Bootstrap-runner (push) Successful in 7s; Gitea Actions Minimal EE / Create-dockerfile-with-ansible-builder (push) Failing after 12s
root  a793dc9fce  update  2024-07-18 12:58:18 +02:00
    Checks: Gitea Actions Minimal EE / Bootstrap-runner (push) Successful in 18s; Gitea Actions Minimal EE / Create-dockerfile-with-ansible-builder (push) Failing after 2m51s
root  9144f1d47c  update  2024-07-18 11:33:42 +02:00
    Checks: Gitea Actions Minimal EE / Bootstrap-runner (push) Successful in 5s; Gitea Actions Minimal EE / Create-dockerfile-with-ansible-builder (push) Failing after 1m50s
root  ff386c2fd5  update  2024-07-18 10:26:28 +02:00
    Checks: Gitea Actions Minimal EE / Bootstrap-runner (push) Successful in 6s; Gitea Actions Minimal EE / Create-dockerfile-with-ansible-builder (push) Failing after 13s
root  4f941b9bac  update  2024-07-18 10:23:52 +02:00
    Checks: Gitea Actions Rocky / Bootstrap-runner (push) Successful in 18s; Gitea Actions Rocky / Create-dockerfile-with-ansible-builder (push) Failing after 4m16s
root  cf5a997306  update  2024-07-18 10:23:29 +02:00
    Checks: Gitea Actions Centos / Bootstrap-runner (push) Successful in 20s; Gitea Actions Centos / Create-dockerfile-with-ansible-builder (push) Successful in 3m16s
root  dfe10a61b2  update  2024-07-18 10:21:49 +02:00
    Checks: Gitea Actions Centos / Bootstrap-runner (push) Successful in 5s; Gitea Actions Centos / Create-dockerfile-with-ansible-builder (push) Failing after 5s; Gitea Actions Rocky / Bootstrap-runner (push) Successful in 4s; Gitea Actions Rocky / Create-dockerfile-with-ansible-builder (push) Failing after 4s
root  8214838cff  update  2024-07-18 10:18:31 +02:00
    Checks: Gitea Actions Centos / Bootstrap-runner (push) Successful in 4s; Gitea Actions Centos / Create-dockerfile-with-ansible-builder (push) Failing after 5s; Gitea Actions Minimal EE / Bootstrap-runner (push) Successful in 5s; Gitea Actions Minimal EE / Create-dockerfile-with-ansible-builder (push) Failing after 8s; Gitea Actions Rocky / Bootstrap-runner (push) Successful in 5s; Gitea Actions Rocky / Create-dockerfile-with-ansible-builder (push) Failing after 5s
root  d8816a34ad  update  2024-07-18 10:18:21 +02:00
root  3abb661859  update  2024-07-18 10:17:39 +02:00
root  87fe6949ba  create new pipeline and build file for minimal EE  2024-07-18 10:17:06 +02:00
    Checks: Gitea Actions Minimal / Bootstrap-runner (push) Successful in 5s; Gitea Actions Minimal / Create-dockerfile-with-ansible-builder (push) Failing after 13s
root  cd5469b638  create new pipeline and build file for minimal EE  2024-07-18 10:16:31 +02:00
    Checks: Gitea Actions Minimal / Bootstrap-runner (push) Successful in 5s; Gitea Actions Minimal / Create-dockerfile-with-ansible-builder (push) Failing after 5s
root  646985f3df  create new pipeline and build file for minimal EE  2024-07-18 10:15:58 +02:00
    Checks: Gitea Actions Minimal / Bootstrap-runner (push) Successful in 5s; Gitea Actions Minimal / Create-dockerfile-with-ansible-builder (push) Failing after 5s
root  0610517bcb  create new pipeline and build file for minimal EE  2024-07-18 10:14:36 +02:00
    Checks: Gitea Actions Minimal / Bootstrap-runner (push) Successful in 4s; Gitea Actions Minimal / Create-dockerfile-with-ansible-builder (push) Failing after 5s
root  6c279fd0de  create new pipeline and build file for minimal EE  2024-07-18 10:10:43 +02:00
    Checks: Gitea Actions Demo / Bootstrap-runner (push) Successful in 7s; Gitea Actions Demo / Create-dockerfile-with-ansible-builder (push) Successful in 11s
root  f16698b4f0  create new pipeline and build file for minimal EE  2024-07-18 09:38:35 +02:00
    Checks: Gitea Actions Demo / Bootstrap-runner (push) Successful in 8s; Gitea Actions Demo / Create-dockerfile-with-ansible-builder (push) Successful in 14m34s
root  aa396cbc66  change pipeline  2024-07-17 09:54:53 +02:00
    Checks: Gitea Actions Demo / Bootstrap-runner (push) Successful in 4s; Gitea Actions Demo / Create-dockerfile-with-ansible-builder (push) Successful in 10m44s
root  321ade631e  change pipeline  2024-07-17 09:53:43 +02:00
    Checks: Gitea Actions Demo / Bootstrap-runner (push) Successful in 6s; Gitea Actions Demo / Create-dockerfile-with-ansible-builder (push) Successful in 8s
root  75fe2e8245  change pipeline  2024-07-17 09:52:47 +02:00
    Checks: Gitea Actions Demo / Bootstrap-runner (push) Successful in 10s; Gitea Actions Demo / Create-dockerfile-with-ansible-builder (push) Failing after 4s
root  155922961c  change pipeline  2024-07-17 09:51:41 +02:00
    Checks: Gitea Actions Demo / Bootstrap-runner (push) Failing after 1s; Gitea Actions Demo / Create-dockerfile-with-ansible-builder (push) Failing after 3s
root  0b917c5488  change pipeline  2024-07-17 09:49:03 +02:00
root  013acdf22b  change the runners so they don't run on every push  2024-07-16 10:50:00 +02:00
root  d17dd56416  rename default ultimate ee  2024-07-16 10:11:19 +02:00
    Checks: Gitea Actions Demo / Bootstrap-runner (push) Failing after 1s; Gitea Actions Demo / Create-dockerfile-with-ansible-builder (push) Failing after 3s
root  776885226c  add rocky linux  2024-07-15 18:23:41 +02:00
    Checks: Gitea Actions Demo / Bootstrap-runner (push) Successful in 5s; Gitea Actions Demo / Create-dockerfile-with-ansible-builder (push) Successful in 10m11s
root  6c6daa6857  add rocky  2024-07-15 18:22:05 +02:00
    Checks: Gitea Actions Demo / Bootstrap-runner (push) Successful in 4s; Gitea Actions Demo / Create-dockerfile-with-ansible-builder (push) Has been cancelled
root  3c6cc47022  add rocky environment  2024-07-15 18:20:58 +02:00
root  c769ca108c  add rocky linux  2024-07-15 18:20:38 +02:00
21 changed files with 267 additions and 1194 deletions


@@ -1,15 +1,21 @@
-name: Gitea Actions Demo
+name: Gitea Actions Centos
 run-name: ${{ gitea.actor }} is testing out Gitea Actions 🚀
-on: [push]
+on:
+  push:
+    paths:
+      - 'execution-environment.yml'
+      - '.gitea/workflows/gitea-actions-demo.yaml'
 jobs:
   Bootstrap-runner:
     runs-on: docker
     steps:
       - run: echo "🎉 The job was automatically triggered by a ${{ gitea.event_name }} event."
-      - run: apk add --no-cache nodejs
-      - run: python -m ensurepip --upgrade
-      - run: python -m pip install ansible-builder
+      - run: apk add --no-cache nodejs python3 docker
+      - run: python3 -m venv .venv && . .venv/bin/activate
+      - run: . .venv/bin/activate && python -m ensurepip --upgrade
+      - run: . .venv/bin/activate && python -m pip install ansible-builder
       - name: Check out repository code
         uses: actions/checkout@v4
       - run: echo "💡 The ${{ gitea.repository }} repository has been cloned to the runner."
@@ -26,6 +32,14 @@ jobs:
       - name: List files in the repository
         run: |
           ls ${{ gitea.workspace }}
+      - run: echo "build context"
+      - run: python3 -m venv .venv && . .venv/bin/activate
+      - run: . .venv/bin/activate && python -m ensurepip --upgrade
+      - run: . .venv/bin/activate && python -m pip install ansible-builder
+      - run: . .venv/bin/activate && ansible-builder create -f execution-environment.yml -c context
+      - name: List files in the repository
+        run: |
+          ls ${{ gitea.workspace }}/context
       - name: Login to the Container registry
         uses: docker/login-action@v2
         with:
@@ -36,6 +50,6 @@ jobs:
         uses: https://github.com/docker/build-push-action@v5
         with:
           context: context
-          file: context/Containerfile
+          file: context/Dockerfile
           push: true
-          tags: git.hyperon.be/bram/ultimate-ee
+          tags: git.hyperon.be/bram/ultimate-ee-centos9
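The reworked Centos job above boils down to three stages: bootstrap ansible-builder inside a Python venv on the apk-based runner, render the execution environment definition into a build context, then build and push the image. A rough local equivalent of those stages, assuming Docker and Python 3 are available and that you are already logged in to git.hyperon.be (a sketch, not the workflow itself):

# Sketch: reproduce the Centos job's build steps locally
python3 -m venv .venv && . .venv/bin/activate
python -m pip install --upgrade pip ansible-builder
# Render the EE definition into a build context (a Dockerfile/Containerfile plus _build/ inputs)
ansible-builder create -f execution-environment.yml -c context
# Build and push roughly what docker/build-push-action@v5 produces in the workflow
docker build -f context/Dockerfile -t git.hyperon.be/bram/ultimate-ee-centos9 context
docker push git.hyperon.be/bram/ultimate-ee-centos9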


@@ -0,0 +1,54 @@
name: Gitea Actions Minimal EE
run-name: ${{ gitea.actor }} is testing out Gitea Actions 🚀
on:
  push:
    paths:
      - 'minimal-environment.yml'
      - '.gitea/workflows/gitea-actions-minimal.yaml'
jobs:
  Bootstrap-runner:
    runs-on: docker
    steps:
      - run: echo "🎉 The job was automatically triggered by a ${{ gitea.event_name }} event."
      - run: apk add --no-cache nodejs python3 docker
      - run: python3 -m venv .venv && . .venv/bin/activate
      - run: . .venv/bin/activate && python -m ensurepip --upgrade
      - run: . .venv/bin/activate && python -m pip install ansible-builder
      - name: Check out repository code
        uses: actions/checkout@v4
      - run: echo "💡 The ${{ gitea.repository }} repository has been cloned to the runner."
      - name: List files in the repository
        run: |
          ls ${{ gitea.workspace }}
      - run: echo "🍏 This job's status is ${{ job.status }}."
  Create-dockerfile-with-ansible-builder:
    runs-on: docker
    steps:
      - name: Check out repository code
        uses: actions/checkout@v4
      - run: echo "💡 The ${{ gitea.repository }} repository has been cloned to the runner."
      - name: List files in the repository
        run: |
          ls ${{ gitea.workspace }}
      - run: echo "build context"
      - run: python3 -m venv .venv && . .venv/bin/activate
      - run: . .venv/bin/activate && python -m ensurepip --upgrade
      - run: . .venv/bin/activate && python -m pip install ansible-builder
      - run: . .venv/bin/activate && ansible-builder create -f minimal-environment.yml -c context-minimal
      - name: List files in the repository
        run: |
          ls ${{ gitea.workspace }}/context-minimal
      - name: Login to the Container registry
        uses: docker/login-action@v2
        with:
          registry: git.hyperon.be
          username: bramvandendaele1@gmail.com
          password: ${{ secrets.CI_TOKEN }}
      - name: build and push docker image
        uses: https://github.com/docker/build-push-action@v5
        with:
          context: context-minimal
          file: context-minimal/Dockerfile
          push: true
          tags: git.hyperon.be/bram/ultimate-ee-minimal


@@ -0,0 +1,54 @@
name: Gitea Actions Rocky
run-name: ${{ gitea.actor }} is testing out Gitea Actions 🚀
on:
  push:
    paths:
      - 'rocky-environment.yml'
      - '.gitea/workflows/gitea-actions-rocky.yaml'
jobs:
  Bootstrap-runner:
    runs-on: docker
    steps:
      - run: echo "🎉 The job was automatically triggered by a ${{ gitea.event_name }} event."
      - run: apk add --no-cache nodejs python3 docker
      - run: python3 -m venv .venv && . .venv/bin/activate
      - run: . .venv/bin/activate && python -m ensurepip --upgrade
      - run: . .venv/bin/activate && python -m pip install ansible-builder
      - name: Check out repository code
        uses: actions/checkout@v4
      - run: echo "💡 The ${{ gitea.repository }} repository has been cloned to the runner."
      - name: List files in the repository
        run: |
          ls ${{ gitea.workspace }}
      - run: echo "🍏 This job's status is ${{ job.status }}."
  Create-dockerfile-with-ansible-builder:
    runs-on: docker
    steps:
      - name: Check out repository code
        uses: actions/checkout@v4
      - run: echo "💡 The ${{ gitea.repository }} repository has been cloned to the runner."
      - name: List files in the repository
        run: |
          ls ${{ gitea.workspace }}
      - run: echo "build context"
      - run: python3 -m venv .venv && . .venv/bin/activate
      - run: . .venv/bin/activate && python -m ensurepip --upgrade
      - run: . .venv/bin/activate && python -m pip install ansible-builder
      - run: . .venv/bin/activate && ansible-builder create -f rocky-environment.yml -c context-rocky
      - name: List files in the repository
        run: |
          ls ${{ gitea.workspace }}/context-rocky
      - name: Login to the Container registry
        uses: docker/login-action@v2
        with:
          registry: git.hyperon.be
          username: bramvandendaele1@gmail.com
          password: ${{ secrets.CI_TOKEN }}
      - name: build and push docker image
        uses: https://github.com/docker/build-push-action@v5
        with:
          context: context-rocky
          file: context-rocky/Dockerfile
          push: true
          tags: git.hyperon.be/bram/ultimate-ee-rocky
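The Minimal EE and Rocky workflows added above are near-copies of the Centos one; only the definition file, the context directory, and the image tag change. Assuming the file names and tags shown in the three workflows, all variants could be driven from a single loop like this (illustrative sketch, not part of the repo):

# Illustrative: build every EE variant with one loop (requires ansible-builder and docker)
for variant in execution-environment:context:ultimate-ee-centos9 \
               minimal-environment:context-minimal:ultimate-ee-minimal \
               rocky-environment:context-rocky:ultimate-ee-rocky; do
  IFS=: read -r def ctx tag <<< "$variant"
  ansible-builder create -f "${def}.yml" -c "$ctx"
  docker build -f "$ctx/Dockerfile" -t "git.hyperon.be/bram/$tag" "$ctx"
done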


@@ -0,0 +1,27 @@
name: Build
on:
  push:
    branches:
      - main
jobs:
  build:
    name: Build and analyze
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
        with:
          fetch-depth: 0  # Shallow clones should be disabled for a better relevancy of analysis
      - uses: sonarsource/sonarqube-scan-action@master
        env:
          SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}
          SONAR_HOST_URL: ${{ secrets.SONAR_HOST_URL }}
      # If you wish to fail your job when the Quality Gate is red, uncomment the
      # following lines. This would typically be used to fail a deployment.
      # - uses: sonarsource/sonarqube-quality-gate-action@master
      #   timeout-minutes: 5
      #   env:
      #     SONAR_TOKEN: ${{ secrets.SONAR_TOKEN }}

.gitignore (vendored): new file, 1 line

@@ -0,0 +1 @@


@@ -1 +1,4 @@
 test
+tst
+TESTJE
+PASSWORD =TEST


@@ -1,97 +0,0 @@
ARG EE_BASE_IMAGE="quay.io/centos/centos:stream9"
ARG PYCMD="/usr/bin/python3"
ARG PKGMGR_PRESERVE_CACHE=""
ARG ANSIBLE_GALAXY_CLI_COLLECTION_OPTS="--pre"
ARG ANSIBLE_GALAXY_CLI_ROLE_OPTS=""
ARG ANSIBLE_INSTALL_REFS="ansible-core>=2.15.0rc2,<2.16 ansible-runner"
ARG PKGMGR="/usr/bin/dnf"
# Base build stage
FROM $EE_BASE_IMAGE as base
USER root
ARG EE_BASE_IMAGE
ARG PYCMD
ARG PKGMGR_PRESERVE_CACHE
ARG ANSIBLE_GALAXY_CLI_COLLECTION_OPTS
ARG ANSIBLE_GALAXY_CLI_ROLE_OPTS
ARG ANSIBLE_INSTALL_REFS
ARG PKGMGR
RUN /usr/bin/python3 -m ensurepip
RUN /usr/bin/python3 -m pip install --upgrade pip
RUN $PYCMD -m ensurepip
RUN $PYCMD -m pip install --no-cache-dir $ANSIBLE_INSTALL_REFS
COPY _build/scripts/ /output/scripts/
COPY _build/scripts/entrypoint /opt/builder/bin/entrypoint
RUN $PYCMD -m pip install -U pip
# Galaxy build stage
FROM base as galaxy
ARG EE_BASE_IMAGE
ARG PYCMD
ARG PKGMGR_PRESERVE_CACHE
ARG ANSIBLE_GALAXY_CLI_COLLECTION_OPTS
ARG ANSIBLE_GALAXY_CLI_ROLE_OPTS
ARG ANSIBLE_INSTALL_REFS
ARG PKGMGR
RUN /usr/bin/python3 -m pip install --upgrade pip cmake
COPY _build/configs/ansible.cfg /etc/ansible/ansible.cfg
RUN /output/scripts/check_galaxy
COPY _build /build
WORKDIR /build
RUN ansible-galaxy role install $ANSIBLE_GALAXY_CLI_ROLE_OPTS -r requirements.yml --roles-path "/usr/share/ansible/roles"
RUN ANSIBLE_GALAXY_DISABLE_GPG_VERIFY=1 ansible-galaxy collection install $ANSIBLE_GALAXY_CLI_COLLECTION_OPTS -r requirements.yml --collections-path "/usr/share/ansible/collections"
# Builder build stage
FROM base as builder
WORKDIR /build
ARG EE_BASE_IMAGE
ARG PYCMD
ARG PKGMGR_PRESERVE_CACHE
ARG ANSIBLE_GALAXY_CLI_COLLECTION_OPTS
ARG ANSIBLE_GALAXY_CLI_ROLE_OPTS
ARG ANSIBLE_INSTALL_REFS
ARG PKGMGR
RUN $PYCMD -m pip install --no-cache-dir bindep pyyaml requirements-parser
COPY --from=galaxy /usr/share/ansible /usr/share/ansible
COPY _build/requirements.txt requirements.txt
COPY _build/bindep.txt bindep.txt
RUN $PYCMD /output/scripts/introspect.py introspect --sanitize --user-pip=requirements.txt --user-bindep=bindep.txt --write-bindep=/tmp/src/bindep.txt --write-pip=/tmp/src/requirements.txt
RUN /output/scripts/assemble
# Final build stage
FROM base as final
ARG EE_BASE_IMAGE
ARG PYCMD
ARG PKGMGR_PRESERVE_CACHE
ARG ANSIBLE_GALAXY_CLI_COLLECTION_OPTS
ARG ANSIBLE_GALAXY_CLI_ROLE_OPTS
ARG ANSIBLE_INSTALL_REFS
ARG PKGMGR
RUN whoami
RUN cat /etc/os-release
RUN /output/scripts/check_ansible $PYCMD
COPY --from=galaxy /usr/share/ansible /usr/share/ansible
COPY --from=builder /output/ /output/
RUN /output/scripts/install-from-bindep && rm -rf /output/wheels
RUN chmod ug+rw /etc/passwd
RUN mkdir -p /runner && chgrp 0 /runner && chmod -R ug+rwx /runner
WORKDIR /runner
RUN $PYCMD -m pip install --no-cache-dir 'dumb-init==1.2.5'
RUN pip3 install --upgrade azure-identity azure-cli-core paramiko
COPY --from=quay.io/ansible/receptor:devel /usr/bin/receptor /usr/bin/receptor
RUN mkdir -p /var/run/receptor
RUN git lfs install --system
RUN rm -rf /output
LABEL ansible-execution-environment=true
USER 1000
ENTRYPOINT ["/opt/builder/bin/entrypoint", "dumb-init"]
CMD ["bash"]


@@ -1,28 +0,0 @@
subversion [platform:rpm]
wget [platform:rpm]
unzip [platform:rpm]
gcc [platform:rpm]
python3-devel [platform:rpm]
cmake [platform:rpm]
gcc-c++ [platform:rpm]
make [platform:rpm]
openssl-devel [platform:rpm]
git-core [platform:rpm]
python3.9-devel [platform:rpm compile]
libcurl-devel [platform:rpm compile]
krb5-devel [platform:rpm compile]
krb5-workstation [platform:rpm]
subversion [platform:rpm]
subversion [platform:dpkg]
git-lfs [platform:rpm]
sshpass [platform:rpm]
rsync [platform:rpm]
epel-release [platform:rpm]
python-unversioned-command [platform:rpm]
unzip [platform:rpm]
podman-remote [platform:rpm]
cmake [platform:rpm compile]
gcc [platform:rpm compile]
gcc-c++ [platform:rpm compile]
make [platform:rpm compile]
openssl-devel [platform:rpm compile]


@@ -1,10 +0,0 @@
[galaxy]
server_list = galaxy
[galaxy_server.galaxy]
url=https://galaxy.ansible.com/
[defaults]
NETWORK_GROUP_MODULES=arubaoss
host_key_checking = false


@@ -1,51 +0,0 @@
ncclient==0.6.9
scp==0.13.3
textfsm==1.1.0
ipaddr==2.2.0
#az-cli
git+https://github.com/ansible/ansible-sign
ncclient
paramiko
pykerberos
pyOpenSSL
pypsrp[kerberos,credssp]
pywinrm[kerberos,credssp]
toml
pexpect>=4.5
python-daemon
pyyaml
six
receptorctl
#azure
packaging
requests[security]
xmltodict
msgraph-sdk==1.0.0
azure-cli-core==2.61.0
azure-common==1.1.11
azure-identity==1.16.1
azure-mgmt-authorization==2.0.0
azure-mgmt-apimanagement==3.0.0
azure-mgmt-batch==16.2.0
azure-mgmt-cdn==11.0.0
azure-mgmt-compute==30.6.0
azure-mgmt-containerinstance==9.0.0
azure-mgmt-core==1.4.0
azure-mgmt-containerregistry==9.1.0
azure-containerregistry==1.1.0
azure-mgmt-containerservice==20.0.0
azure-mgmt-datafactory==2.0.0
azure-mgmt-dns==8.0.0
azure-mgmt-marketplaceordering==1.1.0
azure-mgmt-monitor==3.0.0
azure-mgmt-managedservices==6.0.0
azure-mgmt-managementgroups==1.0.0
azure-mgmt-network==19.1.0
azure-mgmt-nspkg==2.0.0
azure-mgmt-privatedns==1.0.0
azure-mgmt-redis==13.0.0
azure-mgmt-resource==21.1.0
azure-mgmt-rdbms==10.2.0b12
azure-mgmt-search==8.0.0


@@ -1,14 +0,0 @@
---
collections:
- azure.azcollection
- ansible.windows
- community.windows
- community.general
- tribe29.checkmk
- ansible.posix
- awx.awx
- cisco.ios
- microsoft.ad
- arubanetworks.aos_switch
- ansible.netcommon
- community.docker


@@ -1,171 +0,0 @@
#!/bin/bash
# Copyright (c) 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Make a list of bindep dependencies and a collection of built binary
# wheels for the repo in question as well as its python dependencies.
# Install javascript tools as well to support python that needs javascript
# at build time.
set -ex
RELEASE=$(source /etc/os-release; echo $ID)
# NOTE(pabelanger): Allow users to force either microdnf or dnf as a package
# manager.
PKGMGR="${PKGMGR:-}"
PKGMGR_OPTS="${PKGMGR_OPTS:-}"
PKGMGR_PRESERVE_CACHE="${PKGMGR_PRESERVE_CACHE:-}"
PYCMD="${PYCMD:=/usr/bin/python3}"
PIPCMD="${PIPCMD:=$PYCMD -m pip}"
$PYCMD -m ensurepip
if [ -z $PKGMGR ]; then
# Expect dnf to be installed, however if we find microdnf default to it.
PKGMGR=/usr/bin/dnf
if [ -f "/usr/bin/microdnf" ]; then
PKGMGR=/usr/bin/microdnf
fi
fi
if [ "$PKGMGR" = "/usr/bin/microdnf" ]
then
if [ -z $PKGMGR_OPTS ]; then
# NOTE(pabelanger): skip install docs and weak dependencies to
# make smaller images. Sadly, setting these in dnf.conf don't
# appear to work.
PKGMGR_OPTS="--nodocs --setopt install_weak_deps=0"
fi
fi
# NOTE(pabelanger): Ensure all the directory we use exists regardless
# of the user first creating them or not.
mkdir -p /output/bindep
mkdir -p /output/wheels
mkdir -p /tmp/src
cd /tmp/src
function install_bindep {
# Protect from the bindep builder image use of the assemble script
# to produce a wheel. Note we append because we want all
# sibling packages in here too
if [ -f bindep.txt ] ; then
bindep -l newline | sort >> /output/bindep/run.txt || true
if [ "$RELEASE" == "centos" ] ; then
bindep -l newline -b epel | sort >> /output/bindep/stage.txt || true
grep -Fxvf /output/bindep/run.txt /output/bindep/stage.txt >> /output/bindep/epel.txt || true
rm -rf /output/bindep/stage.txt
fi
compile_packages=$(bindep -b compile || true)
if [ ! -z "$compile_packages" ] ; then
$PKGMGR install -y $PKGMGR_OPTS ${compile_packages}
fi
fi
}
function install_wheels {
# NOTE(pabelanger): If there are build requirements to install, do so.
# However do not cache them as we do not want them in the final image.
if [ -f /tmp/src/build-requirements.txt ] && [ ! -f /tmp/src/.build-requirements.txt ] ; then
$PIPCMD install $CONSTRAINTS $PIP_OPTS --no-cache -r /tmp/src/build-requirements.txt
touch /tmp/src/.build-requirements.txt
fi
# Build a wheel so that we have an install target.
# pip install . in the container context with the mounted
# source dir gets ... exciting, if setup.py exists.
# We run sdist first to trigger code generation steps such
# as are found in zuul, since the sequencing otherwise
# happens in a way that makes wheel content copying unhappy.
# pip wheel isn't used here because it puts all of the output
# in the output dir and not the wheel cache, so it's not
# possible to tell what is the wheel for the project and
# what is the wheel cache.
if [ -f setup.py ] ; then
$PYCMD setup.py sdist bdist_wheel -d /output/wheels
fi
# Install everything so that the wheel cache is populated with
# transitive depends. If a requirements.txt file exists, install
# it directly so that people can use git url syntax to do things
# like pick up patched but unreleased versions of dependencies.
# Only do this for the main package (i.e. only write requirements
# once).
if [ -f /tmp/src/requirements.txt ] && [ ! -f /output/requirements.txt ] ; then
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels -r /tmp/src/requirements.txt
cp /tmp/src/requirements.txt /output/requirements.txt
fi
# If we didn't build wheels, we can skip trying to install it.
if [ $(ls -1 /output/wheels/*whl 2>/dev/null | wc -l) -gt 0 ]; then
$PIPCMD uninstall -y /output/wheels/*.whl
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels /output/wheels/*whl
fi
}
PACKAGES=$*
PIP_OPTS="${PIP_OPTS-}"
# bindep the main package
install_bindep
# go through ZUUL_SIBLINGS, if any, and build those wheels too
for sibling in ${ZUUL_SIBLINGS:-}; do
pushd .zuul-siblings/${sibling}
install_bindep
popd
done
# Use a clean virtualenv for install steps to prevent things from the
# current environment making us not build a wheel.
# NOTE(pabelanger): We allow users to install distro python packages of
# libraries. This is important for projects that eventually want to produce
# an RPM or offline install.
$PYCMD -m venv /tmp/venv --system-site-packages --without-pip
source /tmp/venv/bin/activate
# If there is an upper-constraints.txt file in the source tree,
# use it in the pip commands.
if [ -f /tmp/src/upper-constraints.txt ] ; then
cp /tmp/src/upper-constraints.txt /output/upper-constraints.txt
CONSTRAINTS="-c /tmp/src/upper-constraints.txt"
fi
# If we got a list of packages, install them, otherwise install the
# main package.
if [[ $PACKAGES ]] ; then
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels $PACKAGES
for package in $PACKAGES ; do
echo "$package" >> /output/packages.txt
done
else
install_wheels
fi
# go through ZUUL_SIBLINGS, if any, and build those wheels too
for sibling in ${ZUUL_SIBLINGS:-}; do
pushd .zuul-siblings/${sibling}
install_wheels
popd
done
if [ -z $PKGMGR_PRESERVE_CACHE ]; then
$PKGMGR clean all
rm -rf /var/cache/{dnf,yum}
fi
rm -rf /var/lib/dnf/history.*
rm -rf /var/log/{dnf.*,hawkey.log}
rm -rf /tmp/venv


@@ -1,110 +0,0 @@
#!/bin/bash
# Copyright (c) 2023 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#####################################################################
# Script to validate that Ansible and Ansible Runner are installed.
#
# Usage: check_ansible <PYCMD>
#
# Options:
# PYCMD - The path to the python executable to use.
#####################################################################
set -x
PYCMD=$1
if [ -z "$PYCMD" ]
then
echo "Usage: check_ansible <PYCMD>"
exit 1
fi
if [ ! -x "$PYCMD" ]
then
echo "$PYCMD is not an executable"
exit 1
fi
ansible --version
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - Missing Ansible installation
An Ansible installation cannot be found in the final builder image.
Ansible must be installed in the final image. If you are using a
recent enough version of the execution environment file, you may
use the 'dependencies.ansible_core' configuration option to install
Ansible for you, or use 'additional_build_steps' to manually do
this yourself. Alternatively, use a base image with Ansible already
installed.
**********************************************************************
EOF
exit 1
fi
ansible-runner --version
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - Missing Ansible Runner installation
An Ansible Runner installation cannot be found in the final builder
image.
Ansible Runner must be installed in the final image. If you are
using a recent enough version of the execution environment file, you
may use the 'dependencies.ansible_runner' configuration option to
install Ansible Runner for you, or use 'additional_build_steps' to
manually do this yourself. Alternatively, use a base image with
Ansible Runner already installed.
**********************************************************************
EOF
exit 1
fi
$PYCMD -c 'import ansible ; import ansible_runner'
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - Missing Ansible or Ansible Runner for selected Python
An Ansible and/or Ansible Runner installation cannot be found in
the final builder image using the following Python interpreter:
$PYCMD
Ansible and Ansible Runner must be installed in the final image and
available to the selected Python interpreter. If you are using a
recent enough version of the execution environment file, you may use
the 'dependencies.ansible_core' configuration option to install
Ansible and the 'dependencies.ansible_runner' configuration option
to install Ansible Runner. You can also use 'additional_build_steps'
to manually do this yourself. Alternatively, use a base image with
Ansible and Ansible Runner already installed.
**********************************************************************
EOF
exit 1
fi
exit 0


@@ -1,46 +0,0 @@
#!/bin/bash
# Copyright (c) 2023 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#####################################################################
# Script to validate that Ansible Galaxy is installed on the system.
#####################################################################
set -x
ansible-galaxy --version
if [ $? -ne 0 ]
then
cat<<EOF
**********************************************************************
ERROR - Missing Ansible installation
The 'ansible-galaxy' command is not found in the base image. This
image is used to create the intermediary image that performs the
Galaxy collection and role installation process.
Ansible must be installed in the base image. If you are using a
recent enough version of the execution environment file, you may
use the 'dependencies.ansible_core' configuration option to install
Ansible for you, or use 'additional_build_steps' to manually do
this yourself. Alternatively, use a base image with Ansible already
installed.
**********************************************************************
EOF
exit 1
fi
exit 0


@@ -1,152 +0,0 @@
#!/usr/bin/env bash
# Copyright: (c) 2023, Ansible Project
# Apache License, Version 2.0 (see LICENSE.md or https://www.apache.org/licenses/LICENSE-2.0)
# This entrypoint script papers over a number of problems that manifest under different container runtimes when
# using ephemeral UIDs, then chain-execs to the requested init system and/or command. It is an implementation
# detail for the convenience of Ansible execution environments built by ansible-builder.
#
# If we're running as a legit user that has an entry in /etc/passwd and a valid and writeable homedir, we're all good.
#
# If the current uid is not in /etc/passwd, we'll attempt to add it, but /etc/passwd is often not writable by GID 0.
# `ansible-builder` defaults to making /etc/passwd writable by GID0 by default for maximum compatibility, but this is
# not guaranteed. Some runtimes/wrappers (eg podman, cri-o) already create an /etc/passwd entry on the fly as-needed,
# but they may set the homedir to something inaccessible (eg, `/`, WORKDIR).
#
# There are numerous cases where a missing or incorrect homedir in /etc/passwd are fatal. It breaks
# `async` in ansible-core, things like `echo ~someuid`, and numerous other software packages that assume a valid POSIX
# user configuration.
#
# If the homedir listed in /etc/passwd is not writeable by the current user (supposed to be primary GID0), we'll try
# to make it writeable (except `/`), or select another writeable home directory from `$HOME`, `/runner`, or `/tmp` and
# update $HOME (and /etc/passwd if possible) accordingly for the current process chain.
#
# This script is generally silent by default, but some likely-fatal cases will issue a brief warning to stderr. The
# envvars described below can be set before container init to cause faster failures and/or get tracing output.
# options:
# EP_BASH_DEBUG=1 (enable set -x)
# EP_DEBUG_TRACE=1 (enable debug trace to stderr)
# EP_ON_ERROR=ignore/warn/fail (default ignore)
set -eu
if (( "${EP_BASH_DEBUG:=0}" == 1 )); then
set -x
fi
: "${EP_DEBUG_TRACE:=0}"
: "${EP_ON_ERROR:=warn}"
: "${HOME:=}"
CUR_UID=$(id -u)
CUR_USERNAME=$(id -u -n 2> /dev/null || true) # whoami-free way to get current username, falls back to current uid
DEFAULT_HOME="/runner"
DEFAULT_SHELL="/bin/bash"
if (( "$EP_DEBUG_TRACE" == 1 )); then
function log_debug() { echo "EP_DEBUG: $1" 1>&2; }
else
function log_debug() { :; }
fi
log_debug "entrypoint.sh started"
case "$EP_ON_ERROR" in
"fail")
function maybe_fail() { echo "EP_FAIL: $1" 1>&2; exit 1; }
;;
"warn")
function maybe_fail() { echo "EP_WARN: $1" 1>&2; }
;;
*)
function maybe_fail() { log_debug "EP_FAIL (ignored): $1"; }
;;
esac
function is_dir_writable() {
[ -d "$1" ] && [ -w "$1" ] && [ -x "$1" ]
}
function ensure_current_uid_in_passwd() {
log_debug "is current uid ${CUR_UID} in /etc/passwd?"
if ! getent passwd "${CUR_USERNAME}" &> /dev/null ; then
if [ -w "/etc/passwd" ]; then
log_debug "appending missing uid ${CUR_UID} into /etc/passwd"
# use the default homedir; we may have to rewrite it to another value later if it's inaccessible
echo "${CUR_UID}:x:${CUR_UID}:0:container user ${CUR_UID}:${DEFAULT_HOME}:${DEFAULT_SHELL}" >> /etc/passwd
else
maybe_fail "uid ${CUR_UID} is missing from /etc/passwd, which is not writable; this error is likely fatal"
fi
else
log_debug "current uid is already in /etc/passwd"
fi
}
function ensure_writeable_homedir() {
if (is_dir_writable "${CANDIDATE_HOME}") ; then
log_debug "candidate homedir ${CANDIDATE_HOME} is valid and writeable"
else
if [ "${CANDIDATE_HOME}" == "/" ]; then
log_debug "skipping attempt to fix permissions on / as homedir"
return 1
fi
log_debug "candidate homedir ${CANDIDATE_HOME} is missing or not writeable; attempt to fix"
if ! (mkdir -p "${CANDIDATE_HOME}" >& /dev/null && chmod -R ug+rwx "${CANDIDATE_HOME}" >& /dev/null) ; then
log_debug "candidate homedir ${CANDIDATE_HOME} cannot be made writeable"
return 1
else
log_debug "candidate homedir ${CANDIDATE_HOME} was successfully made writeable"
fi
fi
# this might work; export it even if we end up not being able to update /etc/passwd
# this ensures the envvar matches current reality for this session; future sessions should set automatically if /etc/passwd is accurate
export HOME=${CANDIDATE_HOME}
if [ "${CANDIDATE_HOME}" == "${PASSWD_HOME}" ] ; then
log_debug "candidate homedir ${CANDIDATE_HOME} matches /etc/passwd"
return 0
fi
if ! [ -w /etc/passwd ]; then
log_debug "candidate homedir ${CANDIDATE_HOME} is valid for ${CUR_USERNAME}, but /etc/passwd is not writable to update it"
return 1
fi
log_debug "resetting homedir for user ${CUR_USERNAME} to ${CANDIDATE_HOME} in /etc/passwd"
# sed -i wants to create a tempfile next to the original, which won't work with /etc permissions in many cases,
# so just do it in memory and overwrite the existing file if we succeeded
NEWPW=$(sed -r "s;(^${CUR_USERNAME}:(.*:){4})(.*:);\1${CANDIDATE_HOME}:;g" /etc/passwd)
echo "${NEWPW}" > /etc/passwd
}
ensure_current_uid_in_passwd
log_debug "current value of HOME is ${HOME}"
PASSWD_HOME=$(getent passwd "${CUR_USERNAME}" | cut -d: -f6)
log_debug "user ${CUR_USERNAME} homedir from /etc/passwd is ${PASSWD_HOME}"
CANDIDATE_HOMES=("${PASSWD_HOME}" "${HOME}" "${DEFAULT_HOME}" "/tmp")
# we'll set this in the loop as soon as we find a writeable dir
unset HOME
for CANDIDATE_HOME in "${CANDIDATE_HOMES[@]}"; do
if ensure_writeable_homedir ; then
break
fi
done
if ! [ -v HOME ] ; then
maybe_fail "a valid homedir could not be set for ${CUR_USERNAME}; this is likely fatal"
fi
# chain exec whatever we were asked to run (ideally an init system) to keep any envvar state we've set
log_debug "chain exec-ing requested command $*"
exec "${@}"


@@ -1,107 +0,0 @@
#!/bin/bash
# Copyright (c) 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
# NOTE(pabelanger): Allow users to force either microdnf or dnf as a package
# manager.
PKGMGR="${PKGMGR:-}"
PKGMGR_OPTS="${PKGMGR_OPTS:-}"
PKGMGR_PRESERVE_CACHE="${PKGMGR_PRESERVE_CACHE:-}"
PYCMD="${PYCMD:=/usr/bin/python3}"
PIPCMD="${PIPCMD:=$PYCMD -m pip}"
PIP_OPTS="${PIP_OPTS-}"
$PYCMD -m ensurepip
if [ -z $PKGMGR ]; then
# Expect dnf to be installed, however if we find microdnf default to it.
PKGMGR=/usr/bin/dnf
if [ -f "/usr/bin/microdnf" ]; then
PKGMGR=/usr/bin/microdnf
fi
fi
if [ "$PKGMGR" = "/usr/bin/microdnf" ]
then
if [ -z $PKGMGR_OPTS ]; then
# NOTE(pabelanger): skip install docs and weak dependencies to
# make smaller images. Sadly, setting these in dnf.conf don't
# appear to work.
PKGMGR_OPTS="--nodocs --setopt install_weak_deps=0"
fi
fi
if [ -f /output/bindep/run.txt ] ; then
PACKAGES=$(cat /output/bindep/run.txt)
if [ ! -z "$PACKAGES" ]; then
$PKGMGR install -y $PKGMGR_OPTS $PACKAGES
fi
fi
if [ -f /output/bindep/epel.txt ] ; then
EPEL_PACKAGES=$(cat /output/bindep/epel.txt)
if [ ! -z "$EPEL_PACKAGES" ]; then
$PKGMGR install -y $PKGMGR_OPTS --enablerepo epel $EPEL_PACKAGES
fi
fi
# If there's a constraints file, use it.
if [ -f /output/upper-constraints.txt ] ; then
CONSTRAINTS="-c /output/upper-constraints.txt"
fi
# If a requirements.txt file exists,
# install it directly so that people can use git url syntax
# to do things like pick up patched but unreleased versions
# of dependencies.
if [ -f /output/requirements.txt ] ; then
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels -r /output/requirements.txt
fi
# Add any requested extras to the list of things to install
EXTRAS=""
for extra in $* ; do
EXTRAS="${EXTRAS} -r /output/$extra/requirements.txt"
done
if [ -f /output/packages.txt ] ; then
# If a package list was passed to assemble, install that in the final
# image.
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels -r /output/packages.txt $EXTRAS
else
# Install the wheels. Uninstall any existing version as siblings maybe
# be built with the same version number as the latest release, but we
# really want the speculatively built wheels installed over any
# automatic dependencies.
# NOTE(pabelanger): It is possible a project may not have a wheel, but does have requirements.txt
if [ $(ls -1 /output/wheels/*whl 2>/dev/null | wc -l) -gt 0 ]; then
$PIPCMD uninstall -y /output/wheels/*.whl
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels /output/wheels/*.whl $EXTRAS
elif [ ! -z "$EXTRAS" ] ; then
$PIPCMD uninstall -y $EXTRAS
$PIPCMD install $CONSTRAINTS $PIP_OPTS --cache-dir=/output/wheels $EXTRAS
fi
fi
# clean up after ourselves, unless requested to keep the cache
if [[ "$PKGMGR_PRESERVE_CACHE" != always ]]; then
$PKGMGR clean all
rm -rf /var/cache/{dnf,yum}
fi
rm -rf /var/lib/dnf/history.*
rm -rf /var/log/{dnf.*,hawkey.log}


@@ -1,400 +0,0 @@
import argparse
import logging
import os
import sys
import yaml
import requirements
import importlib.metadata
base_collections_path = '/usr/share/ansible/collections'
default_file = 'execution-environment.yml'
logger = logging.getLogger(__name__)
def line_is_empty(line):
return bool((not line.strip()) or line.startswith('#'))
def read_req_file(path):
"""Provide some minimal error and display handling for file reading"""
if not os.path.exists(path):
print('Expected requirements file not present at: {0}'.format(os.path.abspath(path)))
with open(path, 'r') as f:
return f.read()
def pip_file_data(path):
pip_content = read_req_file(path)
pip_lines = []
for line in pip_content.split('\n'):
if line_is_empty(line):
continue
if line.startswith('-r') or line.startswith('--requirement'):
_, new_filename = line.split(None, 1)
new_path = os.path.join(os.path.dirname(path or '.'), new_filename)
pip_lines.extend(pip_file_data(new_path))
else:
pip_lines.append(line)
return pip_lines
def bindep_file_data(path):
sys_content = read_req_file(path)
sys_lines = []
for line in sys_content.split('\n'):
if line_is_empty(line):
continue
sys_lines.append(line)
return sys_lines
def process_collection(path):
"""Return a tuple of (python_dependencies, system_dependencies) for the
collection install path given.
Both items returned are a list of dependencies.
:param str path: root directory of collection (this would contain galaxy.yml file)
"""
CD = CollectionDefinition(path)
py_file = CD.get_dependency('python')
pip_lines = []
if py_file:
pip_lines = pip_file_data(os.path.join(path, py_file))
sys_file = CD.get_dependency('system')
bindep_lines = []
if sys_file:
bindep_lines = bindep_file_data(os.path.join(path, sys_file))
return (pip_lines, bindep_lines)
def process(data_dir=base_collections_path, user_pip=None, user_bindep=None):
paths = []
path_root = os.path.join(data_dir, 'ansible_collections')
# build a list of all the valid collection paths
if os.path.exists(path_root):
for namespace in sorted(os.listdir(path_root)):
if not os.path.isdir(os.path.join(path_root, namespace)):
continue
for name in sorted(os.listdir(os.path.join(path_root, namespace))):
collection_dir = os.path.join(path_root, namespace, name)
if not os.path.isdir(collection_dir):
continue
files_list = os.listdir(collection_dir)
if 'galaxy.yml' in files_list or 'MANIFEST.json' in files_list:
paths.append(collection_dir)
# populate the requirements content
py_req = {}
sys_req = {}
for path in paths:
col_pip_lines, col_sys_lines = process_collection(path)
CD = CollectionDefinition(path)
namespace, name = CD.namespace_name()
key = '{}.{}'.format(namespace, name)
if col_pip_lines:
py_req[key] = col_pip_lines
if col_sys_lines:
sys_req[key] = col_sys_lines
# add on entries from user files, if they are given
if user_pip:
col_pip_lines = pip_file_data(user_pip)
if col_pip_lines:
py_req['user'] = col_pip_lines
if user_bindep:
col_sys_lines = bindep_file_data(user_bindep)
if col_sys_lines:
sys_req['user'] = col_sys_lines
return {
'python': py_req,
'system': sys_req
}
def has_content(candidate_file):
"""Beyond checking that the candidate exists, this also assures
that the file has something other than whitespace,
which can cause errors when given to pip.
"""
if not os.path.exists(candidate_file):
return False
with open(candidate_file, 'r') as f:
content = f.read()
return bool(content.strip().strip('\n'))
class CollectionDefinition:
"""This class represents the dependency metadata for a collection
should be replaced by logic to hit the Galaxy API if made available
"""
def __init__(self, collection_path):
self.reference_path = collection_path
meta_file = os.path.join(collection_path, 'meta', default_file)
if os.path.exists(meta_file):
with open(meta_file, 'r') as f:
self.raw = yaml.safe_load(f)
else:
self.raw = {'version': 1, 'dependencies': {}}
# Automatically infer requirements for collection
for entry, filename in [('python', 'requirements.txt'), ('system', 'bindep.txt')]:
candidate_file = os.path.join(collection_path, filename)
if has_content(candidate_file):
self.raw['dependencies'][entry] = filename
def target_dir(self):
namespace, name = self.namespace_name()
return os.path.join(
base_collections_path, 'ansible_collections',
namespace, name
)
def namespace_name(self):
"Returns 2-tuple of namespace and name"
path_parts = [p for p in self.reference_path.split(os.path.sep) if p]
return tuple(path_parts[-2:])
def get_dependency(self, entry):
"""A collection is only allowed to reference a file by a relative path
which is relative to the collection root
"""
req_file = self.raw.get('dependencies', {}).get(entry)
if req_file is None:
return None
elif os.path.isabs(req_file):
raise RuntimeError(
'Collections must specify relative paths for requirements files. '
'The file {0} specified by {1} violates this.'.format(
req_file, self.reference_path
)
)
return req_file
def simple_combine(reqs):
"""Given a dictionary of requirement lines keyed off collections,
return a list with the most basic of de-duplication logic,
and comments indicating the sources based off the collection keys
"""
consolidated = []
fancy_lines = []
for collection, lines in reqs.items():
for line in lines:
if line_is_empty(line):
continue
base_line = line.split('#')[0].strip()
if base_line in consolidated:
i = consolidated.index(base_line)
fancy_lines[i] += ', {}'.format(collection)
else:
fancy_line = base_line + ' # from collection {}'.format(collection)
consolidated.append(base_line)
fancy_lines.append(fancy_line)
return fancy_lines
def parse_args(args=sys.argv[1:]):
parser = argparse.ArgumentParser(
prog='introspect',
description=(
'ansible-builder introspection; injected and used during execution environment build'
)
)
subparsers = parser.add_subparsers(help='The command to invoke.', dest='action')
subparsers.required = True
create_introspect_parser(subparsers)
args = parser.parse_args(args)
return args
def run_introspect(args, logger):
data = process(args.folder, user_pip=args.user_pip, user_bindep=args.user_bindep)
if args.sanitize:
logger.info('# Sanitized dependencies for %s', args.folder)
data_for_write = data
data['python'] = sanitize_requirements(data['python'])
data['system'] = simple_combine(data['system'])
else:
logger.info('# Dependency data for %s', args.folder)
data_for_write = data.copy()
data_for_write['python'] = simple_combine(data['python'])
data_for_write['system'] = simple_combine(data['system'])
print('---')
print(yaml.dump(data, default_flow_style=False))
if args.write_pip and data.get('python'):
write_file(args.write_pip, data_for_write.get('python') + [''])
if args.write_bindep and data.get('system'):
write_file(args.write_bindep, data_for_write.get('system') + [''])
sys.exit(0)
def create_introspect_parser(parser):
introspect_parser = parser.add_parser(
'introspect',
help='Introspects collections in folder.',
description=(
'Loops over collections in folder and returns data about dependencies. '
'This is used internally and exposed here for verification. '
'This is targeted toward collection authors and maintainers.'
)
)
introspect_parser.add_argument('--sanitize', action='store_true',
help=('Sanitize and de-duplicate requirements. '
'This is normally done separately from the introspect script, but this '
'option is given to more accurately test collection content.'))
introspect_parser.add_argument(
'folder', default=base_collections_path, nargs='?',
help=(
'Ansible collections path(s) to introspect. '
'This should have a folder named ansible_collections inside of it.'
)
)
# Combine user requirements and collection requirements into single file
# in the future, could look into passing multilple files to
# python-builder scripts to be fed multiple files as opposed to this
introspect_parser.add_argument(
'--user-pip', dest='user_pip',
help='An additional file to combine with collection pip requirements.'
)
introspect_parser.add_argument(
'--user-bindep', dest='user_bindep',
help='An additional file to combine with collection bindep requirements.'
)
introspect_parser.add_argument(
'--write-pip', dest='write_pip',
help='Write the combined pip requirements file to this location.'
)
introspect_parser.add_argument(
'--write-bindep', dest='write_bindep',
help='Write the combined bindep requirements file to this location.'
)
return introspect_parser
EXCLUDE_REQUIREMENTS = frozenset((
# obviously already satisfied or unwanted
'ansible', 'ansible-base', 'python', 'ansible-core',
# general python test requirements
'tox', 'pycodestyle', 'yamllint', 'pylint',
'flake8', 'pytest', 'pytest-xdist', 'coverage', 'mock', 'testinfra',
# test requirements highly specific to Ansible testing
'ansible-lint', 'molecule', 'galaxy-importer', 'voluptuous',
# already present in image for py3 environments
'yaml', 'pyyaml', 'json',
))
def sanitize_requirements(collection_py_reqs):
"""
Cleanup Python requirements by removing duplicates and excluded packages.
The user requirements file will go through the deduplication process, but
skips the special package exclusion process.
:param dict collection_py_reqs: A dict of lists of Python requirements, keyed
by fully qualified collection name. The special key `user` holds requirements
from the user specified requirements file from the ``--user-pip`` CLI option.
:returns: A finalized list of sanitized Python requirements.
"""
# de-duplication
consolidated = []
seen_pkgs = set()
for collection, lines in collection_py_reqs.items():
try:
for req in requirements.parse('\n'.join(lines)):
if req.specifier:
req.name = importlib.metadata.Prepared(req.name).normalized
req.collections = [collection] # add backref for later
if req.name is None:
consolidated.append(req)
continue
if req.name in seen_pkgs:
for prior_req in consolidated:
if req.name == prior_req.name:
prior_req.specs.extend(req.specs)
prior_req.collections.append(collection)
break
continue
consolidated.append(req)
seen_pkgs.add(req.name)
except Exception as e:
logger.warning('Warning: failed to parse requirements from %s, error: %s', collection, e)
# removal of unwanted packages
sanitized = []
for req in consolidated:
# Exclude packages, unless it was present in the user supplied requirements.
if req.name and req.name.lower() in EXCLUDE_REQUIREMENTS and 'user' not in req.collections:
logger.debug('# Excluding requirement %s from %s', req.name, req.collections)
continue
if req.vcs or req.uri:
# Requirement like git+ or http return as-is
new_line = req.line
elif req.name:
specs = ['{0}{1}'.format(cmp, ver) for cmp, ver in req.specs]
new_line = req.name + ','.join(specs)
else:
raise RuntimeError('Could not process {0}'.format(req.line))
sanitized.append(new_line + ' # from collection {}'.format(','.join(req.collections)))
return sanitized
def write_file(filename: str, lines: list) -> bool:
parent_dir = os.path.dirname(filename)
if parent_dir and not os.path.exists(parent_dir):
logger.warning('Creating parent directory for %s', filename)
os.makedirs(parent_dir)
new_text = '\n'.join(lines)
if os.path.exists(filename):
with open(filename, 'r') as f:
if f.read() == new_text:
logger.debug("File %s is already up-to-date.", filename)
return False
else:
logger.warning('File %s had modifications and will be rewritten', filename)
with open(filename, 'w') as f:
f.write(new_text)
return True
def main():
args = parse_args()
if args.action == 'introspect':
run_introspect(args, logger)
logger.error("An error has occurred.")
sys.exit(1)
if __name__ == '__main__':
main()

minimal-environment.yml: new file, 43 lines

@@ -0,0 +1,43 @@
---
version: 3
build_arg_defaults:
  ANSIBLE_GALAXY_CLI_COLLECTION_OPTS: '--pre'
#ansible_config: ansible.cfg
dependencies:
  ansible_core:
    package_pip: ansible-core>=2.15.0rc2,<2.16
  ansible_runner:
    package_pip: ansible-runner
  system: bindep.txt
  python: requirements.txt
  galaxy: requirements.yml
images:
  base_image:
    name: docker.io/rockylinux:9.3-minimal
additional_build_files:
  - src: ansible.cfg
    dest: configs
additional_build_steps:
  prepend_base:
    - RUN cp /usr/bin/microdnf /usr/bin/dnf && microdnf install python3 -y && /usr/bin/python3 -m ensurepip && /usr/bin/python3 -m pip install --upgrade pip cmake azure-identity azure-cli-core paramiko && $PYCMD -m pip install -U pip
  # prepend_galaxy: |
  #   RUN pip3 install --upgrade pip setuptools ansible ansible-runner
  prepend_galaxy:
    # - RUN /usr/bin/python3 -m pip install --upgrade pip cmake
    - COPY _build/configs/ansible.cfg /etc/ansible/ansible.cfg
  #prepend_final: |
  #-
  #  RUN pip3 install --upgrade pip setuptools ansible ansible-runner
  #  RUN yum install wget unzip gcc python3-devel -y
  #append_base:
  #  - RUN $PYCMD -m pip install -U pip
  append_final:
    # - RUN pip3 install -r /usr/share/ansible/collections/ansible_collections/azure/azcollection/requirements-azure.txt
    - COPY --from=quay.io/ansible/receptor:devel /usr/bin/receptor /usr/bin/receptor
    - RUN mkdir -p /var/run/receptor && git lfs install --system
    #- RUN mkdir -p /etc/ansible
    #- COPY _build/configs/ansible.cfg /etc/ansible/ansible.cfg


@@ -6,7 +6,7 @@ ipaddr==2.2.0
 git+https://github.com/ansible/ansible-sign
 ncclient
-paramiko
+paramiko>=3.4.1
 pykerberos
 pyOpenSSL
 pypsrp[kerberos,credssp]
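This hunk matches the "updated parakamiko to be above 3.4.1" commit in the list above: the unpinned paramiko entry gains a >=3.4.1 floor. A quick, hypothetical way to confirm the floor actually lands in a rebuilt image (the tag is the Centos one pushed by the workflow; the command is only an example check):

# Example check: print the paramiko version baked into a rebuilt EE image
docker run --rm git.hyperon.be/bram/ultimate-ee-centos9 python3 -c 'import paramiko; print(paramiko.__version__)'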

rocky-environment.yml: new file, 61 lines

@@ -0,0 +1,61 @@
---
version: 3
build_arg_defaults:
  ANSIBLE_GALAXY_CLI_COLLECTION_OPTS: '--pre'
#ansible_config: ansible.cfg
dependencies:
  ansible_core:
    package_pip: ansible-core>=2.15.0rc2,<2.16
  ansible_runner:
    package_pip: ansible-runner
  system: bindep.txt
  python: requirements.txt
  galaxy: requirements.yml
# options:
#   container_init:
#     package_pip: dumb-init>=1.2.5
#     entrypoint: '["dumb-init"]'
#     cmd: '["csh"]'
#   package_manager_path: /usr/bin/microdnf
#   relax_password_permissions: false
#   skip_ansible_check: true
#   workdir: /myworkdir
#   user: bob
#   tags:
#     - ee_development:latest
images:
  base_image:
    name: docker.io/rockylinux:9.3
additional_build_files:
  - src: ansible.cfg
    dest: configs
additional_build_steps:
  prepend_base:
    - RUN /usr/bin/python3 -m ensurepip
    - RUN /usr/bin/python3 -m pip install --upgrade pip
  # prepend_galaxy: |
  #   RUN pip3 install --upgrade pip setuptools ansible ansible-runner
  prepend_galaxy:
    - RUN /usr/bin/python3 -m pip install --upgrade pip cmake
    - COPY _build/configs/ansible.cfg /etc/ansible/ansible.cfg
  prepend_final: |
    RUN whoami
    RUN cat /etc/os-release
  # RUN pip3 install --upgrade pip setuptools ansible ansible-runner
  # RUN yum install wget unzip gcc python3-devel -y
  append_base:
    - RUN $PYCMD -m pip install -U pip
  append_final:
    # - RUN pip3 install -r /usr/share/ansible/collections/ansible_collections/azure/azcollection/requirements-azure.txt
    - RUN pip3 install --upgrade azure-identity azure-cli-core paramiko
    - COPY --from=quay.io/ansible/receptor:devel /usr/bin/receptor /usr/bin/receptor
    - RUN mkdir -p /var/run/receptor
    - RUN git lfs install --system
    #- RUN mkdir -p /etc/ansible
    #- COPY _build/configs/ansible.cfg /etc/ansible/ansible.cfg
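Like the workflows earlier in this diff, this definition is consumed by ansible-builder, which renders it into a container build context. A minimal sketch of generating and inspecting that context with the paths the Rocky workflow uses (the generated file name may be Dockerfile or Containerfile depending on builder version and options):

# Sketch: render rocky-environment.yml and look at what ansible-builder generated
ansible-builder create -f rocky-environment.yml -c context-rocky
ls context-rocky   # expect a Dockerfile/Containerfile plus a _build/ directory with requirements and scripts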

sonar-project.properties: new file, 2 lines

@@ -0,0 +1,2 @@
sonar.projectKey=test
sonar.sources=.
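The Build workflow earlier in this diff feeds these two properties to sonarsource/sonarqube-scan-action along with the SONAR_TOKEN and SONAR_HOST_URL secrets. An ad-hoc scan from a workstation might look like the following sketch; the host URL and token are placeholders, and sonar-scanner is assumed to be on PATH:

# Sketch: run the same analysis outside CI (placeholder host and token)
export SONAR_TOKEN="<your-token>"
sonar-scanner \
  -Dsonar.projectKey=test \
  -Dsonar.sources=. \
  -Dsonar.host.url="https://<your-sonarqube-host>"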