diff --git a/.editorconfig b/.editorconfig
new file mode 100755
index 00000000..5f150f35
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,20 @@
+# This file is globally distributed to all container image projects from
+# https://github.com/linuxserver/docker-jenkins-builder/blob/master/.editorconfig
+
+# top-most EditorConfig file
+root = true
+
+# Unix-style newlines with a newline ending every file
+[*]
+end_of_line = lf
+insert_final_newline = true
+# trim_trailing_whitespace may cause unintended issues and should not be globally set true
+trim_trailing_whitespace = false
+
+[{Dockerfile*,**.yml}]
+indent_style = space
+indent_size = 2
+
+[{**.sh,root/etc/s6-overlay/s6-rc.d/**,root/etc/cont-init.d/**,root/etc/services.d/**}]
+indent_style = space
+indent_size = 4
diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md
new file mode 100755
index 00000000..78926065
--- /dev/null
+++ b/.github/CONTRIBUTING.md
@@ -0,0 +1,123 @@
+# Contributing to plex
+
+## Gotchas
+
+* While contributing, make all your changes before creating a Pull Request, as our pipeline builds each commit once the PR is open.
+* Read and fill in the Pull Request template.
+  * If this is a fix for a typo (in code, documentation, or the README), please file an issue and let us sort it out. We do not need a PR.
+  * If the PR addresses an existing issue, include "closes #\<issue number\>" in the body of the PR commit message.
+* If you want to discuss changes, you can also bring it up in [#dev-talk](https://discordapp.com/channels/354974912613449730/757585807061155840) in our [Discord server](https://linuxserver.io/discord).
+
+## Common files
+
+| File | Use case |
+| :----: | --- |
+| `Dockerfile` | Dockerfile used to build amd64 images |
+| `Dockerfile.aarch64` | Dockerfile used to build 64bit ARM architectures |
+| `Dockerfile.armhf` | Dockerfile used to build 32bit ARM architectures |
+| `Jenkinsfile` | This file is a product of our builder and should not be edited directly. It is used to build the image |
+| `jenkins-vars.yml` | This file is used to generate the `Jenkinsfile` mentioned above; it only affects the build process |
+| `package_versions.txt` | This file is generated as part of the build process and should not be edited directly. It lists all the installed packages and their versions |
+| `README.md` | This file is a product of our builder and should not be edited directly. It displays the readme for the repository and image registries |
+| `readme-vars.yml` | This file is used to generate the `README.md` |
+
+## Readme
+
+If you would like to change our readme, please __**do not**__ edit it directly, as it is auto-generated on each commit.
+Instead, edit the [readme-vars.yml](https://github.com/linuxserver/docker-plex/edit/master/readme-vars.yml).
+
+These variables are used in a template for our [Jenkins Builder](https://github.com/linuxserver/docker-jenkins-builder) as part of an Ansible play.
+Most of these variables are also carried over to [docs.linuxserver.io](https://docs.linuxserver.io/images/docker-plex).
+
+### Fixing typos or clarifying the text in the readme
+
+There are variables for multiple parts of the readme; the most common ones are:
+
+| Variable | Description |
+| :----: | --- |
+| `project_blurb` | This is the short excerpt shown above the project logo. |
+| `app_setup_block` | This is the text that shows up under "Application Setup" if enabled |
+
+### Parameters
+
+The compose and run examples are also generated from these variables.
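For instance, an environment variable entry from `readme-vars.yml` ends up as an `-e` flag in the generated run example and under `environment:` in the compose example. A rough, illustrative rendering only (the real output is produced by the builder templates, and the host config path below is a placeholder):

```bash
# Illustrative only: approximate run example generated from a TZ env var entry
docker run -d \
  --name=plex \
  -e TZ=Europe/London \
  -v /path/to/plex/config:/config \
  linuxserver/plex:latest
```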
+
+We have a [reference file](https://github.com/linuxserver/docker-jenkins-builder/blob/master/vars/_container-vars-blank) in our Jenkins Builder.
+
+These are prefixed with `param_` for required parameters, or `opt_param_` for optional parameters, except for `cap_add`.
+Remember to enable the parameter if it is currently disabled. This differs between parameters and can be seen in the reference file.
+
+Devices, environment variables, ports and volumes each expect their variables in a certain way.
+
+### Devices
+
+```yml
+param_devices:
+  - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" }
+opt_param_devices:
+  - { device_path: "/dev/dri", device_host_path: "/dev/dri", desc: "For hardware transcoding" }
+```
+
+### Environment variables
+
+```yml
+param_env_vars:
+  - { env_var: "TZ", env_value: "Europe/London", desc: "Specify a timezone to use EG Europe/London." }
+opt_param_env_vars:
+  - { env_var: "VERSION", env_value: "latest", desc: "Supported values are LATEST, PLEXPASS or a specific version number." }
+```
+
+### Ports
+
+```yml
+param_ports:
+  - { external_port: "80", internal_port: "80", port_desc: "Application WebUI" }
+opt_param_ports:
+  - { external_port: "80", internal_port: "80", port_desc: "Application WebUI" }
+```
+
+### Volumes
+
+```yml
+param_volumes:
+  - { vol_path: "/config", vol_host_path: "", desc: "Configuration files." }
+opt_param_volumes:
+  - { vol_path: "/config", vol_host_path: "", desc: "Configuration files." }
+```
+
+### Testing template changes
+
+After you make any changes to the templates, you can use our [Jenkins Builder](https://github.com/linuxserver/docker-jenkins-builder) to have the files updated from the modified templates. Please use the command found under `Running Locally` [on this page](https://github.com/linuxserver/docker-jenkins-builder/blob/master/README.md) to generate them prior to submitting a PR.
+
+## Dockerfiles
+
+We use multiple Dockerfiles in our repos because some CPU architectures need different packages to work.
+If you are proposing additional packages to be added, ensure that you add the packages to all the Dockerfiles in alphabetical order.
+
+### Testing your changes
+
+```bash
+git clone https://github.com/linuxserver/docker-plex.git
+cd docker-plex
+docker build \
+  --no-cache \
+  --pull \
+  -t linuxserver/plex:latest .
+```
+
+The ARM variants can be built on x86_64 hardware and vice versa using `lscr.io/linuxserver/qemu-static`:
+
+```bash
+docker run --rm --privileged lscr.io/linuxserver/qemu-static --reset
+```
+
+Once registered, you can define the Dockerfile to use with `-f Dockerfile.aarch64`.
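For example, once the qemu handlers are registered, a cross-build of the arm64 variant uses the same command as above, just pointing at the other Dockerfile (the tag name here is only illustrative):

```bash
# Cross-build the arm64 variant on x86_64 hardware after registering qemu-static
docker build \
  --no-cache \
  --pull \
  -f Dockerfile.aarch64 \
  -t linuxserver/plex:arm64v8-latest .
```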
+ +## Update the changelog + +If you are modifying the Dockerfiles or any of the startup scripts in [root](https://github.com/linuxserver/docker-plex/tree/master/root), add an entry to the changelog + +```yml +changelogs: + - { date: "DD.MM.YY:", desc: "Added some love to templates" } +``` diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml index 79722137..7eaac771 100755 --- a/.github/FUNDING.yml +++ b/.github/FUNDING.yml @@ -1 +1,2 @@ +github: linuxserver open_collective: linuxserver diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md deleted file mode 100644 index c73c33be..00000000 --- a/.github/ISSUE_TEMPLATE.md +++ /dev/null @@ -1,21 +0,0 @@ - - -[linuxserverurl]: https://linuxserver.io -[![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)][linuxserverurl] - - - - - - - - - - - - - - - -## Thanks, team linuxserver.io - diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100755 index 00000000..d0e43ea2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,13 @@ +blank_issues_enabled: false +contact_links: + - name: Discord chat support + url: https://linuxserver.io/discord + about: Realtime support / chat with the community and the team. + + - name: Discourse discussion forum + url: https://discourse.linuxserver.io + about: Post on our community forum. + + - name: Documentation + url: https://docs.linuxserver.io/images/docker-plex + about: Documentation - information about all of our containers. diff --git a/.github/ISSUE_TEMPLATE/issue.bug.yml b/.github/ISSUE_TEMPLATE/issue.bug.yml new file mode 100755 index 00000000..6c6659a4 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/issue.bug.yml @@ -0,0 +1,76 @@ +# Based on the issue template +name: Bug report +description: Create a report to help us improve +title: "[BUG] " +labels: [Bug] +body: + - type: checkboxes + attributes: + label: Is there an existing issue for this? + description: Please search to see if an issue already exists for the bug you encountered. + options: + - label: I have searched the existing issues + required: true + - type: textarea + attributes: + label: Current Behavior + description: Tell us what happens instead of the expected behavior. + validations: + required: true + - type: textarea + attributes: + label: Expected Behavior + description: Tell us what should happen. + validations: + required: false + - type: textarea + attributes: + label: Steps To Reproduce + description: Steps to reproduce the behavior. + placeholder: | + 1. In this environment... + 2. With this config... + 3. Run '...' + 4. See error... 
+ validations: + required: true + - type: textarea + attributes: + label: Environment + description: | + examples: + - **OS**: Ubuntu 20.04 + - **How docker service was installed**: distro's packagemanager + value: | + - OS: + - How docker service was installed: + render: markdown + validations: + required: false + - type: dropdown + attributes: + label: CPU architecture + options: + - x86-64 + - arm64 + validations: + required: true + - type: textarea + attributes: + label: Docker creation + description: | + Command used to create docker container + Provide your docker create/run command or compose yaml snippet, or a screenshot of settings if using a gui to create the container + render: bash + validations: + required: true + - type: textarea + attributes: + description: | + Provide a full docker log, output of "docker logs plex" + label: Container logs + placeholder: | + Output of `docker logs plex` + render: bash + validations: + required: true diff --git a/.github/ISSUE_TEMPLATE/issue.feature.yml b/.github/ISSUE_TEMPLATE/issue.feature.yml new file mode 100755 index 00000000..099dcdb5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/issue.feature.yml @@ -0,0 +1,31 @@ +# Based on the issue template +name: Feature request +description: Suggest an idea for this project +title: "[FEAT] <title>" +labels: [enhancement] +body: + - type: checkboxes + attributes: + label: Is this a new feature request? + description: Please search to see if a feature request already exists. + options: + - label: I have searched the existing issues + required: true + - type: textarea + attributes: + label: Wanted change + description: Tell us what you want to happen. + validations: + required: true + - type: textarea + attributes: + label: Reason for change + description: Justify your request, why do you want it, what is the benefit. + validations: + required: true + - type: textarea + attributes: + label: Proposed code change + description: Do you have a potential code change in mind? + validations: + required: false diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index f6a6381d..8ccdb75c 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -2,14 +2,42 @@ [linuxserverurl]: https://linuxserver.io [![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)][linuxserverurl] - + <!--- Before submitting a pull request please check the following --> -<!--- That you have made a branch in your fork, we'd rather not merge from your master --> +<!--- If this is a fix for a typo (in code, documentation, or the README) please file an issue and let us sort it out. We do not need a PR --> +<!--- Ask yourself if this modification is something the whole userbase will benefit from, if this is a specific change for corner case functionality or plugins please look at making a Docker Mod or local script https://blog.linuxserver.io/2019/09/14/customizing-our-containers/ --> <!--- That if the PR is addressing an existing issue include, closes #<issue number> , in the body of the PR commit message --> <!--- You have included links to any files / patches etc your PR may be using in the body of the PR commit message --> -<!--- --> +<!--- We maintain a changelog of major revisions to the container at the end of readme-vars.yml in the root of this repository, please add your changes there if appropriate --> + + +<!--- Coding guidelines: --> +<!--- 1. 
Installed packages in the Dockerfiles should be in alphabetical order --> +<!--- 2. Changes to Dockerfile should be replicated in Dockerfile.armhf and Dockerfile.aarch64 if applicable --> +<!--- 3. Indentation style (tabs vs 4 spaces vs 1 space) should match the rest of the document --> +<!--- 4. Readme is auto generated from readme-vars.yml, make your changes there --> + +------------------------------ + + - [ ] I have read the [contributing](https://github.com/linuxserver/docker-plex/blob/master/.github/CONTRIBUTING.md) guideline and understand that I have made the correct modifications + +------------------------------ + +<!--- We welcome all PR’s though this doesn’t guarantee it will be accepted. --> + +## Description: +<!--- Describe your changes in detail --> + +## Benefits of this PR and context: +<!--- Please explain why we should accept this PR. If this fixes an outstanding bug, please reference the issue # --> + +## How Has This Been Tested? +<!--- Please describe in detail how you tested your changes. --> +<!--- Include details of your testing environment, and the tests you ran to --> +<!--- see how your change affects other areas of the code, etc. --> -## Thanks, team linuxserver.io +## Source / References: +<!--- Please include any forum posts/github links relevant to the PR --> diff --git a/.github/workflows/call_issue_pr_tracker.yml b/.github/workflows/call_issue_pr_tracker.yml new file mode 100755 index 00000000..d07cf121 --- /dev/null +++ b/.github/workflows/call_issue_pr_tracker.yml @@ -0,0 +1,19 @@ +name: Issue & PR Tracker + +on: + issues: + types: [opened,reopened,labeled,unlabeled,closed] + pull_request_target: + types: [opened,reopened,review_requested,review_request_removed,labeled,unlabeled,closed] + pull_request_review: + types: [submitted,edited,dismissed] + +permissions: + contents: read + +jobs: + manage-project: + permissions: + issues: write + uses: linuxserver/github-workflows/.github/workflows/issue-pr-tracker.yml@v1 + secrets: inherit diff --git a/.github/workflows/call_issues_cron.yml b/.github/workflows/call_issues_cron.yml new file mode 100755 index 00000000..77637220 --- /dev/null +++ b/.github/workflows/call_issues_cron.yml @@ -0,0 +1,16 @@ +name: Mark stale issues and pull requests +on: + schedule: + - cron: '33 12 * * *' + workflow_dispatch: + +permissions: + contents: read + +jobs: + stale: + permissions: + issues: write + pull-requests: write + uses: linuxserver/github-workflows/.github/workflows/issues-cron.yml@v1 + secrets: inherit diff --git a/.github/workflows/external_trigger.yml b/.github/workflows/external_trigger.yml new file mode 100644 index 00000000..5cadde5a --- /dev/null +++ b/.github/workflows/external_trigger.yml @@ -0,0 +1,147 @@ +name: External Trigger Main + +on: + workflow_dispatch: + +permissions: + contents: read + +jobs: + external-trigger-master: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4.1.1 + + - name: External Trigger + if: github.ref == 'refs/heads/master' + env: + SKIP_EXTERNAL_TRIGGER: ${{ vars.SKIP_EXTERNAL_TRIGGER }} + run: | + printf "# External trigger for docker-plex\n\n" >> $GITHUB_STEP_SUMMARY + if grep -q "^plex_master_" <<< "${SKIP_EXTERNAL_TRIGGER}"; then + echo "> [!NOTE]" >> $GITHUB_STEP_SUMMARY + echo "> Github organizational variable \`SKIP_EXTERNAL_TRIGGER\` contains \`plex_master_\`; will skip trigger if version matches." 
>> $GITHUB_STEP_SUMMARY + elif grep -q "^plex_master" <<< "${SKIP_EXTERNAL_TRIGGER}"; then + echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY + echo "> Github organizational variable \`SKIP_EXTERNAL_TRIGGER\` contains \`plex_master\`; skipping trigger." >> $GITHUB_STEP_SUMMARY + exit 0 + fi + echo "> [!NOTE]" >> $GITHUB_STEP_SUMMARY + echo "> External trigger running off of master branch. To disable this trigger, add \`plex_master\` into the Github organizational variable \`SKIP_EXTERNAL_TRIGGER\`." >> $GITHUB_STEP_SUMMARY + printf "\n## Retrieving external version\n\n" >> $GITHUB_STEP_SUMMARY + EXT_RELEASE=$(curl -s "https://plex.tv/api/downloads/5.json" | jq -r '. | .computer.Linux.version') + echo "Type is \`custom_json\`" >> $GITHUB_STEP_SUMMARY + if grep -q "^plex_master_${EXT_RELEASE}" <<< "${SKIP_EXTERNAL_TRIGGER}"; then + echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY + echo "> Github organizational variable \`SKIP_EXTERNAL_TRIGGER\` matches current external release; skipping trigger." >> $GITHUB_STEP_SUMMARY + exit 0 + fi + if [ -z "${EXT_RELEASE}" ] || [ "${EXT_RELEASE}" == "null" ]; then + echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY + echo "> Can't retrieve external version, exiting" >> $GITHUB_STEP_SUMMARY + FAILURE_REASON="Can't retrieve external version for plex branch master" + GHA_TRIGGER_URL="https://github.com/linuxserver/docker-plex/actions/runs/${{ github.run_id }}" + curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 16711680, + "description": "**Trigger Failed** \n**Reason:** '"${FAILURE_REASON}"' \n**Trigger URL:** '"${GHA_TRIGGER_URL}"' \n"}], + "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }} + exit 1 + fi + EXT_RELEASE_SANITIZED=$(echo ${EXT_RELEASE} | sed 's/[~,%@+;:/]//g') + echo "Sanitized external version: \`${EXT_RELEASE_SANITIZED}\`" >> $GITHUB_STEP_SUMMARY + echo "Retrieving last pushed version" >> $GITHUB_STEP_SUMMARY + image="linuxserver/plex" + tag="latest" + token=$(curl -sX GET \ + "https://ghcr.io/token?scope=repository%3Alinuxserver%2Fplex%3Apull" \ + | jq -r '.token') + multidigest=$(curl -s \ + --header "Accept: application/vnd.docker.distribution.manifest.v2+json" \ + --header "Accept: application/vnd.oci.image.index.v1+json" \ + --header "Authorization: Bearer ${token}" \ + "https://ghcr.io/v2/${image}/manifests/${tag}") + if jq -e '.layers // empty' <<< "${multidigest}" >/dev/null 2>&1; then + # If there's a layer element it's a single-arch manifest so just get that digest + digest=$(jq -r '.config.digest' <<< "${multidigest}") + else + # Otherwise it's multi-arch or has manifest annotations + if jq -e '.manifests[]?.annotations // empty' <<< "${multidigest}" >/dev/null 2>&1; then + # Check for manifest annotations and delete if found + multidigest=$(jq 'del(.manifests[] | select(.annotations))' <<< "${multidigest}") + fi + if [[ $(jq '.manifests | length' <<< "${multidigest}") -gt 1 ]]; then + # If there's still more than one digest, it's multi-arch + multidigest=$(jq -r ".manifests[] | select(.platform.architecture == \"amd64\").digest?" <<< "${multidigest}") + else + # Otherwise it's single arch + multidigest=$(jq -r ".manifests[].digest?" 
<<< "${multidigest}") + fi + if digest=$(curl -s \ + --header "Accept: application/vnd.docker.distribution.manifest.v2+json" \ + --header "Accept: application/vnd.oci.image.manifest.v1+json" \ + --header "Authorization: Bearer ${token}" \ + "https://ghcr.io/v2/${image}/manifests/${multidigest}"); then + digest=$(jq -r '.config.digest' <<< "${digest}"); + fi + fi + image_info=$(curl -sL \ + --header "Authorization: Bearer ${token}" \ + "https://ghcr.io/v2/${image}/blobs/${digest}") + if [[ $(echo $image_info | jq -r '.container_config') == "null" ]]; then + image_info=$(echo $image_info | jq -r '.config') + else + image_info=$(echo $image_info | jq -r '.container_config') + fi + IMAGE_RELEASE=$(echo ${image_info} | jq -r '.Labels.build_version' | awk '{print $3}') + IMAGE_VERSION=$(echo ${IMAGE_RELEASE} | awk -F'-ls' '{print $1}') + if [ -z "${IMAGE_VERSION}" ]; then + echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY + echo "Can't retrieve last pushed version, exiting" >> $GITHUB_STEP_SUMMARY + FAILURE_REASON="Can't retrieve last pushed version for plex tag latest" + curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 16711680, + "description": "**Trigger Failed** \n**Reason:** '"${FAILURE_REASON}"' \n"}], + "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }} + exit 1 + fi + echo "Last pushed version: \`${IMAGE_VERSION}\`" >> $GITHUB_STEP_SUMMARY + if [ "${EXT_RELEASE_SANITIZED}" == "${IMAGE_VERSION}" ]; then + echo "Sanitized version \`${EXT_RELEASE_SANITIZED}\` already pushed, exiting" >> $GITHUB_STEP_SUMMARY + exit 0 + elif [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-plex/job/master/lastBuild/api/json | jq -r '.building') == "true" ]; then + echo "New version \`${EXT_RELEASE}\` found; but there already seems to be an active build on Jenkins; exiting" >> $GITHUB_STEP_SUMMARY + exit 0 + else + if [[ "${artifacts_found}" == "false" ]]; then + echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY + echo "> New version detected, but not all artifacts are published yet; skipping trigger" >> $GITHUB_STEP_SUMMARY + FAILURE_REASON="New version ${EXT_RELEASE} for plex tag latest is detected, however not all artifacts are uploaded to upstream release yet. Will try again later." + curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 9802903, + "description": "**Trigger Failed** \n**Reason:** '"${FAILURE_REASON}"' \n"}], + "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }} + else + printf "\n## Trigger new build\n\n" >> $GITHUB_STEP_SUMMARY + echo "New sanitized version \`${EXT_RELEASE_SANITIZED}\` found; old version was \`${IMAGE_VERSION}\`. Triggering new build" >> $GITHUB_STEP_SUMMARY + if [[ "${artifacts_found}" == "true" ]]; then + echo "All artifacts seem to be uploaded." 
>> $GITHUB_STEP_SUMMARY + fi + response=$(curl -iX POST \ + https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-plex/job/master/buildWithParameters?PACKAGE_CHECK=false \ + --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} | grep -i location | sed "s|^[L|l]ocation: \(.*\)|\1|") + echo "Jenkins [job queue url](${response%$'\r'})" >> $GITHUB_STEP_SUMMARY + echo "Sleeping 10 seconds until job starts" >> $GITHUB_STEP_SUMMARY + sleep 10 + buildurl=$(curl -s "${response%$'\r'}api/json" | jq -r '.executable.url') + buildurl="${buildurl%$'\r'}" + echo "Jenkins job [build url](${buildurl})" >> $GITHUB_STEP_SUMMARY + echo "Attempting to change the Jenkins job description" >> $GITHUB_STEP_SUMMARY + curl -iX POST \ + "${buildurl}submitDescription" \ + --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} \ + --data-urlencode "description=GHA external trigger https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ + --data-urlencode "Submit=Submit" + echo "**** Notifying Discord ****" + TRIGGER_REASON="A version change was detected for plex tag latest. Old version:${IMAGE_VERSION} New version:${EXT_RELEASE_SANITIZED}" + curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 9802903, + "description": "**Build Triggered** \n**Reason:** '"${TRIGGER_REASON}"' \n**Build URL:** '"${buildurl}display/redirect"' \n"}], + "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }} + fi + fi diff --git a/.github/workflows/external_trigger_scheduler.yml b/.github/workflows/external_trigger_scheduler.yml new file mode 100644 index 00000000..b3649f84 --- /dev/null +++ b/.github/workflows/external_trigger_scheduler.yml @@ -0,0 +1,48 @@ +name: External Trigger Scheduler + +on: + schedule: + - cron: '14 * * * *' + workflow_dispatch: + +permissions: + contents: read + +jobs: + external-trigger-scheduler: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4.1.1 + with: + fetch-depth: '0' + + - name: External Trigger Scheduler + run: | + printf "# External trigger scheduler for docker-plex\n\n" >> $GITHUB_STEP_SUMMARY + printf "Found the branches:\n\n%s\n" "$(git for-each-ref --format='- %(refname:lstrip=3)' refs/remotes)" >> $GITHUB_STEP_SUMMARY + for br in $(git for-each-ref --format='%(refname:lstrip=3)' refs/remotes) + do + if [[ "${br}" == "HEAD" ]]; then + printf "\nSkipping %s.\n" ${br} >> $GITHUB_STEP_SUMMARY + continue + fi + printf "\n## Evaluating \`%s\`\n\n" ${br} >> $GITHUB_STEP_SUMMARY + ls_jenkins_vars=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-plex/${br}/jenkins-vars.yml) + ls_branch=$(echo "${ls_jenkins_vars}" | yq -r '.ls_branch') + ls_trigger=$(echo "${ls_jenkins_vars}" | yq -r '.external_type') + if [[ "${br}" == "${ls_branch}" ]] && [[ "${ls_trigger}" != "os" ]]; then + echo "Branch appears to be live and trigger is not os; checking workflow." >> $GITHUB_STEP_SUMMARY + if curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-plex/${br}/.github/workflows/external_trigger.yml > /dev/null 2>&1; then + echo "Triggering external trigger workflow for branch." 
>> $GITHUB_STEP_SUMMARY + curl -iX POST \ + -H "Authorization: token ${{ secrets.CR_PAT }}" \ + -H "Accept: application/vnd.github.v3+json" \ + -d "{\"ref\":\"refs/heads/${br}\"}" \ + https://api.github.com/repos/linuxserver/docker-plex/actions/workflows/external_trigger.yml/dispatches + else + echo "Skipping branch due to no external trigger workflow present." >> $GITHUB_STEP_SUMMARY + fi + else + echo "Skipping branch due to being detected as dev branch or having no external version." >> $GITHUB_STEP_SUMMARY + fi + done diff --git a/.github/workflows/greetings.yml b/.github/workflows/greetings.yml new file mode 100755 index 00000000..9f32b525 --- /dev/null +++ b/.github/workflows/greetings.yml @@ -0,0 +1,19 @@ +name: Greetings + +on: [pull_request_target, issues] + +permissions: + contents: read + +jobs: + greeting: + permissions: + issues: write + pull-requests: write + runs-on: ubuntu-latest + steps: + - uses: actions/first-interaction@v1 + with: + issue-message: 'Thanks for opening your first issue here! Be sure to follow the relevant issue templates, or risk having this issue marked as invalid.' + pr-message: 'Thanks for opening this pull request! Be sure to follow the [pull request template](https://github.com/linuxserver/docker-plex/blob/master/.github/PULL_REQUEST_TEMPLATE.md)!' + repo-token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/package_trigger_scheduler.yml b/.github/workflows/package_trigger_scheduler.yml new file mode 100644 index 00000000..be3abba8 --- /dev/null +++ b/.github/workflows/package_trigger_scheduler.yml @@ -0,0 +1,103 @@ +name: Package Trigger Scheduler + +on: + schedule: + - cron: '0 9 * * 1' + workflow_dispatch: + +permissions: + contents: read + +jobs: + package-trigger-scheduler: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4.1.1 + with: + fetch-depth: '0' + + - name: Package Trigger Scheduler + env: + SKIP_PACKAGE_TRIGGER: ${{ vars.SKIP_PACKAGE_TRIGGER }} + run: | + printf "# Package trigger scheduler for docker-plex\n\n" >> $GITHUB_STEP_SUMMARY + printf "Found the branches:\n\n%s\n" "$(git for-each-ref --format='- %(refname:lstrip=3)' refs/remotes)" >> $GITHUB_STEP_SUMMARY + for br in $(git for-each-ref --format='%(refname:lstrip=3)' refs/remotes) + do + if [[ "${br}" == "HEAD" ]]; then + printf "\nSkipping %s.\n" ${br} >> $GITHUB_STEP_SUMMARY + continue + fi + printf "\n## Evaluating \`%s\`\n\n" ${br} >> $GITHUB_STEP_SUMMARY + JENKINS_VARS=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-plex/${br}/jenkins-vars.yml) + if ! curl -sfX GET https://raw.githubusercontent.com/linuxserver/docker-plex/${br}/Jenkinsfile >/dev/null 2>&1; then + echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY + echo "> No Jenkinsfile found. Branch is either deprecated or is an early dev branch." >> $GITHUB_STEP_SUMMARY + skipped_branches="${skipped_branches}${br} " + elif [[ "${br}" == $(yq -r '.ls_branch' <<< "${JENKINS_VARS}") ]]; then + echo "Branch appears to be live; checking workflow." >> $GITHUB_STEP_SUMMARY + README_VARS=$(curl -sX GET https://raw.githubusercontent.com/linuxserver/docker-plex/${br}/readme-vars.yml) + if [[ $(yq -r '.project_deprecation_status' <<< "${README_VARS}") == "true" ]]; then + echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY + echo "> Branch appears to be deprecated; skipping trigger." 
>> $GITHUB_STEP_SUMMARY + skipped_branches="${skipped_branches}${br} " + elif [[ $(yq -r '.skip_package_check' <<< "${JENKINS_VARS}") == "true" ]]; then + echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY + echo "> Skipping branch ${br} due to \`skip_package_check\` being set in \`jenkins-vars.yml\`." >> $GITHUB_STEP_SUMMARY + skipped_branches="${skipped_branches}${br} " + elif grep -q "^plex_${br}" <<< "${SKIP_PACKAGE_TRIGGER}"; then + echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY + echo "> Github organizational variable \`SKIP_PACKAGE_TRIGGER\` contains \`plex_${br}\`; skipping trigger." >> $GITHUB_STEP_SUMMARY + skipped_branches="${skipped_branches}${br} " + elif [ $(curl -s https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-plex/job/${br}/lastBuild/api/json | jq -r '.building' 2>/dev/null) == "true" ]; then + echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY + echo "> There already seems to be an active build on Jenkins; skipping package trigger for ${br}" >> $GITHUB_STEP_SUMMARY + skipped_branches="${skipped_branches}${br} " + else + echo "> [!NOTE]" >> $GITHUB_STEP_SUMMARY + echo "> Triggering package trigger for branch ${br}" >> $GITHUB_STEP_SUMMARY + printf "> To disable, add \`plex_%s\` into the Github organizational variable \`SKIP_PACKAGE_TRIGGER\`.\n\n" "${br}" >> $GITHUB_STEP_SUMMARY + triggered_branches="${triggered_branches}${br} " + response=$(curl -iX POST \ + https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-plex/job/${br}/buildWithParameters?PACKAGE_CHECK=true \ + --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} | grep -i location | sed "s|^[L|l]ocation: \(.*\)|\1|") + if [[ -z "${response}" ]]; then + echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY + echo "> Jenkins build could not be triggered. Skipping branch." + continue + fi + echo "Jenkins [job queue url](${response%$'\r'})" >> $GITHUB_STEP_SUMMARY + echo "Sleeping 10 seconds until job starts" >> $GITHUB_STEP_SUMMARY + sleep 10 + buildurl=$(curl -s "${response%$'\r'}api/json" | jq -r '.executable.url') + buildurl="${buildurl%$'\r'}" + echo "Jenkins job [build url](${buildurl})" >> $GITHUB_STEP_SUMMARY + echo "Attempting to change the Jenkins job description" >> $GITHUB_STEP_SUMMARY + if ! curl -ifX POST \ + "${buildurl}submitDescription" \ + --user ${{ secrets.JENKINS_USER }}:${{ secrets.JENKINS_TOKEN }} \ + --data-urlencode "description=GHA package trigger https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}" \ + --data-urlencode "Submit=Submit"; then + echo "> [!WARNING]" >> $GITHUB_STEP_SUMMARY + echo "> Unable to change the Jenkins job description." + fi + sleep 20 + fi + else + echo "Skipping branch ${br} due to being detected as dev branch." 
>> $GITHUB_STEP_SUMMARY + fi + done + if [[ -n "${triggered_branches}" ]] || [[ -n "${skipped_branches}" ]]; then + if [[ -n "${triggered_branches}" ]]; then + NOTIFY_BRANCHES="**Triggered:** ${triggered_branches} \n" + NOTIFY_BUILD_URL="**Build URL:** https://ci.linuxserver.io/blue/organizations/jenkins/Docker-Pipeline-Builders%2Fdocker-plex/activity/ \n" + echo "**** Package check build(s) triggered for branch(es): ${triggered_branches} ****" + fi + if [[ -n "${skipped_branches}" ]]; then + NOTIFY_BRANCHES="${NOTIFY_BRANCHES}**Skipped:** ${skipped_branches} \n" + fi + echo "**** Notifying Discord ****" + curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://cdn.discordapp.com/avatars/354986384542662657/df91181b3f1cf0ef1592fbe18e0962d7.png","embeds": [{"color": 9802903, + "description": "**Package Check Build(s) for plex** \n'"${NOTIFY_BRANCHES}"''"${NOTIFY_BUILD_URL}"'"}], + "username": "Github Actions"}' ${{ secrets.DISCORD_WEBHOOK }} + fi diff --git a/.github/workflows/permissions.yml b/.github/workflows/permissions.yml new file mode 100755 index 00000000..02e1bdb9 --- /dev/null +++ b/.github/workflows/permissions.yml @@ -0,0 +1,12 @@ +name: Permission check +on: + pull_request_target: + paths: + - '**/run' + - '**/finish' + - '**/check' + - 'root/migrations/*' + +jobs: + permission_check: + uses: linuxserver/github-workflows/.github/workflows/init-svc-executable-permissions.yml@v1 diff --git a/.gitignore b/.gitignore index 96374c4e..6e8ad977 100644 --- a/.gitignore +++ b/.gitignore @@ -41,3 +41,4 @@ $RECYCLE.BIN/ Network Trash Folder Temporary Items .apdisk +.jenkins-external diff --git a/Dockerfile b/Dockerfile index 7610c3a7..0ad3c085 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,61 +1,64 @@ -FROM lsiobase/ubuntu:bionic +# syntax=docker/dockerfile:1 + +FROM ghcr.io/linuxserver/unrar:latest AS unrar + +FROM ghcr.io/linuxserver/baseimage-ubuntu:noble # set version label ARG BUILD_DATE ARG VERSION ARG PLEX_RELEASE LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}" -LABEL maintainer="sparklyballs, thelamer" +LABEL maintainer="thelamer" #Add needed nvidia environment variables for https://github.com/NVIDIA/nvidia-docker -ENV NVIDIA_DRIVER_CAPABILITIES="compute,video,utility" +ENV NVIDIA_DRIVER_CAPABILITIES="compute,video,utility,graphics" # global environment settings ENV DEBIAN_FRONTEND="noninteractive" \ -PLEX_DOWNLOAD="https://downloads.plex.tv/plex-media-server-new" \ -PLEX_ARCH="amd64" \ -PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR="/config/Library/Application Support" \ -PLEX_MEDIA_SERVER_HOME="/usr/lib/plexmediaserver" \ -PLEX_MEDIA_SERVER_MAX_PLUGIN_PROCS="6" \ -PLEX_MEDIA_SERVER_USER="abc" \ -PLEX_MEDIA_SERVER_INFO_VENDOR="Docker" \ -PLEX_MEDIA_SERVER_INFO_DEVICE="Docker Container (LinuxServer.io)" + PLEX_DOWNLOAD="https://downloads.plex.tv/plex-media-server-new" \ + PLEX_ARCH="amd64" \ + PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR="/config/Library/Application Support" \ + PLEX_MEDIA_SERVER_HOME="/usr/lib/plexmediaserver" \ + PLEX_MEDIA_SERVER_MAX_PLUGIN_PROCS="6" \ + PLEX_MEDIA_SERVER_USER="abc" \ + PLEX_MEDIA_SERVER_INFO_VENDOR="Docker" \ + PLEX_MEDIA_SERVER_INFO_DEVICE="Docker Container (LinuxServer.io)" \ + TMPDIR=/run/plex-temp \ + ATTACHED_DEVICES_PERMS="/dev/dri /dev/dvb -type c" RUN \ - echo "**** install runtime packages ****" && \ - apt-get update && \ - apt-get install -y \ - udev \ - unrar \ - wget \ - jq && \ - echo "**** Udevadm hack ****" && \ - mv /sbin/udevadm /sbin/udevadm.bak && \ - echo "exit 0" > 
/sbin/udevadm && \ - chmod +x /sbin/udevadm && \ - echo "**** install plex ****" && \ - if [ -z ${PLEX_RELEASE+x} ]; then \ - PLEX_RELEASE=$(curl -sX GET 'https://plex.tv/api/downloads/5.json' \ - | jq -r '.computer.Linux.version'); \ - fi && \ - curl -o \ - /tmp/plexmediaserver.deb -L \ - "${PLEX_DOWNLOAD}/${PLEX_RELEASE}/debian/plexmediaserver_${PLEX_RELEASE}_${PLEX_ARCH}.deb" && \ - dpkg -i /tmp/plexmediaserver.deb && \ - mv /sbin/udevadm.bak /sbin/udevadm && \ - echo "**** ensure abc user's home folder is /app ****" && \ - usermod -d /app abc && \ - echo "**** cleanup ****" && \ - apt-get clean && \ - rm -rf \ - /etc/default/plexmediaserver \ - /tmp/* \ - /var/lib/apt/lists/* \ - /var/tmp/* + echo "**** install runtime packages ****" && \ + apt-get update && \ + apt-get install -y \ + udev \ + wget && \ + echo "**** install plex ****" && \ + if [ -z ${PLEX_RELEASE+x} ]; then \ + PLEX_RELEASE=$(curl -sX GET 'https://plex.tv/api/downloads/5.json' \ + | jq -r '.computer.Linux.version'); \ + fi && \ + curl -o \ + /tmp/plexmediaserver.deb -L \ + "${PLEX_DOWNLOAD}/${PLEX_RELEASE}/debian/plexmediaserver_${PLEX_RELEASE}_${PLEX_ARCH}.deb" && \ + dpkg -i /tmp/plexmediaserver.deb && \ + echo "**** ensure abc user's home folder is /app ****" && \ + usermod -d /app abc && \ + printf "Linuxserver.io version: ${VERSION}\nBuild-date: ${BUILD_DATE}" > /build_version && \ + echo "**** cleanup ****" && \ + apt-get clean && \ + rm -rf \ + /etc/default/plexmediaserver \ + /tmp/* \ + /var/lib/apt/lists/* \ + /var/tmp/* # add local files COPY root/ / +# add unrar +COPY --from=unrar /usr/bin/unrar-ubuntu /usr/bin/unrar + # ports and volumes -EXPOSE 32400/tcp 3005/tcp 8324/tcp 32469/tcp 1900/udp 32410/udp 32412/udp 32413/udp 32414/udp -VOLUME /config /transcode +EXPOSE 32400/tcp 1900/udp 5353/udp 8324/tcp 32410/udp 32412/udp 32413/udp 32414/udp 32469/tcp +VOLUME /config diff --git a/Dockerfile.aarch64 b/Dockerfile.aarch64 index a9d1305d..50d94551 100644 --- a/Dockerfile.aarch64 +++ b/Dockerfile.aarch64 @@ -1,58 +1,61 @@ -FROM lsiobase/ubuntu:arm64v8-bionic +# syntax=docker/dockerfile:1 + +FROM ghcr.io/linuxserver/unrar:arm64v8-latest AS unrar + +FROM ghcr.io/linuxserver/baseimage-ubuntu:arm64v8-noble # set version label ARG BUILD_DATE ARG VERSION ARG PLEX_RELEASE LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}" -LABEL maintainer="sparklyballs, thelamer" +LABEL maintainer="thelamer" # global environment settings ENV DEBIAN_FRONTEND="noninteractive" \ -PLEX_DOWNLOAD="https://downloads.plex.tv/plex-media-server-new" \ -PLEX_ARCH="arm64" \ -PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR="/config/Library/Application Support" \ -PLEX_MEDIA_SERVER_HOME="/usr/lib/plexmediaserver" \ -PLEX_MEDIA_SERVER_MAX_PLUGIN_PROCS="6" \ -PLEX_MEDIA_SERVER_USER="abc" \ -PLEX_MEDIA_SERVER_INFO_VENDOR="Docker" \ -PLEX_MEDIA_SERVER_INFO_DEVICE="Docker Container (LinuxServer.io)" + PLEX_DOWNLOAD="https://downloads.plex.tv/plex-media-server-new" \ + PLEX_ARCH="arm64" \ + PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR="/config/Library/Application Support" \ + PLEX_MEDIA_SERVER_HOME="/usr/lib/plexmediaserver" \ + PLEX_MEDIA_SERVER_MAX_PLUGIN_PROCS="6" \ + PLEX_MEDIA_SERVER_USER="abc" \ + PLEX_MEDIA_SERVER_INFO_VENDOR="Docker" \ + PLEX_MEDIA_SERVER_INFO_DEVICE="Docker Container (LinuxServer.io)" \ + TMPDIR=/run/plex-temp \ + ATTACHED_DEVICES_PERMS="/dev/dri /dev/dvb -type c" RUN \ - echo "**** install runtime packages ****" && \ - apt-get update && \ - apt-get install -y \ - udev \ - unrar \ - wget \ - jq && \ - 
echo "**** Udevadm hack ****" && \ - mv /sbin/udevadm /sbin/udevadm.bak && \ - echo "exit 0" > /sbin/udevadm && \ - chmod +x /sbin/udevadm && \ - echo "**** install plex ****" && \ - if [ -z ${PLEX_RELEASE+x} ]; then \ - PLEX_RELEASE=$(curl -sX GET 'https://plex.tv/api/downloads/5.json' \ - | jq -r '.computer.Linux.version'); \ - fi && \ - curl -o \ - /tmp/plexmediaserver.deb -L \ - "${PLEX_DOWNLOAD}/${PLEX_RELEASE}/debian/plexmediaserver_${PLEX_RELEASE}_${PLEX_ARCH}.deb" && \ - dpkg -i /tmp/plexmediaserver.deb && \ - mv /sbin/udevadm.bak /sbin/udevadm && \ - echo "**** ensure abc user's home folder is /app ****" && \ - usermod -d /app abc && \ - echo "**** cleanup ****" && \ - apt-get clean && \ - rm -rf \ - /etc/default/plexmediaserver \ - /tmp/* \ - /var/lib/apt/lists/* \ - /var/tmp/* + echo "**** install runtime packages ****" && \ + apt-get update && \ + apt-get install -y \ + udev \ + wget && \ + echo "**** install plex ****" && \ + if [ -z ${PLEX_RELEASE+x} ]; then \ + PLEX_RELEASE=$(curl -sX GET 'https://plex.tv/api/downloads/5.json' \ + | jq -r '.computer.Linux.version'); \ + fi && \ + curl -o \ + /tmp/plexmediaserver.deb -L \ + "${PLEX_DOWNLOAD}/${PLEX_RELEASE}/debian/plexmediaserver_${PLEX_RELEASE}_${PLEX_ARCH}.deb" && \ + dpkg -i /tmp/plexmediaserver.deb && \ + echo "**** ensure abc user's home folder is /app ****" && \ + usermod -d /app abc && \ + printf "Linuxserver.io version: ${VERSION}\nBuild-date: ${BUILD_DATE}" > /build_version && \ + echo "**** cleanup ****" && \ + apt-get clean && \ + rm -rf \ + /etc/default/plexmediaserver \ + /tmp/* \ + /var/lib/apt/lists/* \ + /var/tmp/* # add local files COPY root/ / +# add unrar +COPY --from=unrar /usr/bin/unrar-ubuntu /usr/bin/unrar + # ports and volumes -EXPOSE 32400/tcp 3005/tcp 8324/tcp 32469/tcp 1900/udp 32410/udp 32412/udp 32413/udp 32414/udp -VOLUME /config /transcode +EXPOSE 32400/tcp 1900/udp 5353/udp 8324/tcp 32410/udp 32412/udp 32413/udp 32414/udp 32469/tcp +VOLUME /config diff --git a/Dockerfile.armhf b/Dockerfile.armhf deleted file mode 100644 index eae127e3..00000000 --- a/Dockerfile.armhf +++ /dev/null @@ -1,58 +0,0 @@ -FROM lsiobase/ubuntu:arm32v7-bionic - -# set version label -ARG BUILD_DATE -ARG VERSION -ARG PLEX_RELEASE -LABEL build_version="Linuxserver.io version:- ${VERSION} Build-date:- ${BUILD_DATE}" -LABEL maintainer="sparklyballs, thelamer" - -# global environment settings -ENV DEBIAN_FRONTEND="noninteractive" \ -PLEX_DOWNLOAD="https://downloads.plex.tv/plex-media-server-new" \ -PLEX_ARCH="armhf" \ -PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR="/config/Library/Application Support" \ -PLEX_MEDIA_SERVER_HOME="/usr/lib/plexmediaserver" \ -PLEX_MEDIA_SERVER_MAX_PLUGIN_PROCS="6" \ -PLEX_MEDIA_SERVER_USER="abc" \ -PLEX_MEDIA_SERVER_INFO_VENDOR="Docker" \ -PLEX_MEDIA_SERVER_INFO_DEVICE="Docker Container (LinuxServer.io)" - -RUN \ - echo "**** install runtime packages ****" && \ - apt-get update && \ - apt-get install -y \ - udev \ - unrar \ - wget \ - jq && \ - echo "**** Udevadm hack ****" && \ - mv /sbin/udevadm /sbin/udevadm.bak && \ - echo "exit 0" > /sbin/udevadm && \ - chmod +x /sbin/udevadm && \ - echo "**** install plex ****" && \ - if [ -z ${PLEX_RELEASE+x} ]; then \ - PLEX_RELEASE=$(curl -sX GET 'https://plex.tv/api/downloads/5.json' \ - | jq -r '.computer.Linux.version'); \ - fi && \ - curl -o \ - /tmp/plexmediaserver.deb -L \ - "${PLEX_DOWNLOAD}/${PLEX_RELEASE}/debian/plexmediaserver_${PLEX_RELEASE}_${PLEX_ARCH}.deb" && \ - dpkg -i /tmp/plexmediaserver.deb && \ - mv /sbin/udevadm.bak /sbin/udevadm && \ - 
echo "**** ensure abc user's home folder is /app ****" && \ - usermod -d /app abc && \ - echo "**** cleanup ****" && \ - apt-get clean && \ - rm -rf \ - /etc/default/plexmediaserver \ - /tmp/* \ - /var/lib/apt/lists/* \ - /var/tmp/* - -# add local files -COPY root/ / - -# ports and volumes -EXPOSE 32400/tcp 3005/tcp 8324/tcp 32469/tcp 1900/udp 32410/udp 32412/udp 32413/udp 32414/udp -VOLUME /config /transcode diff --git a/Jenkinsfile b/Jenkinsfile index 5ea5ba75..338b91fa 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -2,14 +2,23 @@ pipeline { agent { label 'X86-64-MULTI' } + options { + buildDiscarder(logRotator(numToKeepStr: '10', daysToKeepStr: '60')) + parallelsAlwaysFailFast() + } // Input to determine if this is a package check parameters { - string(defaultValue: 'false', description: 'package check run', name: 'PACKAGE_CHECK') + string(defaultValue: 'false', description: 'package check run', name: 'PACKAGE_CHECK') } // Configuration for the variables used for this specific repo environment { BUILDS_DISCORD=credentials('build_webhook_url') GITHUB_TOKEN=credentials('498b4638-2d02-4ce5-832d-8a57d01d97ab') + GITLAB_TOKEN=credentials('b6f0f1dd-6952-4cf6-95d1-9c06380283f0') + GITLAB_NAMESPACE=credentials('gitlab-namespace-id') + DOCKERHUB_TOKEN=credentials('docker-hub-ci-pat') + QUAYIO_API_TOKEN=credentials('quayio-repo-api-token') + GIT_SIGNING_KEY=credentials('484fbca6-9a4f-455e-b9e3-97ac98785f5f') JSON_URL = 'https://plex.tv/api/downloads/5.json' JSON_PATH = '.computer.Linux.version' BUILD_VERSION_ARG = 'PLEX_RELEASE' @@ -26,18 +35,50 @@ pipeline { CI_PORT='32400' CI_SSL='false' CI_DELAY='120' - CI_DOCKERENV='TZ=US/Pacific' - CI_AUTH='user:password' + CI_DOCKERENV='' + CI_AUTH='' CI_WEBPATH='/web/index.html' } stages { + stage("Set git config"){ + steps{ + sh '''#!/bin/bash + cat ${GIT_SIGNING_KEY} > /config/.ssh/id_sign + chmod 600 /config/.ssh/id_sign + ssh-keygen -y -f /config/.ssh/id_sign > /config/.ssh/id_sign.pub + echo "Using $(ssh-keygen -lf /config/.ssh/id_sign) to sign commits" + git config --global gpg.format ssh + git config --global user.signingkey /config/.ssh/id_sign + git config --global commit.gpgsign true + ''' + } + } // Setup all the basic environment variables needed for the build stage("Set ENV Variables base"){ steps{ + echo "Running on node: ${NODE_NAME}" + sh '''#! 
/bin/bash + echo "Pruning builder" + docker builder prune -f --builder container || : + containers=$(docker ps -q) + if [[ -n "${containers}" ]]; then + BUILDX_CONTAINER_ID=$(docker ps -qf 'name=buildx_buildkit') + for container in ${containers}; do + if [[ "${container}" == "${BUILDX_CONTAINER_ID}" ]]; then + echo "skipping buildx container in docker stop" + else + echo "Stopping container ${container}" + docker stop ${container} + fi + done + fi + docker system prune -f --volumes || : + docker image prune -af || : + ''' script{ env.EXIT_STATUS = '' env.LS_RELEASE = sh( - script: '''docker run --rm alexeiled/skopeo sh -c 'skopeo inspect docker://docker.io/'${DOCKERHUB_IMAGE}':latest 2>/dev/null' | jq -r '.Labels.build_version' | awk '{print $3}' | grep '\\-ls' || : ''', + script: '''docker run --rm quay.io/skopeo/stable:v1 inspect docker://ghcr.io/${LS_USER}/${CONTAINER_NAME}:latest 2>/dev/null | jq -r '.Labels.build_version' | awk '{print $3}' | grep '\\-ls' || : ''', returnStdout: true).trim() env.LS_RELEASE_NOTES = sh( script: '''cat readme-vars.yml | awk -F \\" '/date: "[0-9][0-9].[0-9][0-9].[0-9][0-9]:/ {print $4;exit;}' | sed -E ':a;N;$!ba;s/\\r{0,1}\\n/\\\\n/g' ''', @@ -48,26 +89,20 @@ pipeline { env.COMMIT_SHA = sh( script: '''git rev-parse HEAD''', returnStdout: true).trim() + env.GH_DEFAULT_BRANCH = sh( + script: '''git remote show origin | grep "HEAD branch:" | sed 's|.*HEAD branch: ||' ''', + returnStdout: true).trim() env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/commit/' + env.GIT_COMMIT env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DOCKERHUB_IMAGE + '/tags/' env.PULL_REQUEST = env.CHANGE_ID - env.LICENSE_TAG = sh( - script: '''#!/bin/bash - if [ -e LICENSE ] ; then - cat LICENSE | md5sum | cut -c1-8 - else - echo none - fi''', - returnStdout: true).trim() - env.FUNDING_TAG = sh( - script: '''#!/bin/bash - if [ -e ./.github/FUNDING.yml ] ; then - cat ./.github/FUNDING.yml | md5sum | cut -c1-8 - else - echo none - fi''', - returnStdout: true).trim() + env.TEMPLATED_FILES = 'Jenkinsfile README.md LICENSE .editorconfig ./.github/CONTRIBUTING.md ./.github/FUNDING.yml ./.github/ISSUE_TEMPLATE/config.yml ./.github/ISSUE_TEMPLATE/issue.bug.yml ./.github/ISSUE_TEMPLATE/issue.feature.yml ./.github/PULL_REQUEST_TEMPLATE.md ./.github/workflows/external_trigger_scheduler.yml ./.github/workflows/greetings.yml ./.github/workflows/package_trigger_scheduler.yml ./.github/workflows/call_issue_pr_tracker.yml ./.github/workflows/call_issues_cron.yml ./.github/workflows/permissions.yml ./.github/workflows/external_trigger.yml' + if ( env.SYFT_IMAGE_TAG == null ) { + env.SYFT_IMAGE_TAG = 'latest' + } } + echo "Using syft image tag ${SYFT_IMAGE_TAG}" + sh '''#! 
/bin/bash + echo "The default github branch detected as ${GH_DEFAULT_BRANCH}" ''' script{ env.LS_RELEASE_NUMBER = sh( script: '''echo ${LS_RELEASE} |sed 's/^.*-ls//g' ''', @@ -125,8 +160,32 @@ pipeline { steps{ script{ env.EXT_RELEASE_CLEAN = sh( - script: '''echo ${EXT_RELEASE} | sed 's/[~,%@+;:/]//g' ''', + script: '''echo ${EXT_RELEASE} | sed 's/[~,%@+;:/ ]//g' ''', returnStdout: true).trim() + + def semver = env.EXT_RELEASE_CLEAN =~ /(\d+)\.(\d+)\.(\d+)/ + if (semver.find()) { + env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${semver[0][3]}" + } else { + semver = env.EXT_RELEASE_CLEAN =~ /(\d+)\.(\d+)(?:\.(\d+))?(.*)/ + if (semver.find()) { + if (semver[0][3]) { + env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${semver[0][3]}" + } else if (!semver[0][3] && !semver[0][4]) { + env.SEMVER = "${semver[0][1]}.${semver[0][2]}.${(new Date()).format('YYYYMMdd')}" + } + } + } + + if (env.SEMVER != null) { + if (BRANCH_NAME != "${env.GH_DEFAULT_BRANCH}") { + env.SEMVER = "${env.SEMVER}-${BRANCH_NAME}" + } + println("SEMVER: ${env.SEMVER}") + } else { + println("No SEMVER detected") + } + } } } @@ -139,12 +198,19 @@ pipeline { steps { script{ env.IMAGE = env.DOCKERHUB_IMAGE + env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/' + env.CONTAINER_NAME + env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/' + env.CONTAINER_NAME + env.QUAYIMAGE = 'quay.io/linuxserver.io/' + env.CONTAINER_NAME if (env.MULTIARCH == 'true') { - env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm32v7-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER } else { env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER } + env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER env.META_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN + env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache' + env.CITEST_IMAGETAG = 'latest' } } } @@ -157,13 +223,20 @@ pipeline { steps { script{ env.IMAGE = env.DEV_DOCKERHUB_IMAGE + env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/lsiodev-' + env.CONTAINER_NAME + env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/lsiodev-' + env.CONTAINER_NAME + env.QUAYIMAGE = 'quay.io/linuxserver.io/lsiodev-' + env.CONTAINER_NAME if (env.MULTIARCH == 'true') { - env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm32v7-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA } else { env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA } + env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN env.DOCKERHUB_LINK = 
'https://hub.docker.com/r/' + env.DEV_DOCKERHUB_IMAGE + '/tags/' + env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache' + env.CITEST_IMAGETAG = 'develop' } } } @@ -175,14 +248,21 @@ pipeline { steps { script{ env.IMAGE = env.PR_DOCKERHUB_IMAGE + env.GITHUBIMAGE = 'ghcr.io/' + env.LS_USER + '/lspipepr-' + env.CONTAINER_NAME + env.GITLABIMAGE = 'registry.gitlab.com/linuxserver.io/' + env.LS_REPO + '/lspipepr-' + env.CONTAINER_NAME + env.QUAYIMAGE = 'quay.io/linuxserver.io/lspipepr-' + env.CONTAINER_NAME if (env.MULTIARCH == 'true') { - env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST + '|arm32v7-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST + env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST } else { - env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST + env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST } - env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST + env.VERSION_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST + env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '-pr-' + env.PULL_REQUEST + env.EXT_RELEASE_TAG = 'version-' + env.EXT_RELEASE_CLEAN env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/pull/' + env.PULL_REQUEST env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.PR_DOCKERHUB_IMAGE + '/tags/' + env.BUILDCACHE = 'docker.io/lsiodev/buildcache,registry.gitlab.com/linuxserver.io/docker-jenkins-builder/lsiodev-buildcache,ghcr.io/linuxserver/lsiodev-buildcache,quay.io/linuxserver.io/lsiodev-buildcache' + env.CITEST_IMAGETAG = 'develop' } } } @@ -193,25 +273,24 @@ pipeline { } steps { withCredentials([ - string(credentialsId: 'spaces-key', variable: 'DO_KEY'), - string(credentialsId: 'spaces-secret', variable: 'DO_SECRET') + string(credentialsId: 'ci-tests-s3-key-id', variable: 'S3_KEY'), + string(credentialsId: 'ci-tests-s3-secret-access-key', variable: 'S3_SECRET') ]) { script{ - env.SHELLCHECK_URL = 'https://lsio-ci.ams3.digitaloceanspaces.com/' + env.IMAGE + '/' + env.META_TAG + '/shellcheck-result.xml' + env.SHELLCHECK_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/shellcheck-result.xml' } - sh '''curl -sL https://raw.githubusercontent.com/linuxserver/docker-shellcheck/master/checkrun.sh | /bin/bash''' + sh '''curl -sL https://raw.githubusercontent.com/linuxserver/docker-jenkins-builder/master/checkrun.sh | /bin/bash''' sh '''#! 
/bin/bash - set -e - docker pull lsiodev/spaces-file-upload:latest docker run --rm \ - -e DESTINATION=\"${IMAGE}/${META_TAG}/shellcheck-result.xml\" \ - -e FILE_NAME="shellcheck-result.xml" \ - -e MIMETYPE="text/xml" \ - -v ${WORKSPACE}:/mnt \ - -e SECRET_KEY=\"${DO_SECRET}\" \ - -e ACCESS_KEY=\"${DO_KEY}\" \ - -t lsiodev/spaces-file-upload:latest \ - python /upload.py''' + -v ${WORKSPACE}:/mnt \ + -e AWS_ACCESS_KEY_ID=\"${S3_KEY}\" \ + -e AWS_SECRET_ACCESS_KEY=\"${S3_SECRET}\" \ + ghcr.io/linuxserver/baseimage-alpine:3 s6-envdir -fn -- /var/run/s6/container_environment /bin/bash -c "\ + apk add --no-cache python3 && \ + python3 -m venv /lsiopy && \ + pip install --no-cache-dir -U pip && \ + pip install --no-cache-dir s3cmd && \ + s3cmd put --no-preserve --acl-public -m text/xml /mnt/shellcheck-result.xml s3://ci-tests.linuxserver.io/${IMAGE}/${META_TAG}/shellcheck-result.xml" || :''' } } } @@ -228,38 +307,198 @@ pipeline { sh '''#! /bin/bash set -e TEMPDIR=$(mktemp -d) - docker pull linuxserver/jenkins-builder:latest - docker run --rm -e CONTAINER_NAME=${CONTAINER_NAME} -e GITHUB_BRANCH=master -v ${TEMPDIR}:/ansible/jenkins linuxserver/jenkins-builder:latest - docker pull linuxserver/doc-builder:latest - docker run --rm -e CONTAINER_NAME=${CONTAINER_NAME} -e GITHUB_BRANCH=master -v ${TEMPDIR}:/ansible/readme linuxserver/doc-builder:latest - if [ "$(md5sum ${TEMPDIR}/${LS_REPO}/Jenkinsfile | awk '{ print $1 }')" != "$(md5sum Jenkinsfile | awk '{ print $1 }')" ] || \ - [ "$(md5sum ${TEMPDIR}/${CONTAINER_NAME}/README.md | awk '{ print $1 }')" != "$(md5sum README.md | awk '{ print $1 }')" ] || \ - [ "$(cat ${TEMPDIR}/${LS_REPO}/LICENSE | md5sum | cut -c1-8)" != "${LICENSE_TAG}" ] || \ - [ "$(cat ${TEMPDIR}/${LS_REPO}/.github/FUNDING.yml | md5sum | cut -c1-8)" != "${FUNDING_TAG}" ]; then + docker pull ghcr.io/linuxserver/jenkins-builder:latest + # Cloned repo paths for templating: + # ${TEMPDIR}/docker-${CONTAINER_NAME}: Cloned branch master of ${LS_USER}/${LS_REPO} for running the jenkins builder on + # ${TEMPDIR}/repo/${LS_REPO}: Cloned branch master of ${LS_USER}/${LS_REPO} for commiting various templated file changes and pushing back to Github + # ${TEMPDIR}/docs/docker-documentation: Cloned docs repo for pushing docs updates to Github + # ${TEMPDIR}/unraid/docker-templates: Cloned docker-templates repo to check for logos + # ${TEMPDIR}/unraid/templates: Cloned templates repo for commiting unraid template changes and pushing back to Github + git clone --branch master --depth 1 https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/docker-${CONTAINER_NAME} + docker run --rm -v ${TEMPDIR}/docker-${CONTAINER_NAME}:/tmp -e LOCAL=true -e PUID=$(id -u) -e PGID=$(id -g) ghcr.io/linuxserver/jenkins-builder:latest + echo "Starting Stage 1 - Jenkinsfile update" + if [[ "$(md5sum Jenkinsfile | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/Jenkinsfile | awk '{ print $1 }')" ]]; then mkdir -p ${TEMPDIR}/repo git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} - git --git-dir ${TEMPDIR}/repo/${LS_REPO}/.git checkout -f master - cp ${TEMPDIR}/${CONTAINER_NAME}/README.md ${TEMPDIR}/repo/${LS_REPO}/ + cd ${TEMPDIR}/repo/${LS_REPO} + git checkout -f master cp ${TEMPDIR}/docker-${CONTAINER_NAME}/Jenkinsfile ${TEMPDIR}/repo/${LS_REPO}/ - cp ${TEMPDIR}/docker-${CONTAINER_NAME}/LICENSE ${TEMPDIR}/repo/${LS_REPO}/ - mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github - cp ${TEMPDIR}/docker-${CONTAINER_NAME}/.github/FUNDING.yml 
${TEMPDIR}/repo/${LS_REPO}/.github/FUNDING.yml - cd ${TEMPDIR}/repo/${LS_REPO}/ - git --git-dir ${TEMPDIR}/repo/${LS_REPO}/.git add Jenkinsfile README.md LICENSE ./.github/FUNDING.yml - git --git-dir ${TEMPDIR}/repo/${LS_REPO}/.git commit -m 'Bot Updating Templated Files' - git --git-dir ${TEMPDIR}/repo/${LS_REPO}/.git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git --all + git add Jenkinsfile + git commit -m 'Bot Updating Templated Files' + git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master + git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master + echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} + echo "Updating Jenkinsfile and exiting build, new one will trigger based on commit" + rm -Rf ${TEMPDIR} + exit 0 + else + echo "Jenkinsfile is up to date." + fi + echo "Starting Stage 2 - Delete old templates" + OLD_TEMPLATES=".github/ISSUE_TEMPLATE.md .github/ISSUE_TEMPLATE/issue.bug.md .github/ISSUE_TEMPLATE/issue.feature.md .github/workflows/call_invalid_helper.yml .github/workflows/stale.yml .github/workflows/package_trigger.yml" + for i in ${OLD_TEMPLATES}; do + if [[ -f "${i}" ]]; then + TEMPLATES_TO_DELETE="${i} ${TEMPLATES_TO_DELETE}" + fi + done + if [[ -n "${TEMPLATES_TO_DELETE}" ]]; then + mkdir -p ${TEMPDIR}/repo + git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} + cd ${TEMPDIR}/repo/${LS_REPO} + git checkout -f master + for i in ${TEMPLATES_TO_DELETE}; do + git rm "${i}" + done + git commit -m 'Bot Updating Templated Files' + git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master + git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master + echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} + echo "Deleting old/deprecated templates and exiting build, new one will trigger based on commit" + rm -Rf ${TEMPDIR} + exit 0 + else + echo "No templates to delete" + fi + echo "Starting Stage 2.5 - Update init diagram" + if ! grep -q 'init_diagram:' readme-vars.yml; then + echo "Adding the key 'init_diagram' to readme-vars.yml" + sed -i '\\|^#.*changelog.*$|d' readme-vars.yml + sed -i 's|^changelogs:|# init diagram\\ninit_diagram:\\n\\n# changelog\\nchangelogs:|' readme-vars.yml + fi + mkdir -p ${TEMPDIR}/d2 + docker run --rm -v ${TEMPDIR}/d2:/output -e PUID=$(id -u) -e PGID=$(id -g) -e RAW="true" ghcr.io/linuxserver/d2-builder:latest ${CONTAINER_NAME}:latest + ls -al ${TEMPDIR}/d2 + yq -ei ".init_diagram |= load_str(\\"${TEMPDIR}/d2/${CONTAINER_NAME}-latest.d2\\")" readme-vars.yml + if [[ $(md5sum readme-vars.yml | cut -c1-8) != $(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/readme-vars.yml | cut -c1-8) ]]; then + echo "'init_diagram' has been updated. Updating repo and exiting build, new one will trigger based on commit." 
+ mkdir -p ${TEMPDIR}/repo + git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} + cd ${TEMPDIR}/repo/${LS_REPO} + git checkout -f master + cp ${WORKSPACE}/readme-vars.yml ${TEMPDIR}/repo/${LS_REPO}/readme-vars.yml + git add readme-vars.yml + git commit -m 'Bot Updating Templated Files' + git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master + git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} + echo "Updating templates and exiting build, new one will trigger based on commit" + rm -Rf ${TEMPDIR} + exit 0 else echo "false" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} + echo "Init diagram is unchanged" fi - mkdir -p ${TEMPDIR}/gitbook - git clone https://github.com/linuxserver/docker-documentation.git ${TEMPDIR}/gitbook/docker-documentation - if [[ "${BRANCH_NAME}" == "master" ]] && [[ (! -f ${TEMPDIR}/gitbook/docker-documentation/images/docker-${CONTAINER_NAME}.md) || ("$(md5sum ${TEMPDIR}/gitbook/docker-documentation/images/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/${CONTAINER_NAME}/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')") ]]; then - cp ${TEMPDIR}/${CONTAINER_NAME}/docker-${CONTAINER_NAME}.md ${TEMPDIR}/gitbook/docker-documentation/images/ - cd ${TEMPDIR}/gitbook/docker-documentation/ - git add images/docker-${CONTAINER_NAME}.md + echo "Starting Stage 3 - Update templates" + CURRENTHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8) + cd ${TEMPDIR}/docker-${CONTAINER_NAME} + NEWHASH=$(grep -hs ^ ${TEMPLATED_FILES} | md5sum | cut -c1-8) + if [[ "${CURRENTHASH}" != "${NEWHASH}" ]] || ! grep -q '.jenkins-external' "${WORKSPACE}/.gitignore" 2>/dev/null; then + mkdir -p ${TEMPDIR}/repo + git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO} + cd ${TEMPDIR}/repo/${LS_REPO} + git checkout -f master + cd ${TEMPDIR}/docker-${CONTAINER_NAME} + mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github/workflows + mkdir -p ${TEMPDIR}/repo/${LS_REPO}/.github/ISSUE_TEMPLATE + cp --parents ${TEMPLATED_FILES} ${TEMPDIR}/repo/${LS_REPO}/ || : + cp --parents readme-vars.yml ${TEMPDIR}/repo/${LS_REPO}/ || : + cd ${TEMPDIR}/repo/${LS_REPO}/ + if ! grep -q '.jenkins-external' .gitignore 2>/dev/null; then + echo ".jenkins-external" >> .gitignore + git add .gitignore + fi + git add readme-vars.yml ${TEMPLATED_FILES} git commit -m 'Bot Updating Templated Files' - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git --all + git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master + git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master + echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} + echo "Updating templates and exiting build, new one will trigger based on commit" + rm -Rf ${TEMPDIR} + exit 0 + else + echo "false" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER} + echo "No templates to update" + fi + echo "Starting Stage 4 - External repo updates: Docs, Unraid Template and Readme Sync to Docker Hub" + mkdir -p ${TEMPDIR}/docs + git clone --depth=1 https://github.com/linuxserver/docker-documentation.git ${TEMPDIR}/docs/docker-documentation + if [[ "${BRANCH_NAME}" == "${GH_DEFAULT_BRANCH}" ]] && [[ (! 
-f ${TEMPDIR}/docs/docker-documentation/docs/images/docker-${CONTAINER_NAME}.md) || ("$(md5sum ${TEMPDIR}/docs/docker-documentation/docs/images/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')") ]]; then + cp ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/docker-${CONTAINER_NAME}.md ${TEMPDIR}/docs/docker-documentation/docs/images/ + cd ${TEMPDIR}/docs/docker-documentation + GH_DOCS_DEFAULT_BRANCH=$(git remote show origin | grep "HEAD branch:" | sed 's|.*HEAD branch: ||') + git add docs/images/docker-${CONTAINER_NAME}.md + echo "Updating docs repo" + git commit -m 'Bot Updating Documentation' + git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH} --rebase + git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH} || \ + (MAXWAIT="10" && echo "Push to docs failed, trying again in ${MAXWAIT} seconds" && \ + sleep $((RANDOM % MAXWAIT)) && \ + git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH} --rebase && \ + git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git ${GH_DOCS_DEFAULT_BRANCH}) + else + echo "Docs update not needed, skipping" + fi + mkdir -p ${TEMPDIR}/unraid + git clone --depth=1 https://github.com/linuxserver/docker-templates.git ${TEMPDIR}/unraid/docker-templates + git clone --depth=1 https://github.com/linuxserver/templates.git ${TEMPDIR}/unraid/templates + if [[ -f ${TEMPDIR}/unraid/docker-templates/linuxserver.io/img/${CONTAINER_NAME}-logo.png ]]; then + sed -i "s|master/linuxserver.io/img/linuxserver-ls-logo.png|master/linuxserver.io/img/${CONTAINER_NAME}-logo.png|" ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml + elif [[ -f ${TEMPDIR}/unraid/docker-templates/linuxserver.io/img/${CONTAINER_NAME}-icon.png ]]; then + sed -i "s|master/linuxserver.io/img/linuxserver-ls-logo.png|master/linuxserver.io/img/${CONTAINER_NAME}-icon.png|" ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml + fi + if [[ "${BRANCH_NAME}" == "${GH_DEFAULT_BRANCH}" ]] && [[ (! -f ${TEMPDIR}/unraid/templates/unraid/${CONTAINER_NAME}.xml) || ("$(md5sum ${TEMPDIR}/unraid/templates/unraid/${CONTAINER_NAME}.xml | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml | awk '{ print $1 }')") ]]; then + echo "Updating Unraid template" + cd ${TEMPDIR}/unraid/templates/ + GH_TEMPLATES_DEFAULT_BRANCH=$(git remote show origin | grep "HEAD branch:" | sed 's|.*HEAD branch: ||') + if grep -wq "^${CONTAINER_NAME}$" ${TEMPDIR}/unraid/templates/unraid/ignore.list && [[ -f ${TEMPDIR}/unraid/templates/unraid/deprecated/${CONTAINER_NAME}.xml ]]; then + echo "Image is on the ignore list, and already in the deprecation folder." 
+ elif grep -wq "^${CONTAINER_NAME}$" ${TEMPDIR}/unraid/templates/unraid/ignore.list; then + echo "Image is on the ignore list, marking Unraid template as deprecated" + cp ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml ${TEMPDIR}/unraid/templates/unraid/ + git add -u unraid/${CONTAINER_NAME}.xml + git mv unraid/${CONTAINER_NAME}.xml unraid/deprecated/${CONTAINER_NAME}.xml || : + git commit -m 'Bot Moving Deprecated Unraid Template' || : + else + cp ${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/${CONTAINER_NAME}.xml ${TEMPDIR}/unraid/templates/unraid/ + git add unraid/${CONTAINER_NAME}.xml + git commit -m 'Bot Updating Unraid Template' + fi + git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/templates.git ${GH_TEMPLATES_DEFAULT_BRANCH} --rebase + git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/templates.git ${GH_TEMPLATES_DEFAULT_BRANCH} || \ + (MAXWAIT="10" && echo "Push to unraid templates failed, trying again in ${MAXWAIT} seconds" && \ + sleep $((RANDOM % MAXWAIT)) && \ + git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/templates.git ${GH_TEMPLATES_DEFAULT_BRANCH} --rebase && \ + git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/templates.git ${GH_TEMPLATES_DEFAULT_BRANCH}) + else + echo "No updates to Unraid template needed, skipping" + fi + if [[ "${BRANCH_NAME}" == "${GH_DEFAULT_BRANCH}" ]]; then + if [[ $(cat ${TEMPDIR}/docker-${CONTAINER_NAME}/README.md | wc -m) -gt 25000 ]]; then + echo "Readme is longer than 25,000 characters. Syncing the lite version to Docker Hub" + DH_README_SYNC_PATH="${TEMPDIR}/docker-${CONTAINER_NAME}/.jenkins-external/README.lite" + else + echo "Syncing readme to Docker Hub" + DH_README_SYNC_PATH="${TEMPDIR}/docker-${CONTAINER_NAME}/README.md" + fi + if curl -s https://hub.docker.com/v2/namespaces/${DOCKERHUB_IMAGE%%/*}/repositories/${DOCKERHUB_IMAGE##*/}/tags | jq -r '.message' | grep -q 404; then + echo "Docker Hub endpoint doesn't exist. Creating endpoint first." + DH_TOKEN=$(curl -d '{"username":"linuxserverci", "password":"'${DOCKERHUB_TOKEN}'"}' -H "Content-Type: application/json" -X POST https://hub.docker.com/v2/users/login | jq -r '.token') + curl -s \ + -H "Authorization: JWT ${DH_TOKEN}" \ + -H "Content-Type: application/json" \ + -X POST \ + -d '{"name":"'${DOCKERHUB_IMAGE##*/}'", "namespace":"'${DOCKERHUB_IMAGE%%/*}'"}' \ + https://hub.docker.com/v2/repositories/ || : + fi + DH_TOKEN=$(curl -d '{"username":"linuxserverci", "password":"'${DOCKERHUB_TOKEN}'"}' -H "Content-Type: application/json" -X POST https://hub.docker.com/v2/users/login | jq -r '.token') + curl -s \ + -H "Authorization: JWT ${DH_TOKEN}" \ + -H "Content-Type: application/json" \ + -X PATCH \ + -d "{\\"full_description\\":$(jq -Rsa . ${DH_README_SYNC_PATH})}" \ + https://hub.docker.com/v2/repositories/${DOCKERHUB_IMAGE} || : + else + echo "Not the default Github branch. Skipping readme sync to Docker Hub." fi rm -Rf ${TEMPDIR}''' script{ @@ -285,57 +524,185 @@ pipeline { } } } + // If this is a master build check the S6 service file perms + stage("Check S6 Service file Permissions"){ + when { + branch "master" + environment name: 'CHANGE_ID', value: '' + environment name: 'EXIT_STATUS', value: '' + } + steps { + script{ + sh '''#! 
/bin/bash + WRONG_PERM=$(find ./ -path "./.git" -prune -o \\( -name "run" -o -name "finish" -o -name "check" \\) -not -perm -u=x,g=x,o=x -print) + if [[ -n "${WRONG_PERM}" ]]; then + echo "The following S6 service files are missing the executable bit; canceling the faulty build: ${WRONG_PERM}" + exit 1 + else + echo "S6 service file perms look good." + fi ''' + } + } + } + /* ####################### + GitLab Mirroring and Quay.io Repo Visibility + ####################### */ + // Ping into Gitlab to mirror this repo and have a registry endpoint & mark this repo on Quay.io as public + stage("GitLab Mirror and Quay.io Visibility"){ + when { + environment name: 'EXIT_STATUS', value: '' + } + steps{ + sh '''curl -H "Content-Type: application/json" -H "Private-Token: ${GITLAB_TOKEN}" -X POST https://gitlab.com/api/v4/projects \ + -d '{"namespace_id":'${GITLAB_NAMESPACE}',\ + "name":"'${LS_REPO}'", + "mirror":true,\ + "import_url":"https://github.com/linuxserver/'${LS_REPO}'.git",\ + "issues_access_level":"disabled",\ + "merge_requests_access_level":"disabled",\ + "repository_access_level":"enabled",\ + "visibility":"public"}' ''' + sh '''curl -H "Private-Token: ${GITLAB_TOKEN}" -X PUT "https://gitlab.com/api/v4/projects/Linuxserver.io%2F${LS_REPO}" \ + -d "mirror=true&import_url=https://github.com/linuxserver/${LS_REPO}.git" ''' + sh '''curl -H "Content-Type: application/json" -H "Authorization: Bearer ${QUAYIO_API_TOKEN}" -X POST "https://quay.io/api/v1/repository${QUAYIMAGE/quay.io/}/changevisibility" \ + -d '{"visibility":"public"}' ||: ''' + } + } /* ############### Build Container ############### */ // Build Docker container for push to LS Repo stage('Build-Single') { when { - environment name: 'MULTIARCH', value: 'false' + expression { + env.MULTIARCH == 'false' || params.PACKAGE_CHECK == 'true' + } environment name: 'EXIT_STATUS', value: '' } steps { - sh "docker build --no-cache --pull -t ${IMAGE}:${META_TAG} \ - --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." + echo "Running on node: ${NODE_NAME}" + sh "sed -r -i 's|(^FROM .*)|\\1\\n\\nENV LSIO_FIRST_PARTY=true|g' Dockerfile" + sh "docker buildx build \ + --label \"org.opencontainers.image.created=${GITHUB_DATE}\" \ + --label \"org.opencontainers.image.authors=linuxserver.io\" \ + --label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-plex/packages\" \ + --label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-plex\" \ + --label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-plex\" \ + --label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \ + --label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \ + --label \"org.opencontainers.image.vendor=linuxserver.io\" \ + --label \"org.opencontainers.image.licenses=GPL-3.0-only\" \ + --label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \ + --label \"org.opencontainers.image.title=Plex\" \ + --label \"org.opencontainers.image.description=[Plex](https://plex.tv) organizes video, music and photos from personal media libraries and streams them to smart TVs, streaming boxes and mobile devices. This container is packaged as a standalone Plex Media Server. 
Straightforward design and bulk actions mean getting things done faster.\" \ + --no-cache --pull -t ${IMAGE}:${META_TAG} --platform=linux/amd64 \ + --provenance=true --sbom=true --builder=container --load \ + --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." + sh '''#! /bin/bash + set -e + IFS=',' read -ra CACHE <<< "$BUILDCACHE" + for i in "${CACHE[@]}"; do + docker tag ${IMAGE}:${META_TAG} ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} + done + ''' + withCredentials([ + [ + $class: 'UsernamePasswordMultiBinding', + credentialsId: 'Quay.io-Robot', + usernameVariable: 'QUAYUSER', + passwordVariable: 'QUAYPASS' + ] + ]) { + retry_backoff(5,5) { + sh '''#! /bin/bash + set -e + echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin + echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin + echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin + echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin + + if [[ "${PACKAGE_CHECK}" != "true" ]]; then + declare -A pids + IFS=',' read -ra CACHE <<< "$BUILDCACHE" + for i in "${CACHE[@]}"; do + docker push ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} & + pids[$!]="$i" + done + for p in "${!pids[@]}"; do + wait "$p" || { [[ "${pids[$p]}" != *"quay.io"* ]] && exit 1; } + done + fi + ''' + } + } } } // Build MultiArch Docker containers for push to LS Repo stage('Build-Multi') { when { - environment name: 'MULTIARCH', value: 'true' + allOf { + environment name: 'MULTIARCH', value: 'true' + expression { params.PACKAGE_CHECK == 'false' } + } environment name: 'EXIT_STATUS', value: '' } parallel { stage('Build X86') { steps { - sh "docker build --no-cache --pull -t ${IMAGE}:amd64-${META_TAG} \ - --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." - } - } - stage('Build ARMHF') { - agent { - label 'ARMHF' - } - steps { + echo "Running on node: ${NODE_NAME}" + sh "sed -r -i 's|(^FROM .*)|\\1\\n\\nENV LSIO_FIRST_PARTY=true|g' Dockerfile" + sh "docker buildx build \ + --label \"org.opencontainers.image.created=${GITHUB_DATE}\" \ + --label \"org.opencontainers.image.authors=linuxserver.io\" \ + --label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-plex/packages\" \ + --label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-plex\" \ + --label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-plex\" \ + --label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \ + --label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \ + --label \"org.opencontainers.image.vendor=linuxserver.io\" \ + --label \"org.opencontainers.image.licenses=GPL-3.0-only\" \ + --label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \ + --label \"org.opencontainers.image.title=Plex\" \ + --label \"org.opencontainers.image.description=[Plex](https://plex.tv) organizes video, music and photos from personal media libraries and streams them to smart TVs, streaming boxes and mobile devices. This container is packaged as a standalone Plex Media Server. 
Straightforward design and bulk actions mean getting things done faster.\" \ + --no-cache --pull -t ${IMAGE}:amd64-${META_TAG} --platform=linux/amd64 \ + --provenance=true --sbom=true --builder=container --load \ + --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." + sh '''#! /bin/bash + set -e + IFS=',' read -ra CACHE <<< "$BUILDCACHE" + for i in "${CACHE[@]}"; do + docker tag ${IMAGE}:amd64-${META_TAG} ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} + done + ''' withCredentials([ [ $class: 'UsernamePasswordMultiBinding', - credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207', - usernameVariable: 'DOCKERUSER', - passwordVariable: 'DOCKERPASS' + credentialsId: 'Quay.io-Robot', + usernameVariable: 'QUAYUSER', + passwordVariable: 'QUAYPASS' ] ]) { - echo 'Logging into DockerHub' - sh '''#! /bin/bash - echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin - ''' - sh "docker build --no-cache --pull -f Dockerfile.armhf -t ${IMAGE}:arm32v7-${META_TAG} \ - --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." - sh "docker tag ${IMAGE}:arm32v7-${META_TAG} lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER}" - sh "docker push lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER}" - sh '''docker rmi \ - ${IMAGE}:arm32v7-${META_TAG} \ - lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} || :''' + retry_backoff(5,5) { + sh '''#! /bin/bash + set -e + echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin + echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin + echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin + echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin + + if [[ "${PACKAGE_CHECK}" != "true" ]]; then + declare -A pids + IFS=',' read -ra CACHE <<< "$BUILDCACHE" + for i in "${CACHE[@]}"; do + docker push ${i}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} & + pids[$!]="$i" + done + for p in "${!pids[@]}"; do + wait "$p" || { [[ "${pids[$p]}" != *"quay.io"* ]] && exit 1; } + done + fi + ''' + } } } } @@ -344,26 +711,68 @@ pipeline { label 'ARM64' } steps { + echo "Running on node: ${NODE_NAME}" + sh "sed -r -i 's|(^FROM .*)|\\1\\n\\nENV LSIO_FIRST_PARTY=true|g' Dockerfile.aarch64" + sh "docker buildx build \ + --label \"org.opencontainers.image.created=${GITHUB_DATE}\" \ + --label \"org.opencontainers.image.authors=linuxserver.io\" \ + --label \"org.opencontainers.image.url=https://github.com/linuxserver/docker-plex/packages\" \ + --label \"org.opencontainers.image.documentation=https://docs.linuxserver.io/images/docker-plex\" \ + --label \"org.opencontainers.image.source=https://github.com/linuxserver/docker-plex\" \ + --label \"org.opencontainers.image.version=${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}\" \ + --label \"org.opencontainers.image.revision=${COMMIT_SHA}\" \ + --label \"org.opencontainers.image.vendor=linuxserver.io\" \ + --label \"org.opencontainers.image.licenses=GPL-3.0-only\" \ + --label \"org.opencontainers.image.ref.name=${COMMIT_SHA}\" \ + --label \"org.opencontainers.image.title=Plex\" \ + --label \"org.opencontainers.image.description=[Plex](https://plex.tv) organizes video, music and photos from personal media libraries and streams them to smart TVs, streaming boxes and mobile devices. This container is packaged as a standalone Plex Media Server. 
Straightforward design and bulk actions mean getting things done faster.\" \ + --no-cache --pull -f Dockerfile.aarch64 -t ${IMAGE}:arm64v8-${META_TAG} --platform=linux/arm64 \ + --provenance=true --sbom=true --builder=container --load \ + --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${VERSION_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." + sh '''#! /bin/bash + set -e + IFS=',' read -ra CACHE <<< "$BUILDCACHE" + for i in "${CACHE[@]}"; do + docker tag ${IMAGE}:arm64v8-${META_TAG} ${i}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} + done + ''' withCredentials([ [ $class: 'UsernamePasswordMultiBinding', - credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207', - usernameVariable: 'DOCKERUSER', - passwordVariable: 'DOCKERPASS' + credentialsId: 'Quay.io-Robot', + usernameVariable: 'QUAYUSER', + passwordVariable: 'QUAYPASS' ] ]) { - echo 'Logging into DockerHub' - sh '''#! /bin/bash - echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin - ''' - sh "docker build --no-cache --pull -f Dockerfile.aarch64 -t ${IMAGE}:arm64v8-${META_TAG} \ - --build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ." - sh "docker tag ${IMAGE}:arm64v8-${META_TAG} lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}" - sh "docker push lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}" - sh '''docker rmi \ - ${IMAGE}:arm64v8-${META_TAG} \ - lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} || :''' + retry_backoff(5,5) { + sh '''#! /bin/bash + set -e + echo $DOCKERHUB_TOKEN | docker login -u linuxserverci --password-stdin + echo $GITHUB_TOKEN | docker login ghcr.io -u LinuxServer-CI --password-stdin + echo $GITLAB_TOKEN | docker login registry.gitlab.com -u LinuxServer.io --password-stdin + echo $QUAYPASS | docker login quay.io -u $QUAYUSER --password-stdin + if [[ "${PACKAGE_CHECK}" != "true" ]]; then + declare -A pids + IFS=',' read -ra CACHE <<< "$BUILDCACHE" + for i in "${CACHE[@]}"; do + docker push ${i}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} & + pids[$!]="$i" + done + for p in "${!pids[@]}"; do + wait "$p" || { [[ "${pids[$p]}" != *"quay.io"* ]] && exit 1; } + done + fi + ''' + } } + sh '''#! /bin/bash + containers=$(docker ps -aq) + if [[ -n "${containers}" ]]; then + docker stop ${containers} + fi + docker system prune -f --volumes || : + docker image prune -af || : + ''' } } } @@ -379,22 +788,17 @@ pipeline { sh '''#! 
/bin/bash set -e TEMPDIR=$(mktemp -d) - if [ "${MULTIARCH}" == "true" ]; then + if [ "${MULTIARCH}" == "true" ] && [ "${PACKAGE_CHECK}" != "true" ]; then LOCAL_CONTAINER=${IMAGE}:amd64-${META_TAG} else LOCAL_CONTAINER=${IMAGE}:${META_TAG} fi - if [ "${DIST_IMAGE}" == "alpine" ]; then - docker run --rm --entrypoint '/bin/sh' -v ${TEMPDIR}:/tmp ${LOCAL_CONTAINER} -c '\ - apk info -v > /tmp/package_versions.txt && \ - sort -o /tmp/package_versions.txt /tmp/package_versions.txt && \ - chmod 777 /tmp/package_versions.txt' - elif [ "${DIST_IMAGE}" == "ubuntu" ]; then - docker run --rm --entrypoint '/bin/sh' -v ${TEMPDIR}:/tmp ${LOCAL_CONTAINER} -c '\ - apt list -qq --installed | sed "s#/.*now ##g" | cut -d" " -f1 > /tmp/package_versions.txt && \ - sort -o /tmp/package_versions.txt /tmp/package_versions.txt && \ - chmod 777 /tmp/package_versions.txt' - fi + touch ${TEMPDIR}/package_versions.txt + docker run --rm \ + -v /var/run/docker.sock:/var/run/docker.sock:ro \ + -v ${TEMPDIR}:/tmp \ + ghcr.io/anchore/syft:${SYFT_IMAGE_TAG} \ + ${LOCAL_CONTAINER} -o table=/tmp/package_versions.txt NEW_PACKAGE_TAG=$(md5sum ${TEMPDIR}/package_versions.txt | cut -c1-8 ) echo "Package tag sha from current packages in buit container is ${NEW_PACKAGE_TAG} comparing to old ${PACKAGE_TAG} from github" if [ "${NEW_PACKAGE_TAG}" != "${PACKAGE_TAG}" ]; then @@ -405,7 +809,8 @@ pipeline { wait git add package_versions.txt git commit -m 'Bot Updating Package Versions' - git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git --all + git pull https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master + git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git master echo "true" > /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER} echo "Package tag updated, stopping build process" else @@ -462,41 +867,51 @@ pipeline { } steps { withCredentials([ - string(credentialsId: 'spaces-key', variable: 'DO_KEY'), - string(credentialsId: 'spaces-secret', variable: 'DO_SECRET') + string(credentialsId: 'ci-tests-s3-key-id', variable: 'S3_KEY'), + string(credentialsId: 'ci-tests-s3-secret-access-key ', variable: 'S3_SECRET') ]) { script{ - env.CI_URL = 'https://lsio-ci.ams3.digitaloceanspaces.com/' + env.IMAGE + '/' + env.META_TAG + '/index.html' + env.CI_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/index.html' + env.CI_JSON_URL = 'https://ci-tests.linuxserver.io/' + env.IMAGE + '/' + env.META_TAG + '/report.json' } sh '''#! 
/bin/bash set -e - docker pull lsiodev/ci:latest + if grep -q 'docker-baseimage' <<< "${LS_REPO}"; then + echo "Detected baseimage, setting LSIO_FIRST_PARTY=true" + if [ -n "${CI_DOCKERENV}" ]; then + CI_DOCKERENV="LSIO_FIRST_PARTY=true|${CI_DOCKERENV}" + else + CI_DOCKERENV="LSIO_FIRST_PARTY=true" + fi + fi + docker pull ghcr.io/linuxserver/ci:${CITEST_IMAGETAG} if [ "${MULTIARCH}" == "true" ]; then - docker pull lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} - docker pull lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} - docker tag lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm32v7-${META_TAG} - docker tag lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG} + docker pull ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} --platform=arm64 + docker tag ghcr.io/linuxserver/lsiodev-buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG} fi docker run --rm \ --shm-size=1gb \ -v /var/run/docker.sock:/var/run/docker.sock \ -e IMAGE=\"${IMAGE}\" \ - -e DELAY_START=\"${CI_DELAY}\" \ + -e DOCKER_LOGS_TIMEOUT=\"${CI_DELAY}\" \ -e TAGS=\"${CI_TAGS}\" \ -e META_TAG=\"${META_TAG}\" \ + -e RELEASE_TAG=\"latest\" \ -e PORT=\"${CI_PORT}\" \ -e SSL=\"${CI_SSL}\" \ -e BASE=\"${DIST_IMAGE}\" \ - -e SECRET_KEY=\"${DO_SECRET}\" \ - -e ACCESS_KEY=\"${DO_KEY}\" \ + -e SECRET_KEY=\"${S3_SECRET}\" \ + -e ACCESS_KEY=\"${S3_KEY}\" \ -e DOCKER_ENV=\"${CI_DOCKERENV}\" \ -e WEB_SCREENSHOT=\"${CI_WEB}\" \ -e WEB_AUTH=\"${CI_AUTH}\" \ -e WEB_PATH=\"${CI_WEBPATH}\" \ - -e DO_REGION="ams3" \ - -e DO_BUCKET="lsio-ci" \ - -t lsiodev/ci:latest \ - python /ci/ci.py''' + -e NODE_NAME=\"${NODE_NAME}\" \ + -e SYFT_IMAGE_TAG=\"${CI_SYFT_IMAGE_TAG:-${SYFT_IMAGE_TAG}}\" \ + -e COMMIT_SHA=\"${COMMIT_SHA}\" \ + -e BUILD_NUMBER=\"${BUILD_NUMBER}\" \ + -t ghcr.io/linuxserver/ci:${CITEST_IMAGETAG} \ + python3 test_build.py''' } } } @@ -510,25 +925,25 @@ pipeline { environment name: 'EXIT_STATUS', value: '' } steps { - withCredentials([ - [ - $class: 'UsernamePasswordMultiBinding', - credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207', - usernameVariable: 'DOCKERUSER', - passwordVariable: 'DOCKERPASS' - ] - ]) { - echo 'Logging into DockerHub' + retry_backoff(5,5) { sh '''#! /bin/bash - echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin - ''' - sh "docker tag ${IMAGE}:${META_TAG} ${IMAGE}:latest" - sh "docker push ${IMAGE}:latest" - sh "docker push ${IMAGE}:${META_TAG}" - sh '''docker rmi \ - ${IMAGE}:${META_TAG} \ - ${IMAGE}:latest || :''' - + set -e + for PUSHIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do + [[ ${PUSHIMAGE%%/*} =~ \\. 
]] && PUSHIMAGEPLUS="${PUSHIMAGE}" || PUSHIMAGEPLUS="docker.io/${PUSHIMAGE}" + IFS=',' read -ra CACHE <<< "$BUILDCACHE" + for i in "${CACHE[@]}"; do + if [[ "${PUSHIMAGEPLUS}" == "$(cut -d "/" -f1 <<< ${i})"* ]]; then + CACHEIMAGE=${i} + fi + done + docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${META_TAG} -t ${PUSHIMAGE}:latest -t ${PUSHIMAGE}:${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} || \ + { if [[ "${PUSHIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } + if [ -n "${SEMVER}" ]; then + docker buildx imagetools create --prefer-index=false -t ${PUSHIMAGE}:${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} || \ + { if [[ "${PUSHIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } + fi + done + ''' } } } @@ -539,52 +954,41 @@ pipeline { environment name: 'EXIT_STATUS', value: '' } steps { - withCredentials([ - [ - $class: 'UsernamePasswordMultiBinding', - credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207', - usernameVariable: 'DOCKERUSER', - passwordVariable: 'DOCKERPASS' - ] - ]) { - sh '''#! /bin/bash - echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin - ''' + retry_backoff(5,5) { sh '''#! /bin/bash - if [ "${CI}" == "false" ]; then - docker pull lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} - docker pull lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} - docker tag lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm32v7-${META_TAG} - docker tag lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG} - fi''' - sh "docker tag ${IMAGE}:amd64-${META_TAG} ${IMAGE}:amd64-latest" - sh "docker tag ${IMAGE}:arm32v7-${META_TAG} ${IMAGE}:arm32v7-latest" - sh "docker tag ${IMAGE}:arm64v8-${META_TAG} ${IMAGE}:arm64v8-latest" - sh "docker push ${IMAGE}:amd64-${META_TAG}" - sh "docker push ${IMAGE}:arm32v7-${META_TAG}" - sh "docker push ${IMAGE}:arm64v8-${META_TAG}" - sh "docker push ${IMAGE}:amd64-latest" - sh "docker push ${IMAGE}:arm32v7-latest" - sh "docker push ${IMAGE}:arm64v8-latest" - sh "docker manifest push --purge ${IMAGE}:latest || :" - sh "docker manifest create ${IMAGE}:latest ${IMAGE}:amd64-latest ${IMAGE}:arm32v7-latest ${IMAGE}:arm64v8-latest" - sh "docker manifest annotate ${IMAGE}:latest ${IMAGE}:arm32v7-latest --os linux --arch arm" - sh "docker manifest annotate ${IMAGE}:latest ${IMAGE}:arm64v8-latest --os linux --arch arm64 --variant v8" - sh "docker manifest push --purge ${IMAGE}:${META_TAG} || :" - sh "docker manifest create ${IMAGE}:${META_TAG} ${IMAGE}:amd64-${META_TAG} ${IMAGE}:arm32v7-${META_TAG} ${IMAGE}:arm64v8-${META_TAG}" - sh "docker manifest annotate ${IMAGE}:${META_TAG} ${IMAGE}:arm32v7-${META_TAG} --os linux --arch arm" - sh "docker manifest annotate ${IMAGE}:${META_TAG} ${IMAGE}:arm64v8-${META_TAG} --os linux --arch arm64 --variant v8" - sh "docker manifest push --purge ${IMAGE}:latest" - sh "docker manifest push --purge ${IMAGE}:${META_TAG}" - sh '''docker rmi \ - ${IMAGE}:amd64-${META_TAG} \ - ${IMAGE}:amd64-latest \ - ${IMAGE}:arm32v7-${META_TAG} \ - ${IMAGE}:arm32v7-latest \ - ${IMAGE}:arm64v8-${META_TAG} \ - ${IMAGE}:arm64v8-latest \ - lsiodev/buildcache:arm32v7-${COMMIT_SHA}-${BUILD_NUMBER} \ - lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} || :''' + set -e + for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do + [[ ${MANIFESTIMAGE%%/*} =~ \\. 
]] && MANIFESTIMAGEPLUS="${MANIFESTIMAGE}" || MANIFESTIMAGEPLUS="docker.io/${MANIFESTIMAGE}" + IFS=',' read -ra CACHE <<< "$BUILDCACHE" + for i in "${CACHE[@]}"; do + if [[ "${MANIFESTIMAGEPLUS}" == "$(cut -d "/" -f1 <<< ${i})"* ]]; then + CACHEIMAGE=${i} + fi + done + docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${META_TAG} -t ${MANIFESTIMAGE}:amd64-latest -t ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} || \ + { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } + docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${META_TAG} -t ${MANIFESTIMAGE}:arm64v8-latest -t ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} || \ + { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } + if [ -n "${SEMVER}" ]; then + docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:amd64-${SEMVER} ${CACHEIMAGE}:amd64-${COMMIT_SHA}-${BUILD_NUMBER} || \ + { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } + docker buildx imagetools create --prefer-index=false -t ${MANIFESTIMAGE}:arm64v8-${SEMVER} ${CACHEIMAGE}:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} || \ + { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } + fi + done + for MANIFESTIMAGE in "${IMAGE}" "${GITLABIMAGE}" "${GITHUBIMAGE}" "${QUAYIMAGE}"; do + docker buildx imagetools create -t ${MANIFESTIMAGE}:latest ${MANIFESTIMAGE}:amd64-latest ${MANIFESTIMAGE}:arm64v8-latest || \ + { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } + docker buildx imagetools create -t ${MANIFESTIMAGE}:${META_TAG} ${MANIFESTIMAGE}:amd64-${META_TAG} ${MANIFESTIMAGE}:arm64v8-${META_TAG} || \ + { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } + docker buildx imagetools create -t ${MANIFESTIMAGE}:${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:amd64-${EXT_RELEASE_TAG} ${MANIFESTIMAGE}:arm64v8-${EXT_RELEASE_TAG} || \ + { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } + if [ -n "${SEMVER}" ]; then + docker buildx imagetools create -t ${MANIFESTIMAGE}:${SEMVER} ${MANIFESTIMAGE}:amd64-${SEMVER} ${MANIFESTIMAGE}:arm64v8-${SEMVER} || \ + { if [[ "${MANIFESTIMAGE}" != "${QUAYIMAGE}" ]]; then exit 1; fi; } + fi + done + ''' } } } @@ -599,62 +1003,154 @@ pipeline { environment name: 'EXIT_STATUS', value: '' } steps { - echo "Pushing New tag for current commit ${EXT_RELEASE_CLEAN}-ls${LS_TAG_NUMBER}" - sh '''curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/git/tags \ - -d '{"tag":"'${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}'",\ - "object": "'${COMMIT_SHA}'",\ - "message": "Tagging Release '${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}' to master",\ - "type": "commit",\ - "tagger": {"name": "LinuxServer Jenkins","email": "jenkins@linuxserver.io","date": "'${GITHUB_DATE}'"}}' ''' - echo "Pushing New release for Tag" sh '''#! 
/bin/bash + echo "Auto-generating release notes" + if [ "$(git tag --points-at HEAD)" != "" ]; then + echo "Existing tag points to current commit, suggesting no new LS changes" + AUTO_RELEASE_NOTES="No changes" + else + AUTO_RELEASE_NOTES=$(curl -fsL -H "Authorization: token ${GITHUB_TOKEN}" -H "Accept: application/vnd.github+json" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases/generate-notes \ + -d '{"tag_name":"'${META_TAG}'",\ + "target_commitish": "master"}' \ + | jq -r '.body' | sed 's|## What.s Changed||') + fi + echo "Pushing New tag for current commit ${META_TAG}" + curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/git/tags \ + -d '{"tag":"'${META_TAG}'",\ + "object": "'${COMMIT_SHA}'",\ + "message": "Tagging Release '${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}' to master",\ + "type": "commit",\ + "tagger": {"name": "LinuxServer-CI","email": "ci@linuxserver.io","date": "'${GITHUB_DATE}'"}}' + echo "Pushing New release for Tag" echo "Data change at JSON endpoint ${JSON_URL}" > releasebody.json - echo '{"tag_name":"'${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}'",\ - "target_commitish": "master",\ - "name": "'${EXT_RELEASE_CLEAN}'-ls'${LS_TAG_NUMBER}'",\ - "body": "**LinuxServer Changes:**\\n\\n'${LS_RELEASE_NOTES}'\\n**Remote Changes:**\\n\\n' > start - printf '","draft": false,"prerelease": false}' >> releasebody.json - paste -d'\\0' start releasebody.json > releasebody.json.done - curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases -d @releasebody.json.done''' + jq -n \ + --arg tag_name "$META_TAG" \ + --arg target_commitish "master" \ + --arg ci_url "${CI_URL:-N/A}" \ + --arg ls_notes "$AUTO_RELEASE_NOTES" \ + --arg remote_notes "$(cat releasebody.json)" \ + '{ + "tag_name": $tag_name, + "target_commitish": $target_commitish, + "name": $tag_name, + "body": ("**CI Report:**\\n\\n" + $ci_url + "\\n\\n**LinuxServer Changes:**\\n\\n" + $ls_notes + "\\n\\n**Remote Changes:**\\n\\n" + $remote_notes), + "draft": false, + "prerelease": false }' > releasebody.json.done + curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases -d @releasebody.json.done + ''' } } - // Use helper container to sync the current README on master to the dockerhub endpoint - stage('Sync-README') { + // Add protection to the release branch + stage('Github-Release-Branch-Protection') { when { + branch "master" environment name: 'CHANGE_ID', value: '' environment name: 'EXIT_STATUS', value: '' } steps { - withCredentials([ - [ - $class: 'UsernamePasswordMultiBinding', - credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207', - usernameVariable: 'DOCKERUSER', - passwordVariable: 'DOCKERPASS' - ] - ]) { - sh '''#! /bin/bash - docker pull lsiodev/readme-sync - docker run --rm=true \ - -e DOCKERHUB_USERNAME=$DOCKERUSER \ - -e DOCKERHUB_PASSWORD=$DOCKERPASS \ - -e GIT_REPOSITORY=${LS_USER}/${LS_REPO} \ - -e DOCKER_REPOSITORY=${IMAGE} \ - -e GIT_BRANCH=master \ - lsiodev/readme-sync bash -c 'node sync' ''' - } + echo "Setting up protection for release branch master" + sh '''#! /bin/bash + curl -H "Authorization: token ${GITHUB_TOKEN}" -X PUT https://api.github.com/repos/${LS_USER}/${LS_REPO}/branches/master/protection \ + -d $(jq -c . 
<< EOF + { + "required_status_checks": null, + "enforce_admins": false, + "required_pull_request_reviews": { + "dismiss_stale_reviews": false, + "require_code_owner_reviews": false, + "require_last_push_approval": false, + "required_approving_review_count": 1 + }, + "restrictions": null, + "required_linear_history": false, + "allow_force_pushes": false, + "allow_deletions": false, + "block_creations": false, + "required_conversation_resolution": true, + "lock_branch": false, + "allow_fork_syncing": false, + "required_signatures": false + } +EOF + ) ''' } } // If this is a Pull request send the CI link as a comment on it stage('Pull Request Comment') { when { not {environment name: 'CHANGE_ID', value: ''} - environment name: 'CI', value: 'true' environment name: 'EXIT_STATUS', value: '' } steps { - sh '''curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/issues/${PULL_REQUEST}/comments \ - -d '{"body": "I am a bot, here are the test results for this PR: \\n'${CI_URL}' \\n'${SHELLCHECK_URL}'"}' ''' + sh '''#! /bin/bash + # Function to retrieve JSON data from URL + get_json() { + local url="$1" + local response=$(curl -s "$url") + if [ $? -ne 0 ]; then + echo "Failed to retrieve JSON data from $url" + return 1 + fi + local json=$(echo "$response" | jq .) + if [ $? -ne 0 ]; then + echo "Failed to parse JSON data from $url" + return 1 + fi + echo "$json" + } + + build_table() { + local data="$1" + + # Get the keys in the JSON data + local keys=$(echo "$data" | jq -r 'to_entries | map(.key) | .[]') + + # Check if keys are empty + if [ -z "$keys" ]; then + echo "JSON report data does not contain any keys or the report does not exist." + return 1 + fi + + # Build table header + local header="| Tag | Passed |\\n| --- | --- |\\n" + + # Loop through the JSON data to build the table rows + local rows="" + for build in $keys; do + local status=$(echo "$data" | jq -r ".[\\"$build\\"].test_success") + if [ "$status" = "true" ]; then + status="✅" + else + status="❌" + fi + local row="| "$build" | "$status" |\\n" + rows="${rows}${row}" + done + + local table="${header}${rows}" + local escaped_table=$(echo "$table" | sed 's/\"/\\\\"/g') + echo "$escaped_table" + } + + if [[ "${CI}" = "true" ]]; then + # Retrieve JSON data from URL + data=$(get_json "$CI_JSON_URL") + # Create table from JSON data + table=$(build_table "$data") + echo -e "$table" + + curl -X POST -H "Authorization: token $GITHUB_TOKEN" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/$LS_USER/$LS_REPO/issues/$PULL_REQUEST/comments" \ + -d "{\\"body\\": \\"I am a bot, here are the test results for this PR: \\n${CI_URL}\\n${SHELLCHECK_URL}\\n${table}\\"}" + else + curl -X POST -H "Authorization: token $GITHUB_TOKEN" \ + -H "Accept: application/vnd.github.v3+json" \ + "https://api.github.com/repos/$LS_USER/$LS_REPO/issues/$PULL_REQUEST/comments" \ + -d "{\\"body\\": \\"I am a bot, here is the pushed image/manifest for this PR: \\n\\n\\`${GITHUBIMAGE}:${META_TAG}\\`\\"}" + fi + ''' + } } } @@ -663,21 +1159,94 @@ pipeline { ###################### */ post { always { + sh '''#!/bin/bash + rm -rf /config/.ssh/id_sign + rm -rf /config/.ssh/id_sign.pub + git config --global --unset gpg.format + git config --global --unset user.signingkey + git config --global --unset commit.gpgsign + ''' script{ + env.JOB_DATE = sh( + script: '''date '+%Y-%m-%dT%H:%M:%S%:z' ''', + returnStdout: true).trim() if (env.EXIT_STATUS == "ABORTED"){ sh 'echo "build aborted"' - } - else if 
(currentBuild.currentResult == "SUCCESS"){ - sh ''' curl -X POST --data '{"avatar_url": "https://wiki.jenkins-ci.org/download/attachments/2916393/headshot.png","embeds": [{"color": 1681177,\ - "description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**ShellCheck Results:** '${SHELLCHECK_URL}'\\n**Status:** Success\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\ - "username": "Jenkins"}' ${BUILDS_DISCORD} ''' - } - else { - sh ''' curl -X POST --data '{"avatar_url": "https://wiki.jenkins-ci.org/download/attachments/2916393/headshot.png","embeds": [{"color": 16711680,\ - "description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**ShellCheck Results:** '${SHELLCHECK_URL}'\\n**Status:** failure\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\ + }else{ + if (currentBuild.currentResult == "SUCCESS"){ + if (env.GITHUBIMAGE =~ /lspipepr/){ + env.JOB_WEBHOOK_STATUS='Success' + env.JOB_WEBHOOK_COLOUR=3957028 + env.JOB_WEBHOOK_FOOTER='PR Build' + }else if (env.GITHUBIMAGE =~ /lsiodev/){ + env.JOB_WEBHOOK_STATUS='Success' + env.JOB_WEBHOOK_COLOUR=3957028 + env.JOB_WEBHOOK_FOOTER='Dev Build' + }else{ + env.JOB_WEBHOOK_STATUS='Success' + env.JOB_WEBHOOK_COLOUR=1681177 + env.JOB_WEBHOOK_FOOTER='Live Build' + } + }else{ + if (env.GITHUBIMAGE =~ /lspipepr/){ + env.JOB_WEBHOOK_STATUS='Failure' + env.JOB_WEBHOOK_COLOUR=12669523 + env.JOB_WEBHOOK_FOOTER='PR Build' + }else if (env.GITHUBIMAGE =~ /lsiodev/){ + env.JOB_WEBHOOK_STATUS='Failure' + env.JOB_WEBHOOK_COLOUR=12669523 + env.JOB_WEBHOOK_FOOTER='Dev Build' + }else{ + env.JOB_WEBHOOK_STATUS='Failure' + env.JOB_WEBHOOK_COLOUR=16711680 + env.JOB_WEBHOOK_FOOTER='Live Build' + } + } + sh ''' curl -X POST -H "Content-Type: application/json" --data '{"avatar_url": "https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/jenkins-avatar.png","embeds": [{"'color'": '${JOB_WEBHOOK_COLOUR}',\ + "footer": {"text" : "'"${JOB_WEBHOOK_FOOTER}"'"},\ + "timestamp": "'${JOB_DATE}'",\ + "description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**ShellCheck Results:** '${SHELLCHECK_URL}'\\n**Status:** '${JOB_WEBHOOK_STATUS}'\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\ "username": "Jenkins"}' ${BUILDS_DISCORD} ''' } } } + cleanup { + sh '''#! /bin/bash + echo "Pruning builder!!" 
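+                # Prune the cache of the buildx builder named "container", stop every running container except the buildkit helper, then clean up leftover images and volumes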
+ docker builder prune -f --builder container || : + containers=$(docker ps -q) + if [[ -n "${containers}" ]]; then + BUILDX_CONTAINER_ID=$(docker ps -qf 'name=buildx_buildkit') + for container in ${containers}; do + if [[ "${container}" == "${BUILDX_CONTAINER_ID}" ]]; then + echo "skipping buildx container in docker stop" + else + echo "Stopping container ${container}" + docker stop ${container} + fi + done + fi + docker system prune -f --volumes || : + docker image prune -af || : + ''' + cleanWs() + } + } +} + +def retry_backoff(int max_attempts, int power_base, Closure c) { + int n = 0 + while (n < max_attempts) { + try { + c() + return + } catch (err) { + if ((n + 1) >= max_attempts) { + throw err + } + sleep(power_base ** n) + n++ + } } + return } diff --git a/README.md b/README.md index d27a122d..58314638 100644 --- a/README.md +++ b/README.md @@ -1,251 +1,393 @@ +<!-- DO NOT EDIT THIS FILE MANUALLY --> +<!-- Please read https://github.com/linuxserver/docker-plex/blob/master/.github/CONTRIBUTING.md --> [![linuxserver.io](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/linuxserver_medium.png)](https://linuxserver.io) -[![Blog](https://img.shields.io/static/v1.svg?style=flat-square&color=E68523&label=linuxserver.io&message=Blog)](https://blog.linuxserver.io "all the things you can do with our containers including How-To guides, opinions and much more!") -[![Discord](https://img.shields.io/discord/354974912613449730.svg?style=flat-square&color=E68523&label=Discord&logo=discord&logoColor=FFFFFF)](https://discord.gg/YWrKVTn "realtime support / chat with the community and the team.") -[![Discourse](https://img.shields.io/discourse/https/discourse.linuxserver.io/topics.svg?style=flat-square&color=E68523&logo=discourse&logoColor=FFFFFF)](https://discourse.linuxserver.io "post on our community forum.") -[![Fleet](https://img.shields.io/static/v1.svg?style=flat-square&color=E68523&label=linuxserver.io&message=Fleet)](https://fleet.linuxserver.io "an online web interface which displays all of our maintained images.") -[![Podcast](https://img.shields.io/static/v1.svg?style=flat-square&color=E68523&label=linuxserver.io&message=Podcast)](https://anchor.fm/linuxserverio "on hiatus. 
Coming back soon (late 2018).") -[![Open Collective](https://img.shields.io/opencollective/all/linuxserver.svg?style=flat-square&color=E68523&label=Open%20Collective%20Supporters)](https://opencollective.com/linuxserver "please consider helping us by either donating or contributing to our budget") +[![Blog](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=Blog)](https://blog.linuxserver.io "all the things you can do with our containers including How-To guides, opinions and much more!") +[![Discord](https://img.shields.io/discord/354974912613449730.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=Discord&logo=discord)](https://linuxserver.io/discord "realtime support / chat with the community and the team.") +[![Discourse](https://img.shields.io/discourse/https/discourse.linuxserver.io/topics.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&logo=discourse)](https://discourse.linuxserver.io "post on our community forum.") +[![GitHub](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=GitHub&logo=github)](https://github.com/linuxserver "view the source for all of our repositories.") +[![Open Collective](https://img.shields.io/opencollective/all/linuxserver.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=Supporters&logo=open%20collective)](https://opencollective.com/linuxserver "please consider helping us by either donating or contributing to our budget") -The [LinuxServer.io](https://linuxserver.io) team brings you another container release featuring :- +The [LinuxServer.io](https://linuxserver.io) team brings you another container release featuring: - * regular and timely application updates - * easy user mappings (PGID, PUID) - * custom base image with s6 overlay - * weekly base OS updates with common layers across the entire LinuxServer.io ecosystem to minimise space usage, down time and bandwidth - * regular security updates +* regular and timely application updates +* easy user mappings (PGID, PUID) +* custom base image with s6 overlay +* weekly base OS updates with common layers across the entire LinuxServer.io ecosystem to minimise space usage, down time and bandwidth +* regular security updates Find us at: + * [Blog](https://blog.linuxserver.io) - all the things you can do with our containers including How-To guides, opinions and much more! -* [Discord](https://discord.gg/YWrKVTn) - realtime support / chat with the community and the team. +* [Discord](https://linuxserver.io/discord) - realtime support / chat with the community and the team. * [Discourse](https://discourse.linuxserver.io) - post on our community forum. -* [Fleet](https://fleet.linuxserver.io) - an online web interface which displays all of our maintained images. -* [Podcast](https://anchor.fm/linuxserverio) - on hiatus. Coming back soon (late 2018). +* [GitHub](https://github.com/linuxserver) - view the source for all of our repositories. 
* [Open Collective](https://opencollective.com/linuxserver) - please consider helping us by either donating or contributing to our budget # [linuxserver/plex](https://github.com/linuxserver/docker-plex) -[![GitHub Release](https://img.shields.io/github/release/linuxserver/docker-plex.svg?style=flat-square&color=E68523)](https://github.com/linuxserver/docker-plex/releases) -[![MicroBadger Layers](https://img.shields.io/microbadger/layers/linuxserver/plex.svg?style=flat-square&color=E68523)](https://microbadger.com/images/linuxserver/plex "Get your own version badge on microbadger.com") -[![MicroBadger Size](https://img.shields.io/microbadger/image-size/linuxserver/plex.svg?style=flat-square&color=E68523)](https://microbadger.com/images/linuxserver/plex "Get your own version badge on microbadger.com") -[![Docker Pulls](https://img.shields.io/docker/pulls/linuxserver/plex.svg?style=flat-square&color=E68523)](https://hub.docker.com/r/linuxserver/plex) -[![Docker Stars](https://img.shields.io/docker/stars/linuxserver/plex.svg?style=flat-square&color=E68523)](https://hub.docker.com/r/linuxserver/plex) -[![Build Status](https://ci.linuxserver.io/view/all/job/Docker-Pipeline-Builders/job/docker-plex/job/master/badge/icon?style=flat-square)](https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-plex/job/master/) -[![](https://lsio-ci.ams3.digitaloceanspaces.com/linuxserver/plex/latest/badge.svg)](https://lsio-ci.ams3.digitaloceanspaces.com/linuxserver/plex/latest/index.html) -[Plex](https://plex.tv) organizes video, music and photos from personal media libraries and streams them to smart TVs, streaming boxes and mobile devices. This container is packaged as a standalone Plex Media Server. has always been a top priority. Straightforward design and bulk actions mean getting things done faster. 
+[![Scarf.io pulls](https://scarf.sh/installs-badge/linuxserver-ci/linuxserver%2Fplex?color=94398d&label-color=555555&logo-color=ffffff&style=for-the-badge&package-type=docker)](https://scarf.sh) +[![GitHub Stars](https://img.shields.io/github/stars/linuxserver/docker-plex.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&logo=github)](https://github.com/linuxserver/docker-plex) +[![GitHub Release](https://img.shields.io/github/release/linuxserver/docker-plex.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&logo=github)](https://github.com/linuxserver/docker-plex/releases) +[![GitHub Package Repository](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=GitHub%20Package&logo=github)](https://github.com/linuxserver/docker-plex/packages) +[![GitLab Container Registry](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=GitLab%20Registry&logo=gitlab)](https://gitlab.com/linuxserver.io/docker-plex/container_registry) +[![Quay.io](https://img.shields.io/static/v1.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=linuxserver.io&message=Quay.io)](https://quay.io/repository/linuxserver.io/plex) +[![Docker Pulls](https://img.shields.io/docker/pulls/linuxserver/plex.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=pulls&logo=docker)](https://hub.docker.com/r/linuxserver/plex) +[![Docker Stars](https://img.shields.io/docker/stars/linuxserver/plex.svg?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=stars&logo=docker)](https://hub.docker.com/r/linuxserver/plex) +[![Jenkins Build](https://img.shields.io/jenkins/build?labelColor=555555&logoColor=ffffff&style=for-the-badge&jobUrl=https%3A%2F%2Fci.linuxserver.io%2Fjob%2FDocker-Pipeline-Builders%2Fjob%2Fdocker-plex%2Fjob%2Fmaster%2F&logo=jenkins)](https://ci.linuxserver.io/job/Docker-Pipeline-Builders/job/docker-plex/job/master/) +[![LSIO CI](https://img.shields.io/badge/dynamic/yaml?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=CI&query=CI&url=https%3A%2F%2Fci-tests.linuxserver.io%2Flinuxserver%2Fplex%2Flatest%2Fci-status.yml)](https://ci-tests.linuxserver.io/linuxserver/plex/latest/index.html) + +[Plex](https://plex.tv) organizes video, music and photos from personal media libraries and streams them to smart TVs, streaming boxes and mobile devices. This container is packaged as a standalone Plex Media Server. Straightforward design and bulk actions mean getting things done faster. -[![plex](http://the-gadgeteer.com/wp-content/uploads/2015/10/plex-logo-e1446990678679.png)](https://plex.tv) +[![plex](https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/plex-logo.png)](https://plex.tv) ## Supported Architectures -Our images support multiple architectures such as `x86-64`, `arm64` and `armhf`. We utilise the docker manifest for multi-platform awareness. More information is available from docker [here](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md#manifest-list) and our announcement [here](https://blog.linuxserver.io/2019/02/21/the-lsio-pipeline-project/). +We utilise the docker manifest for multi-platform awareness. 
More information is available from docker [here](https://distribution.github.io/distribution/spec/manifest-v2-2/#manifest-list) and our announcement [here](https://blog.linuxserver.io/2019/02/21/the-lsio-pipeline-project/). -Simply pulling `linuxserver/plex` should retrieve the correct image for your arch, but you can also pull specific arch images via tags. +Simply pulling `lscr.io/linuxserver/plex:latest` should retrieve the correct image for your arch, but you can also pull specific arch images via tags. The architectures supported by this image are: -| Architecture | Tag | -| :----: | --- | -| x86-64 | amd64-latest | -| arm64 | arm64v8-latest | -| armhf | arm32v7-latest | +| Architecture | Available | Tag | +| :----: | :----: | ---- | +| x86-64 | ✅ | amd64-\<version tag\> | +| arm64 | ✅ | arm64v8-\<version tag\> | +## Application Setup -## Usage +Webui can be found at `<your-ip>:32400/web` -Here are some example snippets to help you get started creating a container. +>[!NOTE] +>If there is no value set for the VERSION variable, then no updates will take place. -### docker +>[!NOTE] +>For new users, no updates will take place on the first run of the container as there is no preferences file to read your token from, to update restart the Docker container after logging in through the webui. -``` -docker create \ - --name=plex \ - --net=host \ - -e PUID=1000 \ - -e PGID=1000 \ - -e VERSION=docker \ - -e UMASK_SET=022 `#optional` \ - -v </path/to/library>:/config \ - -v <path/to/tvseries>:/tv \ - -v </path/to/movies>:/movies \ - -v </path for transcoding>:/transcode \ - --restart unless-stopped \ - linuxserver/plex -``` +Valid settings for VERSION are:- + +>[!NOTE] +>YOU CANNOT UPDATE TO A PLEXPASS ONLY (BETA) VERSION IF YOU ARE NOT LOGGED IN WITH A PLEXPASS ACCOUNT. + ++ **`docker`**: Let Docker handle the Plex Version, we keep our Dockerhub Endpoint up to date with the latest public builds. This is the same as leaving this setting out of your create command. ++ **`latest`**: will update plex to the latest version available that you are entitled to. ++ **`public`**: will update plexpass users to the latest public version, useful for plexpass users that don't want to be on the bleeding edge but still want the latest public updates. ++ **`<specific-version>`**: will select a specific version (eg 0.9.12.4.1192-9a47d21) of plex to install, note you cannot use this to access plexpass versions if you do not have plexpass. + +### Hardware Acceleration +Many desktop applications need access to a GPU to function properly and even some Desktop Environments have compositor effects that will not function without a GPU. However this is not a hard requirement and all base images will function without a video device mounted into the container. -### docker-compose +#### Intel/ATI/AMD -Compatible with docker-compose v2 schemas. +To leverage hardware acceleration you will need to mount /dev/dri video device inside of the container. +```text +--device=/dev/dri:/dev/dri ``` + +We will automatically ensure the abc user inside of the container has the proper permissions to access this device. + +#### Nvidia + +Hardware acceleration users for Nvidia will need to install the container runtime provided by Nvidia on their host, instructions can be found here: +https://github.com/NVIDIA/nvidia-container-toolkit + +We automatically add the necessary environment variable that will utilise all the features available on a GPU on the host. 
Once nvidia-container-toolkit is installed on your host you will need to re/create the docker container with the nvidia container runtime `--runtime=nvidia` and add an environment variable `-e NVIDIA_VISIBLE_DEVICES=all` (can also be set to a specific gpu's UUID, this can be discovered by running `nvidia-smi --query-gpu=gpu_name,gpu_uuid --format=csv` ). NVIDIA automatically mounts the GPU and drivers from your host into the container. + +#### Arm Devices + +Best effort is made to install tools to allow mounting in /dev/dri on Arm devices. In most cases if /dev/dri exists on the host it should just work. If running a Raspberry Pi 4 be sure to enable `dtoverlay=vc4-fkms-v3d` in your usercfg.txt. + +## Read-Only Operation + +This image can be run with a read-only container filesystem. For details please [read the docs](https://docs.linuxserver.io/misc/read-only/). + +### Caveats + +* Runtime update of Plex (and thus Plexpass builds) is not supported. +* Transcode directory must be mounted to a host path or tmpfs. + +## Non-Root Operation + +This image can be run with a non-root user. For details please [read the docs](https://docs.linuxserver.io/misc/non-root/). + +### Caveats + +* Runtime update of Plex (and thus Plexpass builds) is not supported. +* Transcode directory must be mounted to a host path or tmpfs. + +## Usage + +To help you get started creating a container from this image you can either use docker-compose or the docker cli. + +>[!NOTE] +>Unless a parameter is flagged as 'optional', it is *mandatory* and a value must be provided. + +### docker-compose (recommended, [click here for more info](https://docs.linuxserver.io/general/docker-compose)) + +```yaml --- -version: "2" services: plex: - image: linuxserver/plex + image: lscr.io/linuxserver/plex:latest container_name: plex network_mode: host environment: - PUID=1000 - PGID=1000 + - TZ=Etc/UTC - VERSION=docker - - UMASK_SET=022 #optional + - PLEX_CLAIM= #optional volumes: - - </path/to/library>:/config - - <path/to/tvseries>:/tv - - </path/to/movies>:/movies - - </path for transcoding>:/transcode + - /path/to/plex/library:/config + - /path/to/tvseries:/tv + - /path/to/movies:/movies restart: unless-stopped ``` +### docker cli ([click here for more info](https://docs.docker.com/engine/reference/commandline/cli/)) + +```bash +docker run -d \ + --name=plex \ + --net=host \ + -e PUID=1000 \ + -e PGID=1000 \ + -e TZ=Etc/UTC \ + -e VERSION=docker \ + -e PLEX_CLAIM= `#optional` \ + -v /path/to/plex/library:/config \ + -v /path/to/tvseries:/tv \ + -v /path/to/movies:/movies \ + --restart unless-stopped \ + lscr.io/linuxserver/plex:latest +``` + ## Parameters -Container images are configured using parameters passed at runtime (such as those above). These parameters are separated by a colon and indicate `<external>:<internal>` respectively. For example, `-p 8080:80` would expose port `80` from inside the container to be accessible from the host's IP on port `8080` outside the container. +Containers are configured using parameters passed at runtime (such as those above). These parameters are separated by a colon and indicate `<external>:<internal>` respectively. For example, `-p 8080:80` would expose port `80` from inside the container to be accessible from the host's IP on port `8080` outside the container. 
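+As a quick, hypothetical illustration of that `<external>:<internal>` convention applied to this image (only the container-side values `32400` and `/tv` come from the examples above; the host-side values are placeholders, and port mappings only take effect in bridge network mode):
+
+```text
+-p 8080:32400             #host port 8080 -> container port 32400 (Plex webui)
+-v /path/to/tvseries:/tv  #host folder /path/to/tvseries -> container folder /tv
+```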
| Parameter | Function | | :----: | --- | | `--net=host` | Use Host Networking | | `-e PUID=1000` | for UserID - see below for explanation | | `-e PGID=1000` | for GroupID - see below for explanation | +| `-e TZ=Etc/UTC` | specify a timezone to use, see this [list](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List). | | `-e VERSION=docker` | Set whether to update plex or not - see Application Setup section. | -| `-e UMASK_SET=022` | control permissions of files and directories created by Plex | +| `-e PLEX_CLAIM=` | Optionally you can obtain a claim token from https://plex.tv/claim and input it here. Keep in mind that the claim tokens expire within 4 minutes. | | `-v /config` | Plex library location. *This can grow very large, 50gb+ is likely for a large collection.* | | `-v /tv` | Media goes here. Add as many as needed e.g. `/movies`, `/tv`, etc. | | `-v /movies` | Media goes here. Add as many as needed e.g. `/movies`, `/tv`, etc. | -| `-v /transcode` | Path for transcoding folder, *optional*. | +| `--read-only=true` | Run container with a read-only filesystem. Please [read the docs](https://docs.linuxserver.io/misc/read-only/). | +| `--user=1000:1000` | Run container with a non-root user. Please [read the docs](https://docs.linuxserver.io/misc/non-root/). | + +## Environment variables from files (Docker secrets) + +You can set any environment variable from a file by using a special prepend `FILE__`. + +As an example: + +```bash +-e FILE__MYVAR=/run/secrets/mysecretvariable +``` + +Will set the environment variable `MYVAR` based on the contents of the `/run/secrets/mysecretvariable` file. + +## Umask for running applications + +For all of our images we provide the ability to override the default umask settings for services started within the containers using the optional `-e UMASK=022` setting. +Keep in mind umask is not chmod; it subtracts from permissions based on its value, it does not add. Please read up [here](https://en.wikipedia.org/wiki/Umask) before asking for support. ## Optional Parameters -*Special note* - If you'd like to run Plex without requiring `--net=host` (`NOT recommended`) then you will need the following ports in your `docker create` command: +If you want to run the container in bridge network mode (instead of the recommended host network mode) you will need to specify ports. +The [official documentation for ports](https://support.plex.tv/articles/201543147-what-network-ports-do-i-need-to-allow-through-my-firewall/) lists 32400 as the only required port. +The rest of the ports are optionally used for specific purposes listed in the documentation. +If you have not already claimed your server (first time setup) you need to set `PLEX_CLAIM` to claim a server set up with bridge networking. 
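+For reference, a minimal docker-compose sketch of such a bridge-mode setup (host paths and the claim token are placeholders; the full list of optional `-p` mappings for a `docker run` command follows below):
+
+```yaml
+---
+services:
+  plex:
+    image: lscr.io/linuxserver/plex:latest
+    container_name: plex
+    environment:
+      - PUID=1000
+      - PGID=1000
+      - TZ=Etc/UTC
+      - VERSION=docker
+      - PLEX_CLAIM= #claim token from https://plex.tv/claim, needed to claim a new server in bridge mode
+    volumes:
+      - /path/to/plex/library:/config
+      - /path/to/tvseries:/tv
+      - /path/to/movies:/movies
+    ports:
+      - 32400:32400 #the only required port per the official documentation
+    restart: unless-stopped
+```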
``` -p 32400:32400 \ - -p 32400:32400/udp \ - -p 32469:32469 \ - -p 32469:32469/udp \ + -p 1900:1900/udp \ -p 5353:5353/udp \ - -p 1900:1900/udp + -p 8324:8324 \ + -p 32410:32410/udp \ + -p 32412:32412/udp \ + -p 32413:32413/udp \ + -p 32414:32414/udp \ + -p 32469:32469 + ``` The application accepts a series of environment variables to further customize itself on boot: | Parameter | Function | | :---: | --- | -| `-v /transcode` | Path for transcoding folder| | `--device=/dev/dri:/dev/dri` | Add this option to your run command if you plan on using Quicksync hardware acceleration - see Application Setup section.| - +| `--device=/dev/dvb:/dev/dvb` | Add this option to your run command if you plan on using dvb devices.| ## User / Group Identifiers -When using volumes (`-v` flags) permissions issues can arise between the host OS and the container, we avoid this issue by allowing you to specify the user `PUID` and group `PGID`. +When using volumes (`-v` flags), permissions issues can arise between the host OS and the container, we avoid this issue by allowing you to specify the user `PUID` and group `PGID`. Ensure any volume directories on the host are owned by the same user you specify and any permissions issues will vanish like magic. -In this instance `PUID=1000` and `PGID=1000`, to find yours use `id user` as below: +In this instance `PUID=1000` and `PGID=1000`, to find yours use `id your_user` as below: +```bash +id your_user ``` - $ id username - uid=1000(dockeruser) gid=1000(dockergroup) groups=1000(dockergroup) + +Example output: + +```text +uid=1000(your_user) gid=1000(your_user) groups=1000(your_user) ``` +## Docker Mods -  -## Application Setup +[![Docker Mods](https://img.shields.io/badge/dynamic/yaml?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=plex&query=%24.mods%5B%27plex%27%5D.mod_count&url=https%3A%2F%2Fraw.githubusercontent.com%2Flinuxserver%2Fdocker-mods%2Fmaster%2Fmod-list.yml)](https://mods.linuxserver.io/?mod=plex "view available mods for this container.") [![Docker Universal Mods](https://img.shields.io/badge/dynamic/yaml?color=94398d&labelColor=555555&logoColor=ffffff&style=for-the-badge&label=universal&query=%24.mods%5B%27universal%27%5D.mod_count&url=https%3A%2F%2Fraw.githubusercontent.com%2Flinuxserver%2Fdocker-mods%2Fmaster%2Fmod-list.yml)](https://mods.linuxserver.io/?mod=universal "view available universal mods.") -Webui can be found at `<your-ip>:32400/web` +We publish various [Docker Mods](https://github.com/linuxserver/docker-mods) to enable additional functionality within the containers. The list of Mods available for this image (if any) as well as universal mods that can be applied to any one of our images can be accessed via the dynamic badges above. -** Note about updates, if there is no value set for the VERSION variable, then no updates will take place.** +## Support Info -** For new users, no updates will take place on the first run of the container as there is no preferences file to read your token from, to update restart the Docker container after logging in through the webui** +* Shell access whilst the container is running: -Valid settings for VERSION are:- + ```bash + docker exec -it plex /bin/bash + ``` -`IMPORTANT NOTE:- YOU CANNOT UPDATE TO A PLEXPASS ONLY (BETA) VERSION IF YOU ARE NOT LOGGED IN WITH A PLEXPASS ACCOUNT` +* To monitor the logs of the container in realtime: -+ **`docker`**: Let Docker handle the Plex Version, we keep our Dockerhub Endpoint up to date with the latest public builds. 
This is the same as leaving this setting out of your create command. -+ **`latest`**: will update plex to the latest version available that you are entitled to. -+ **`public`**: will update plexpass users to the latest public version, useful for plexpass users that don't want to be on the bleeding edge but still want the latest public updates. -+ **`<specific-version>`**: will select a specific version (eg 0.9.12.4.1192-9a47d21) of plex to install, note you cannot use this to access plexpass versions if you do not have plexpass. + ```bash + docker logs -f plex + ``` -Hardware acceleration users for Intel Quicksync will need to mount their /dev/dri video device inside of the container by passing the following command when running or creating the container: +* Container version number: -```--device=/dev/dri:/dev/dri``` + ```bash + docker inspect -f '{{ index .Config.Labels "build_version" }}' plex + ``` -We will automatically ensure the abc user inside of the container has the proper permissions to access this device. +* Image version number: -Hardware acceleration users for Nvidia will need to install the container runtime provided by Nvidia on their host, instructions can be found here: + ```bash + docker inspect -f '{{ index .Config.Labels "build_version" }}' lscr.io/linuxserver/plex:latest + ``` -https://github.com/NVIDIA/nvidia-docker +## Updating Info -We automatically add the necessary environment variable that will utilise all the features available on a GPU on the host. Once nvidia-docker is installed on your host you will need to re/create the docker container with the nvidia container runtime `--runtime=nvidia` and add an environment variable `-e NVIDIA_VISIBLE_DEVICES=all` (can also be set to a specific gpu's UUID, this can be discovered by running `nvidia-smi --query-gpu=gpu_name,gpu_uuid --format=csv` ). NVIDIA automatically mounts the GPU and drivers from your host into the plex docker. +Most of our images are static, versioned, and require an image update and container recreation to update the app inside. With some exceptions (noted in the relevant readme.md), we do not recommend or support updating apps inside the container. Please consult the [Application Setup](#application-setup) section above to see if it is recommended for the image. +Below are the instructions for updating containers: +### Via Docker Compose -## Support Info +* Update images: + * All images: -* Shell access whilst the container is running: `docker exec -it plex /bin/bash` -* To monitor the logs of the container in realtime: `docker logs -f plex` -* container version number - * `docker inspect -f '{{ index .Config.Labels "build_version" }}' plex` -* image version number - * `docker inspect -f '{{ index .Config.Labels "build_version" }}' linuxserver/plex` + ```bash + docker-compose pull + ``` -## Updating Info + * Single image: -Most of our images are static, versioned, and require an image update and container recreation to update the app inside. With some exceptions (ie. nextcloud, plex), we do not recommend or support updating apps inside the container. Please consult the [Application Setup](#application-setup) section above to see if it is recommended for the image. 
+ ```bash + docker-compose pull plex + ``` -Below are the instructions for updating containers: +* Update containers: + * All containers: -### Via Docker Run/Create -* Update the image: `docker pull linuxserver/plex` -* Stop the running container: `docker stop plex` -* Delete the container: `docker rm plex` -* Recreate a new container with the same docker create parameters as instructed above (if mapped correctly to a host folder, your `/config` folder and settings will be preserved) -* Start the new container: `docker start plex` -* You can also remove the old dangling images: `docker image prune` + ```bash + docker-compose up -d + ``` -### Via Docker Compose -* Update all images: `docker-compose pull` - * or update a single image: `docker-compose pull plex` -* Let compose update all containers as necessary: `docker-compose up -d` - * or update a single container: `docker-compose up -d plex` -* You can also remove the old dangling images: `docker image prune` + * Single container: + + ```bash + docker-compose up -d plex + ``` + +* You can also remove the old dangling images: + + ```bash + docker image prune + ``` -### Via Watchtower auto-updater (especially useful if you don't remember the original parameters) -* Pull the latest image at its tag and replace it with the same env variables in one run: - ``` - docker run --rm \ - -v /var/run/docker.sock:/var/run/docker.sock \ - containrrr/watchtower \ - --run-once plex - ``` +### Via Docker Run -**Note:** We do not endorse the use of Watchtower as a solution to automated updates of existing Docker containers. In fact we generally discourage automated updates. However, this is a useful tool for one-time manual updates of containers where you have forgotten the original parameters. In the long term, we highly recommend using Docker Compose. +* Update the image: -* You can also remove the old dangling images: `docker image prune` + ```bash + docker pull lscr.io/linuxserver/plex:latest + ``` + +* Stop the running container: + + ```bash + docker stop plex + ``` + +* Delete the container: + + ```bash + docker rm plex + ``` + +* Recreate a new container with the same docker run parameters as instructed above (if mapped correctly to a host folder, your `/config` folder and settings will be preserved) +* You can also remove the old dangling images: + + ```bash + docker image prune + ``` + +### Image Update Notifications - Diun (Docker Image Update Notifier) + +>[!TIP] +>We recommend [Diun](https://crazymax.dev/diun/) for update notifications. Other tools that automatically update containers unattended are not recommended or supported. ## Building locally If you want to make local modifications to these images for development purposes or just to customize the logic: -``` + +```bash git clone https://github.com/linuxserver/docker-plex.git cd docker-plex docker build \ --no-cache \ --pull \ - -t linuxserver/plex:latest . + -t lscr.io/linuxserver/plex:latest . ``` -The ARM variants can be built on x86_64 hardware using `multiarch/qemu-user-static` -``` -docker run --rm --privileged multiarch/qemu-user-static:register --reset +The ARM variants can be built on x86_64 hardware and vice versa using `lscr.io/linuxserver/qemu-static` + +```bash +docker run --rm --privileged lscr.io/linuxserver/qemu-static --reset ``` Once registered you can define the dockerfile to use with `-f Dockerfile.aarch64`. ## Versions +* **04.11.24:** - Add Nvidia capability needed for h265 +* **18.07.24:** - Rebase to Ubuntu Noble. 
+* **12.02.24:** - Use universal hardware acceleration blurb +* **09.01.24:** - Set ownership on TranscoderTempDirectory when it's been saved in Preferences. +* **16.08.23:** - Install unrar from [linuxserver repo](https://github.com/linuxserver/docker-unrar). +* **03.07.23:** - Deprecate armhf. As announced [here](https://www.linuxserver.io/blog/a-farewell-to-arm-hf) +* **16.10.22:** - Rebase to jammy. Update to s6v3. Remove opencl packages (bundled with plex). +* **18.07.22:** - Pin all opencl related driver packages. +* **16.05.22:** - Pin opencl version. +* **04.03.22:** - Increase verbosity of video device permissions fix, attempt to fix missing group rw. +* **25.12.21:** - Install Intel drivers from the official repo. +* **20.01.21:** - Deprecate `UMASK_SET` in favor of UMASK in baseimage, see above for more information. +* **10.12.20:** - Add latest Intel Compute packages from github repo for opencl support on latest gen igpu. +* **23.11.20:** - Add Bionic branch make Focal default. +* **03.05.20:** - Update exposed ports and example docs for bridge mode. +* **23.03.20:** - Remove udev hack (no longer needed), suppress uuid error in log during first start. +* **04.12.19:** - Add variable for setting PLEX_CLAIM. Remove `/transcode` volume mapping as it is now set via plex gui and defaults to a location under `/config`. * **06.08.19:** - Add variable for setting UMASK. * **10.07.19:** - Fix permissions for tuner (/dev/dvb) devices. * **20.05.19:** - Bugfix do not allow Root group for Intel QuickSync ownership rules. diff --git a/jenkins-vars.yml b/jenkins-vars.yml index 8ec86481..81f23f85 100644 --- a/jenkins-vars.yml +++ b/jenkins-vars.yml @@ -23,6 +23,6 @@ repo_vars: - CI_PORT='32400' - CI_SSL='false' - CI_DELAY='120' - - CI_DOCKERENV='TZ=US/Pacific' - - CI_AUTH='user:password' + - CI_DOCKERENV='' + - CI_AUTH='' - CI_WEBPATH='/web/index.html' diff --git a/package_versions.txt b/package_versions.txt index 9e12e961..362af80b 100755 --- a/package_versions.txt +++ b/package_versions.txt @@ -1,131 +1,148 @@ -adduser3.116ubuntu1 -apt1.6.12 -apt-utils1.6.12 -base-files10.1ubuntu2.6 -base-passwd3.5.44 -bash4.4.18-2ubuntu1.2 -bsdutils1:2.31.1-0.4ubuntu3.4 -bzip21.0.6-8.1ubuntu0.2 -ca-certificates20180409 -coreutils8.28-1ubuntu1 -curl7.58.0-2ubuntu3.8 -dash0.5.8-2.10 -debconf1.5.66ubuntu1 -debianutils4.8.4 -diffutils1:3.6-1 -dpkg1.19.0.5ubuntu2.3 -e2fsprogs1.44.1-1ubuntu1.2 -fdisk2.31.1-0.4ubuntu3.4 -findutils4.6.0+git+20170828-2 -gcc-8-base8.3.0-6ubuntu1~18.04.1 -gpgv2.2.4-1ubuntu1.2 -grep3.1-2build1 -gzip1.6-5ubuntu1 -hostname3.20 -init-system-helpers1.51 -jq1.5+dfsg-2 -krb5-locales1.16-2ubuntu0.1 -libacl12.2.52-3build1 -libapt-inst2.01.6.12 -libapt-pkg5.01.6.12 -libasn1-8-heimdal7.5.0+dfsg-1 -libattr11:2.4.47-2build1 -libaudit11:2.8.2-1ubuntu1 -libaudit-common1:2.8.2-1ubuntu1 -libblkid12.31.1-0.4ubuntu3.4 -libbz2-1.01.0.6-8.1ubuntu0.2 -libc62.27-3ubuntu1 -libcap-ng00.7.7-3.1 -libc-bin2.27-3ubuntu1 -libcom-err21.44.1-1ubuntu1.2 -libcurl47.58.0-2ubuntu3.8 -libdb5.35.3.28-13.1ubuntu1.1 -libdebconfclient00.213ubuntu1 -libext2fs21.44.1-1ubuntu1.2 -libfdisk12.31.1-0.4ubuntu3.4 -libffi63.2.1-8 -libgcc11:8.3.0-6ubuntu1~18.04.1 -libgcrypt201.8.1-4ubuntu1.1 -libgmp102:6.1.2+dfsg-2 -libgnutls303.5.18-1ubuntu1.1 -libgpg-error01.27-6 -libgssapi3-heimdal7.5.0+dfsg-1 -libgssapi-krb5-21.16-2ubuntu0.1 -libhcrypto4-heimdal7.5.0+dfsg-1 -libheimbase1-heimdal7.5.0+dfsg-1 -libheimntlm0-heimdal7.5.0+dfsg-1 -libhogweed43.4-1 -libhx509-5-heimdal7.5.0+dfsg-1 -libidn2-02.0.4-1.1build2 -libjq11.5+dfsg-2 
-libk5crypto31.16-2ubuntu0.1 -libkeyutils11.5.9-9.2ubuntu2 -libkmod224-1ubuntu3.2 -libkrb5-26-heimdal7.5.0+dfsg-1 -libkrb5-31.16-2ubuntu0.1 -libkrb5support01.16-2ubuntu0.1 -libldap-2.4-22.4.45+dfsg-1ubuntu1.4 -libldap-common2.4.45+dfsg-1ubuntu1.4 -liblz4-10.0~r131-2ubuntu3 -liblzma55.2.2-1.3 -libmount12.31.1-0.4ubuntu3.4 -libncurses56.1-1ubuntu1.18.04 -libncursesw56.1-1ubuntu1.18.04 -libnettle63.4-1 -libnghttp2-141.30.0-1ubuntu1 -libonig46.7.0-1 -libp11-kit00.23.9-2 -libpam0g1.1.8-3.6ubuntu2.18.04.1 -libpam-modules1.1.8-3.6ubuntu2.18.04.1 -libpam-modules-bin1.1.8-3.6ubuntu2.18.04.1 -libpam-runtime1.1.8-3.6ubuntu2.18.04.1 -libpcre32:8.39-9 -libprocps62:3.3.12-3ubuntu1.2 -libpsl50.19.1-5build1 -libroken18-heimdal7.5.0+dfsg-1 -librtmp12.4+20151223.gitfa8646d.1-1 -libsasl2-22.1.27~101-g0780600+dfsg-3ubuntu2 -libsasl2-modules2.1.27~101-g0780600+dfsg-3ubuntu2 -libsasl2-modules-db2.1.27~101-g0780600+dfsg-3ubuntu2 -libseccomp22.4.1-0ubuntu0.18.04.2 -libselinux12.7-2build2 -libsemanage12.7-2build2 -libsemanage-common2.7-2build2 -libsepol12.7-1 -libsmartcols12.31.1-0.4ubuntu3.4 -libsqlite3-03.22.0-1ubuntu0.1 -libss21.44.1-1ubuntu1.2 -libssl1.11.1.1-1ubuntu2.1~18.04.4 -libstdc++68.3.0-6ubuntu1~18.04.1 -libsystemd0237-3ubuntu10.29 -libtasn1-64.13-2 -libtinfo56.1-1ubuntu1.18.04 -libudev1237-3ubuntu10.29 -libunistring20.9.9-0ubuntu2 -libuuid12.31.1-0.4ubuntu3.4 -libwind0-heimdal7.5.0+dfsg-1 -libzstd11.3.3+dfsg-2ubuntu1.1 -locales2.27-3ubuntu1 -login1:4.5-1ubuntu2 -lsb-base9.20170808ubuntu1 -mawk1.3.3-17ubuntu3 -mount2.31.1-0.4ubuntu3.4 -ncurses-base6.1-1ubuntu1.18.04 -ncurses-bin6.1-1ubuntu1.18.04 -openssl1.1.1-1ubuntu2.1~18.04.4 -passwd1:4.5-1ubuntu2 -perl-base5.26.1-6ubuntu0.3 -plexmediaserver1.17.0.1841-d42cfa161 -procps2:3.3.12-3ubuntu1.2 -publicsuffix20180223.1310-1 -sed4.4-2 -sensible-utils0.0.12 -sysvinit-utils2.88dsf-59.10ubuntu1 -tar1.29b-2ubuntu0.1 -tzdata2019b-0ubuntu0.18.04 -ubuntu-keyring2018.09.18.1~18.04.0 -udev237-3ubuntu10.29 -unrar1:5.5.8-1 -util-linux2.31.1-0.4ubuntu3.4 -wget1.19.4-1ubuntu2.2 -zlib1g1:1.2.11.dfsg-0ubuntu2 +NAME VERSION TYPE +adduser 3.137ubuntu1 deb +apt 2.8.3 deb +apt-utils 2.8.3 deb +base-files 13ubuntu10.3 deb +base-passwd 3.6.3build1 deb +bash 5.2.21-2ubuntu4 deb +bsdutils 1:2.39.3-9ubuntu6.4 deb +ca-certificates 20240203 deb +catatonit 0.1.7-1 deb +coreutils 9.4-3ubuntu6.1 deb +cron 3.0pl1-184ubuntu2 deb +cron-daemon-common 3.0pl1-184ubuntu2 deb +curl 8.5.0-2ubuntu10.6 deb +dash 0.5.12-6ubuntu5 deb +debconf 1.5.86ubuntu1 deb +debianutils 5.17build1 deb +diffutils 1:3.10-1build1 deb +dirmngr 2.4.4-2ubuntu17.4 deb +dpkg 1.22.6ubuntu6.5 deb +e2fsprogs 1.47.0-2.4~exp1ubuntu4.1 deb +findutils 4.9.0-5build1 deb +gcc-14-base 14.2.0-4ubuntu2~24.04 deb +gnupg 2.4.4-2ubuntu17.4 deb +gnupg-l10n 2.4.4-2ubuntu17.4 deb +gnupg-utils 2.4.4-2ubuntu17.4 deb +gpg 2.4.4-2ubuntu17.4 deb +gpg-agent 2.4.4-2ubuntu17.4 deb +gpg-wks-client 2.4.4-2ubuntu17.4 deb +gpgconf 2.4.4-2ubuntu17.4 deb +gpgsm 2.4.4-2ubuntu17.4 deb +gpgv 2.4.4-2ubuntu17.4 deb +grep 3.11-4build1 deb +gzip 1.12-1ubuntu3.1 deb +hostname 3.23+nmu2ubuntu2 deb +init-system-helpers 1.66ubuntu1 deb +jq 1.7.1-3ubuntu0.24.04.1 deb +keyboxd 2.4.4-2ubuntu17.4 deb +krb5-locales 1.20.1-6ubuntu2.6 deb +libacl1 2.3.2-1build1.1 deb +libapt-pkg6.0t64 2.8.3 deb +libassuan0 2.5.6-1build1 deb +libattr1 1:2.5.2-1build1.1 deb +libaudit-common 1:3.1.2-2.1build1.1 deb +libaudit1 1:3.1.2-2.1build1.1 deb +libblkid1 2.39.3-9ubuntu6.4 deb +libbrotli1 1.1.0-2build2 deb +libbsd0 0.12.1-1build1.1 deb +libbz2-1.0 1.0.8-5.1build0.1 deb +libc-bin 
2.39-0ubuntu8.6 deb +libc6 2.39-0ubuntu8.6 deb +libcap-ng0 0.8.4-2build2 deb +libcap2 1:2.66-5ubuntu2.2 deb +libcom-err2 1.47.0-2.4~exp1ubuntu4.1 deb +libcrypt1 1:4.4.36-4build1 deb +libcurl4t64 8.5.0-2ubuntu10.6 deb +libdb5.3t64 5.3.28+dfsg2-7 deb +libdebconfclient0 0.271ubuntu3 deb +libext2fs2t64 1.47.0-2.4~exp1ubuntu4.1 deb +libffi8 3.4.6-1build1 deb +libgcc-s1 14.2.0-4ubuntu2~24.04 deb +libgcrypt20 1.10.3-2build1 deb +libgmp10 2:6.3.0+dfsg-2ubuntu6.1 deb +libgnutls30t64 3.8.3-1.1ubuntu3.4 deb +libgpg-error0 1.47-3build2.1 deb +libgssapi-krb5-2 1.20.1-6ubuntu2.6 deb +libhogweed6t64 3.9.1-2.2build1.1 deb +libidn2-0 2.3.7-2build1.1 deb +libjq1 1.7.1-3ubuntu0.24.04.1 deb +libk5crypto3 1.20.1-6ubuntu2.6 deb +libkeyutils1 1.6.3-3build1 deb +libkmod2 31+20240202-2ubuntu7.1 deb +libkrb5-3 1.20.1-6ubuntu2.6 deb +libkrb5support0 1.20.1-6ubuntu2.6 deb +libksba8 1.6.6-1build1 deb +libldap-common 2.6.7+dfsg-1~exp1ubuntu8.2 deb +libldap2 2.6.7+dfsg-1~exp1ubuntu8.2 deb +liblz4-1 1.9.4-1build1.1 deb +liblzma5 5.6.1+really5.4.5-1ubuntu0.2 deb +libmd0 1.1.0-2build1.1 deb +libmount1 2.39.3-9ubuntu6.4 deb +libncursesw6 6.4+20240113-1ubuntu2 deb +libnettle8t64 3.9.1-2.2build1.1 deb +libnghttp2-14 1.59.0-1ubuntu0.2 deb +libnpth0t64 1.6-3.1build1 deb +libonig5 6.9.9-1build1 deb +libp11-kit0 0.25.3-4ubuntu2.1 deb +libpam-modules 1.5.3-5ubuntu5.5 deb +libpam-modules-bin 1.5.3-5ubuntu5.5 deb +libpam-runtime 1.5.3-5ubuntu5.5 deb +libpam0g 1.5.3-5ubuntu5.5 deb +libpcre2-8-0 10.42-4ubuntu2.1 deb +libproc2-0 2:4.0.4-4ubuntu3.2 deb +libpsl5t64 0.21.2-1.1build1 deb +libreadline8t64 8.2-4build1 deb +librtmp1 2.4+20151223.gitfa8646d.1-2build7 deb +libsasl2-2 2.1.28+dfsg1-5ubuntu3.1 deb +libsasl2-modules 2.1.28+dfsg1-5ubuntu3.1 deb +libsasl2-modules-db 2.1.28+dfsg1-5ubuntu3.1 deb +libseccomp2 2.5.5-1ubuntu3.1 deb +libselinux1 3.5-2ubuntu2.1 deb +libsemanage-common 3.5-1build5 deb +libsemanage2 3.5-1build5 deb +libsepol2 3.5-2build1 deb +libsmartcols1 2.39.3-9ubuntu6.4 deb +libsqlite3-0 3.45.1-1ubuntu2.5 deb +libss2 1.47.0-2.4~exp1ubuntu4.1 deb +libssh-4 0.10.6-2ubuntu0.2 deb +libssl3t64 3.0.13-0ubuntu3.6 deb +libstdc++6 14.2.0-4ubuntu2~24.04 deb +libsystemd0 255.4-1ubuntu8.12 deb +libtasn1-6 4.19.0-3ubuntu0.24.04.2 deb +libtinfo6 6.4+20240113-1ubuntu2 deb +libudev1 255.4-1ubuntu8.12 deb +libunistring5 1.1-2build1.1 deb +libuuid1 2.39.3-9ubuntu6.4 deb +libxxhash0 0.8.2-2build1 deb +libzstd1 1.5.5+dfsg2-2build1.1 deb +locales 2.39-0ubuntu8.6 deb +login 1:4.13+dfsg1-4ubuntu3.2 deb +logsave 1.47.0-2.4~exp1ubuntu4.1 deb +mawk 1.3.4.20240123-1build1 deb +mount 2.39.3-9ubuntu6.4 deb +ncurses-base 6.4+20240113-1ubuntu2 deb +ncurses-bin 6.4+20240113-1ubuntu2 deb +netcat-openbsd 1.226-1ubuntu2 deb +openssl 3.0.13-0ubuntu3.6 deb +passwd 1:4.13+dfsg1-4ubuntu3.2 deb +perl-base 5.38.2-3.2ubuntu0.2 deb +pinentry-curses 1.2.1-3ubuntu5 deb +plexmediaserver 1.42.2.10156-f737b826c deb +procps 2:4.0.4-4ubuntu3.2 deb +publicsuffix 20231001.0357-0.1 deb +readline-common 8.2-4build1 deb +sed 4.9-2build1 deb +sensible-utils 0.0.22 deb +systemd-dev 255.4-1ubuntu8.12 deb +systemd-hwe-hwdb 255.1.6 deb +systemd-standalone-sysusers 255.4-1ubuntu8.12 deb +sysvinit-utils 3.08-6ubuntu3 deb +tar 1.35+dfsg-3build1 deb +tzdata 2025b-0ubuntu0.24.04.1 deb +ubuntu-keyring 2023.11.28.1 deb +udev 255.4-1ubuntu8.12 deb +unminimize 0.2.1 deb +util-linux 2.39.3-9ubuntu6.4 deb +wget 1.21.4-1ubuntu4.1 deb +zlib1g 1:1.3.dfsg-3.1ubuntu2.1 deb diff --git a/readme-vars.yml b/readme-vars.yml index 2391000a..2ad47e0d 100644 --- a/readme-vars.yml +++ b/readme-vars.yml @@ 
-3,116 +3,171 @@ # project information project_name: plex project_url: "https://plex.tv" -project_logo: "http://the-gadgeteer.com/wp-content/uploads/2015/10/plex-logo-e1446990678679.png" -project_blurb: "[{{ project_name|capitalize }}]({{ project_url }}) organizes video, music and photos from personal media libraries and streams them to smart TVs, streaming boxes and mobile devices. This container is packaged as a standalone Plex Media Server. has always been a top priority. Straightforward design and bulk actions mean getting things done faster." - +project_logo: "https://raw.githubusercontent.com/linuxserver/docker-templates/master/linuxserver.io/img/plex-logo.png" +project_blurb: "[{{ project_name|capitalize }}]({{ project_url }}) organizes video, music and photos from personal media libraries and streams them to smart TVs, streaming boxes and mobile devices. This container is packaged as a standalone Plex Media Server. Straightforward design and bulk actions mean getting things done faster." project_lsio_github_repo_url: "https://github.com/linuxserver/docker-{{ project_name }}" - +project_categories: "Media Servers,Music,Audiobooks" # supported architectures available_architectures: - - { arch: "{{ arch_x86_64 }}", tag: "amd64-latest"} - - { arch: "{{ arch_arm64 }}", tag: "arm64v8-latest"} - - { arch: "{{ arch_armhf }}", tag: "arm32v7-latest"} - + - {arch: "{{ arch_x86_64 }}", tag: "amd64-latest"} + - {arch: "{{ arch_arm64 }}", tag: "arm64v8-latest"} # container parameters common_param_env_vars_enabled: true #PGID, PUID, etc param_container_name: "{{ project_name }}" param_usage_include_vols: true param_volumes: - - { vol_path: "/config", vol_host_path: "</path/to/library>", desc: "Plex library location. *This can grow very large, 50gb+ is likely for a large collection.*" } - - { vol_path: "/tv", vol_host_path: "<path/to/tvseries>", desc: "Media goes here. Add as many as needed e.g. `/movies`, `/tv`, etc." } - - { vol_path: "/movies", vol_host_path: "</path/to/movies>", desc: "Media goes here. Add as many as needed e.g. `/movies`, `/tv`, etc." } - - { vol_path: "/transcode", vol_host_path: "</path for transcoding>", desc: "Path for transcoding folder, *optional*." } -param_usage_include_ports: false + - {vol_path: "/config", vol_host_path: "/path/to/{{ project_name }}/library", desc: "Plex library location. *This can grow very large, 50gb+ is likely for a large collection.*"} + - {vol_path: "/tv", vol_host_path: "/path/to/tvseries", desc: "Media goes here. Add as many as needed e.g. `/movies`, `/tv`, etc."} + - {vol_path: "/movies", vol_host_path: "/path/to/movies", desc: "Media goes here. Add as many as needed e.g. `/movies`, `/tv`, etc."} param_usage_include_net: true param_net: "host" param_net_desc: "Use Host Networking" param_usage_include_env: true param_env_vars: - - { env_var: "VERSION", env_value: "docker", desc: "Set whether to update plex or not - see Application Setup section."} + - {env_var: "VERSION", env_value: "docker", desc: "Set whether to update plex or not - see Application Setup section."} # optional env variables opt_param_usage_include_env: true opt_param_env_vars: - - { env_var: "UMASK_SET", env_value: "022", desc: "control permissions of files and directories created by Plex"} - + - {env_var: "PLEX_CLAIM", env_value: "", desc: "Optionally you can obtain a claim token from https://plex.tv/claim and input here. 
Keep in mind that the claim tokens expire within 4 minutes."} optional_parameters: | - *Special note* - If you'd like to run Plex without requiring `--net=host` (`NOT recommended`) then you will need the following ports in your `docker create` command: + If you want to run the container in bridge network mode (instead of the recommended host network mode) you will need to specify ports. + The [official documentation for ports](https://support.plex.tv/articles/201543147-what-network-ports-do-i-need-to-allow-through-my-firewall/) lists 32400 as the only required port. + The rest of the ports are optionally used for specific purposes listed in the documentation. + If you have not already claimed your server (first time setup) you need to set `PLEX_CLAIM` to claim a server set up with bridge networking. ``` -p 32400:32400 \ - -p 32400:32400/udp \ - -p 32469:32469 \ - -p 32469:32469/udp \ + -p 1900:1900/udp \ -p 5353:5353/udp \ - -p 1900:1900/udp + -p 8324:8324 \ + -p 32410:32410/udp \ + -p 32412:32412/udp \ + -p 32413:32413/udp \ + -p 32414:32414/udp \ + -p 32469:32469 + ``` The application accepts a series of environment variables to further customize itself on boot: | Parameter | Function | | :---: | --- | - | `-v /transcode` | Path for transcoding folder| | `--device=/dev/dri:/dev/dri` | Add this option to your run command if you plan on using Quicksync hardware acceleration - see Application Setup section.| - + | `--device=/dev/dvb:/dev/dvb` | Add this option to your run command if you plan on using dvb devices.| # application setup block app_setup_block_enabled: true app_setup_block: | Webui can be found at `<your-ip>:32400/web` - ** Note about updates, if there is no value set for the VERSION variable, then no updates will take place.** + ?+note[Updates](|If there is no value set for the VERSION variable, then no updates will take place.|) - ** For new users, no updates will take place on the first run of the container as there is no preferences file to read your token from, to update restart the Docker container after logging in through the webui** + ?+note[New users](|For new users, no updates will take place on the first run of the container as there is no preferences file to read your token from, to update restart the Docker container after logging in through the webui.|) Valid settings for VERSION are:- - `IMPORTANT NOTE:- YOU CANNOT UPDATE TO A PLEXPASS ONLY (BETA) VERSION IF YOU ARE NOT LOGGED IN WITH A PLEXPASS ACCOUNT` + ?+info[Plexpass](|YOU CANNOT UPDATE TO A PLEXPASS ONLY (BETA) VERSION IF YOU ARE NOT LOGGED IN WITH A PLEXPASS ACCOUNT.|) + **`docker`**: Let Docker handle the Plex Version, we keep our Dockerhub Endpoint up to date with the latest public builds. This is the same as leaving this setting out of your create command. + **`latest`**: will update plex to the latest version available that you are entitled to. + **`public`**: will update plexpass users to the latest public version, useful for plexpass users that don't want to be on the bleeding edge but still want the latest public updates. + **`<specific-version>`**: will select a specific version (eg 0.9.12.4.1192-9a47d21) of plex to install, note you cannot use this to access plexpass versions if you do not have plexpass. 
- - Hardware acceleration users for Intel Quicksync will need to mount their /dev/dri video device inside of the container by passing the following command when running or creating the container: - - ```--device=/dev/dri:/dev/dri``` - - We will automatically ensure the abc user inside of the container has the proper permissions to access this device. - - Hardware acceleration users for Nvidia will need to install the container runtime provided by Nvidia on their host, instructions can be found here: - - https://github.com/NVIDIA/nvidia-docker - - We automatically add the necessary environment variable that will utilise all the features available on a GPU on the host. Once nvidia-docker is installed on your host you will need to re/create the docker container with the nvidia container runtime `--runtime=nvidia` and add an environment variable `-e NVIDIA_VISIBLE_DEVICES=all` (can also be set to a specific gpu's UUID, this can be discovered by running `nvidia-smi --query-gpu=gpu_name,gpu_uuid --format=csv` ). NVIDIA automatically mounts the GPU and drivers from your host into the plex docker. - +readme_hwaccel: true +readonly_supported: true +readonly_message: | + * Runtime update of Plex (and thus Plexpass builds) is not supported. + * Transcode directory must be mounted to a host path or tmpfs. +nonroot_supported: true +nonroot_message: | + * Runtime update of Plex (and thus Plexpass builds) is not supported. + * Transcode directory must be mounted to a host path or tmpfs. +# init diagram +init_diagram: | + "plex:latest": { + docker-mods + base { + fix-attr +\nlegacy cont-init + } + docker-mods -> base + legacy-services + custom services + init-services -> legacy-services + init-services -> custom services + custom services -> legacy-services + legacy-services -> ci-service-check + init-migrations -> init-adduser + init-os-end -> init-config + init-config -> init-config-end + init-crontab-config -> init-config-end + init-plex-update -> init-config-end + init-config -> init-crontab-config + init-mods-end -> init-custom-files + init-adduser -> init-device-perms + base -> init-envfile + base -> init-migrations + init-config-end -> init-mods + init-mods-package-install -> init-mods-end + init-mods -> init-mods-package-install + init-adduser -> init-os-end + init-device-perms -> init-os-end + init-envfile -> init-os-end + init-config -> init-plex-chown + init-plex-chown -> init-plex-claim + init-plex-claim -> init-plex-update + init-custom-files -> init-services + init-services -> svc-cron + svc-cron -> legacy-services + init-services -> svc-plex + svc-plex -> legacy-services + } + Base Images: { + "baseimage-ubuntu:noble" + } + "plex:latest" <- Base Images # changelog changelogs: - - { date: "06.08.19:", desc: "Add variable for setting UMASK." } - - { date: "10.07.19:", desc: "Fix permissions for tuner (/dev/dvb) devices." } - - { date: "20.05.19:", desc: "Bugfix do not allow Root group for Intel QuickSync ownership rules." } - - { date: "23.03.19:", desc: "Switching to new Base images, shift to arm32v7 tag." } - - { date: "22.03.19:", desc: "Fix update logic for `VERSION=public`." } - - { date: "14.03.19:", desc: "Switch to new api endpoints, enable beta (plex pass) updates for armhf and aarch64." } - - { date: "15.02.19:", desc: "Clean up plex pid after unclean stop." } - - { date: "11.02.19:", desc: "Fix nvidia variables, add device variables." } - - { date: "16.01.19:", desc: "Add pipeline logic, multi arch, and HW transcoding configuration; remove avahi service." 
} - - { date: "07.09.18:", desc: "Rebase to ubuntu bionic, add udev package." } - - { date: "09.12.17:", desc: "Fix continuation lines." } - - { date: "12.07.17:", desc: "Add inspect commands to README, move to jenkins build and push." } - - { date: "28.05.17:", desc: "Add unrar package as per requests, for subzero plugin." } - - { date: "11.01.17:", desc: "Use Plex environment variables from pms docker, - change abc home folder to /app to alleviate usermod chowning library" } - - { date: "03.01.17:", desc: "Use case insensitive version variable matching rather than export and make lowercase." } - - { date: "17.10.16:", desc: "Allow use of uppercase version variable" } - - { date: "01.10.16:", desc: "Add TZ info to README." } - - { date: "09.09.16:", desc: "Add layer badges to README." } - - { date: "27.08.16:", desc: "Add badges to README." } - - { date: "22.08.16:", desc: "Rebased to xenial and s6 overlay" } - - { date: "07.04.16:", desc: "removed `/transcode` volume support (upstream Plex change) and modified PlexPass download method to prevent unauthorised usage of paid PMS" } - - { date: "24.09.15:", desc: "added optional support for volume transcoding (/transcode), and various typo fixes." } - - { date: "17.09.15:", desc: "Changed to run chmod only once" } - - { date: "19.09.15:", desc: "Plex updated their download servers from http to https" } - - { date: "28.08.15:", desc: "Removed plexpass from routine, and now uses VERSION as a combination fix." } - - { date: "18.07.15:", desc: "Moved autoupdate to be hosted by linuxserver.io and implemented bugfix thanks to ljm42." } - - { date: "09.07.15:", desc: "Now with ability to pick static version number." } - - { date: "08.07.15:", desc: "Now with autoupdates. (Hosted by fanart.tv)" } - - { date: "03.07.15:", desc: "Fixed a mistake that allowed plex to run as user plex rather than abc (99:100). Thanks to double16 for spotting this." } + - {date: "04.11.24:", desc: "Add Nvidia capability needed for h265"} + - {date: "18.07.24:", desc: "Rebase to Ubuntu Noble."} + - {date: "12.02.24:", desc: "Use universal hardware acceleration blurb"} + - {date: "09.01.24:", desc: "Set ownership on TranscoderTempDirectory when it's been saved in Preferences."} + - {date: "16.08.23:", desc: "Install unrar from [linuxserver repo](https://github.com/linuxserver/docker-unrar)."} + - {date: "03.07.23:", desc: "Deprecate armhf. As announced [here](https://www.linuxserver.io/blog/a-farewell-to-arm-hf)"} + - {date: "16.10.22:", desc: "Rebase to jammy. Update to s6v3. Remove opencl packages (bundled with plex)."} + - {date: "18.07.22:", desc: "Pin all opencl related driver packages."} + - {date: "16.05.22:", desc: "Pin opencl version."} + - {date: "04.03.22:", desc: "Increase verbosity of video device permissions fix, attempt to fix missing group rw."} + - {date: "25.12.21:", desc: "Install Intel drivers from the official repo."} + - {date: "20.01.21:", desc: "Deprecate `UMASK_SET` in favor of UMASK in baseimage, see above for more information."} + - {date: "10.12.20:", desc: "Add latest Intel Compute packages from github repo for opencl support on latest gen igpu."} + - {date: "23.11.20:", desc: "Add Bionic branch make Focal default."} + - {date: "03.05.20:", desc: "Update exposed ports and example docs for bridge mode."} + - {date: "23.03.20:", desc: "Remove udev hack (no longer needed), suppress uuid error in log during first start."} + - {date: "04.12.19:", desc: "Add variable for setting PLEX_CLAIM. 
Remove `/transcode` volume mapping as it is now set via plex gui and defaults to a location under `/config`."} + - {date: "06.08.19:", desc: "Add variable for setting UMASK."} + - {date: "10.07.19:", desc: "Fix permissions for tuner (/dev/dvb) devices."} + - {date: "20.05.19:", desc: "Bugfix do not allow Root group for Intel QuickSync ownership rules."} + - {date: "23.03.19:", desc: "Switching to new Base images, shift to arm32v7 tag."} + - {date: "22.03.19:", desc: "Fix update logic for `VERSION=public`."} + - {date: "14.03.19:", desc: "Switch to new api endpoints, enable beta (plex pass) updates for armhf and aarch64."} + - {date: "15.02.19:", desc: "Clean up plex pid after unclean stop."} + - {date: "11.02.19:", desc: "Fix nvidia variables, add device variables."} + - {date: "16.01.19:", desc: "Add pipeline logic, multi arch, and HW transcoding configuration; remove avahi service."} + - {date: "07.09.18:", desc: "Rebase to ubuntu bionic, add udev package."} + - {date: "09.12.17:", desc: "Fix continuation lines."} + - {date: "12.07.17:", desc: "Add inspect commands to README, move to jenkins build and push."} + - {date: "28.05.17:", desc: "Add unrar package as per requests, for subzero plugin."} + - {date: "11.01.17:", desc: "Use Plex environment variables from pms docker, change abc home folder to /app to alleviate usermod chowning library"} + - {date: "03.01.17:", desc: "Use case insensitive version variable matching rather than export and make lowercase."} + - {date: "17.10.16:", desc: "Allow use of uppercase version variable"} + - {date: "01.10.16:", desc: "Add TZ info to README."} + - {date: "09.09.16:", desc: "Add layer badges to README."} + - {date: "27.08.16:", desc: "Add badges to README."} + - {date: "22.08.16:", desc: "Rebased to xenial and s6 overlay"} + - {date: "07.04.16:", desc: "removed `/transcode` volume support (upstream Plex change) and modified PlexPass download method to prevent unauthorised usage of paid PMS"} + - {date: "24.09.15:", desc: "added optional support for volume transcoding (/transcode), and various typo fixes."} + - {date: "17.09.15:", desc: "Changed to run chmod only once"} + - {date: "19.09.15:", desc: "Plex updated their download servers from http to https"} + - {date: "28.08.15:", desc: "Removed plexpass from routine, and now uses VERSION as a combination fix."} + - {date: "18.07.15:", desc: "Moved autoupdate to be hosted by linuxserver.io and implemented bugfix thanks to ljm42."} + - {date: "09.07.15:", desc: "Now with ability to pick static version number."} + - {date: "08.07.15:", desc: "Now with autoupdates. (Hosted by fanart.tv)"} + - {date: "03.07.15:", desc: "Fixed a mistake that allowed plex to run as user plex rather than abc (99:100). 
Thanks to double16 for spotting this."} diff --git a/root/defaults/plexmediaserver b/root/defaults/plexmediaserver deleted file mode 100644 index 071c896a..00000000 --- a/root/defaults/plexmediaserver +++ /dev/null @@ -1,16 +0,0 @@ -# default script for Plex Media Server - -# the number of plugins that can run at the same time -PLEX_MEDIA_SERVER_MAX_PLUGIN_PROCS=6 - -# ulimit -s $PLEX_MEDIA_SERVER_MAX_STACK_SIZE -PLEX_MEDIA_SERVER_MAX_STACK_SIZE=3000 - -# uncomment to set it to something else -PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR="/config/Library/Application Support" - -# the user that PMS should run as, defaults to 'plex' -# note that if you change this you might need to move -# the Application Support directory to not lose your -# media library -PLEX_MEDIA_SERVER_USER=abc diff --git a/root/etc/cont-init.d/40-chown-files b/root/etc/cont-init.d/40-chown-files deleted file mode 100644 index 12b63b4b..00000000 --- a/root/etc/cont-init.d/40-chown-files +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/with-contenv bash - -# create folders -if [ ! -d "${PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR}" ]; then \ -mkdir -p "${PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR}" -chown -R abc:abc /config -fi - -# check Library permissions -PUID=${PUID:-911} -if [ ! "$(stat -c %u /config/Library)" = "$PUID" ]; then - echo "Change in ownership detected, please be patient while we chown existing files" - echo "This could take some time" - chown abc:abc -R \ - /config/Library -fi - -# remove plex pid after unclean stop -[[ -f "/config/Library/Application Support/Plex Media Server/plexmediaserver.pid" ]] && \ - rm -f "/config/Library/Application Support/Plex Media Server/plexmediaserver.pid" - -# permissions (non-recursive) on config root and folders -chown abc:abc \ - /config \ - /config/* diff --git a/root/etc/cont-init.d/50-gid-video b/root/etc/cont-init.d/50-gid-video deleted file mode 100755 index 8e0dec3d..00000000 --- a/root/etc/cont-init.d/50-gid-video +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/with-contenv bash - -FILES=$(find /dev/dri /dev/dvb -type c -print 2>/dev/null) - -for i in $FILES -do - VIDEO_GID=$(stat -c '%g' "$i") - if id -G abc | grep -qw "$VIDEO_GID"; then - touch /groupadd - else - if [ ! "${VIDEO_GID}" == '0' ]; then - VIDEO_NAME=$(getent group "${VIDEO_GID}" | awk -F: '{print $1}') - if [ -z "${VIDEO_NAME}" ]; then - VIDEO_NAME="video$(head /dev/urandom | tr -dc 'a-zA-Z0-9' | head -c8)" - groupadd "$VIDEO_NAME" - groupmod -g "$VIDEO_GID" "$VIDEO_NAME" - fi - usermod -a -G "$VIDEO_NAME" abc - touch /groupadd - fi - fi -done - -if [ -n "${FILES}" ] && [ ! 
-f "/groupadd" ]; then - usermod -a -G root abc -fi diff --git a/root/etc/cont-init.d/60-plex-update b/root/etc/cont-init.d/60-plex-update deleted file mode 100755 index f7087b60..00000000 --- a/root/etc/cont-init.d/60-plex-update +++ /dev/null @@ -1,127 +0,0 @@ -#!/usr/bin/with-contenv bash - -# If docker manages versioning exit -if [ "${VERSION}" ] && [ "${VERSION}" == 'docker' ]; then - echo "Docker is used for verisoning skip update check" - exit 0 -fi - -# test if plex is installed and try re-pulling latest if not -if (dpkg --get-selections plexmediaserver | grep -wq "install"); then -: -else -echo "for some reason plex doesn't appear to be installed, pulling a new copy and exiting out of update script" -curl -o /tmp/plexmediaserver.deb -L \ - "${PLEX_DOWNLOAD}/${REMOTE_VERSION}/debian/plexmediaserver_${REMOTE_VERSION}_${PLEX_ARCH}.deb" && \ -dpkg -i --force-confold /tmp/plexmediaserver.deb -rm -f /tmp/plexmediaserver.deb -exit 0 -fi - -# set no update message -[[ -e /tmp/no-version.nfo ]] && \ - rm /tmp/no-version.nfo -NOVERSION_SET='/tmp/no-version.nfo' -cat > "${NOVERSION_SET}" <<-EOFVERSION -####################################################### -# Update routine will not run because you havent set # -# the VERSION variable or you opted out of updates. # -# For more information checkout :- # -# https://github.com/linuxserver/docker-plex # -####################################################### -EOFVERSION - -# set update failed message -[[ -e /tmp/update_fail.nfo ]] && \ - rm /tmp/update_fail.nfo -UPGRADE_FAIL='/tmp/update_fail.nfo' -cat > "${UPGRADE_FAIL}" <<-EOFFAIL -######################################################## -# Upgrade attempt failed, this could be because either # -# plex update site is down, local network issues, or # -# you were trying to get a version that simply doesn't # -# exist, check over the VERSION variable thoroughly & # -# correct it or try again later. # -######################################################## -EOFFAIL - -# test for no version set or opt out for autoupdates -if [[ -z "$VERSION" ]] || [[ "$VERSION" == "0" ]] || [[ -n "$ADVANCED_DISABLEUPDATES" ]]; then -printf '\n\n\n%s\n\n\n' "$(</tmp/no-version.nfo)" -exit 0 -fi - -# set header for no preferences/token message -[[ -e /tmp/no-token.nfo ]] && \ - rm /tmp/no-token.nfo -NOTOKEN_SET='/tmp/no-token.nfo' -cat > "${NOTOKEN_SET}" <<-EOFTOKEN -##################################################### -# Login via the webui at http://<ip>:32400/web # -# and restart the docker, because there was no # -EOFTOKEN - -# if preferences files doesn't exist, exit out -if [ ! -e "/config/Library/Application Support/Plex Media Server/Preferences.xml" ]; then -cat >> "${NOTOKEN_SET}" <<-EOFTOKEN -# preference file found, possibly first startup. 
# -##################################################### -EOFTOKEN -printf '\n\n\n%s\n\n\n' "$(</tmp/no-token.nfo)" -exit 0 -fi - -# attempt to read plex token -PLEX_TOKEN=$( sed -n 's/.*PlexOnlineToken="//p' \ - "/config/Library/Application Support/Plex Media Server/Preferences.xml" \ - | sed "s/\".*//") - -# if plex token isn't found, exit out -if [ -z "$PLEX_TOKEN" ]; then -cat >> "${NOTOKEN_SET}" <<-EOFTOKEN -# plex token found in the preference file # -##################################################### -EOFTOKEN -printf '\n\n\n%s\n\n\n' "$(</tmp/no-token.nfo)" -exit 0 -fi - -# determine installed version of plex -INSTALLED_VERSION=$(dpkg-query -W -f='${Version}' plexmediaserver) - -# start update routine -if [[ "${VERSION,,}" = latest ]] || [[ "${VERSION,,}" = plexpass ]] || [[ "$PLEXPASS" == "1" ]]; then - if [[ "${PLEX_ARCH}" = amd64 ]]; then - PLEX_URL_ARCH="x86_64" - elif [[ "${PLEX_ARCH}" = armhf ]]; then - PLEX_URL_ARCH="armv7hf_neon" - elif [[ "${PLEX_ARCH}" = arm64 ]]; then - PLEX_URL_ARCH="aarch64" - fi -REMOTE_VERSION=$(curl -s "https://plex.tv/downloads/details/5?distro=debian&build=linux-${PLEX_URL_ARCH}&channel=8&X-Plex-Token=$PLEX_TOKEN"| grep -oP 'version="\K[^"]+' | tail -n 1 ) -elif [[ "${VERSION,,}" = public ]]; then -REMOTE_VERSION=$(curl -sX GET 'https://plex.tv/api/downloads/5.json' | jq -r '.computer.Linux.version') -else -REMOTE_VERSION="${VERSION}" -fi - -if [[ "$REMOTE_VERSION" == "$INSTALLED_VERSION" ]]; then -echo "No update required" -exit 0 -fi - -echo "Atempting to upgrade to: $REMOTE_VERSION" -rm -f /tmp/plexmediaserver_*.deb -wget -nv -P /tmp \ -"${PLEX_DOWNLOAD}/${REMOTE_VERSION}/debian/plexmediaserver_${REMOTE_VERSION}_${PLEX_ARCH}.deb" -last=$? - -# test if deb file size is ok, or if download failed -if [[ "$last" -gt "0" ]] || [[ $(stat -c %s /tmp/plexmediaserver_"${REMOTE_VERSION}"_${PLEX_ARCH}.deb) -lt 10000 ]]; then -printf '\n\n\n%s\n\n\n' "$(</tmp/update_fail.nfo)" -exit 0 -# if ok, try to install it. -else -dpkg -i --force-confold /tmp/plexmediaserver_"${REMOTE_VERSION}"_${PLEX_ARCH}.deb -rm -f /tmp/plexmediaserver_*.deb -fi diff --git a/root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-plex-update b/root/etc/s6-overlay/s6-rc.d/init-config-end/dependencies.d/init-plex-update new file mode 100644 index 00000000..e69de29b diff --git a/root/etc/s6-overlay/s6-rc.d/init-plex-chown/dependencies.d/init-config b/root/etc/s6-overlay/s6-rc.d/init-plex-chown/dependencies.d/init-config new file mode 100644 index 00000000..e69de29b diff --git a/root/etc/s6-overlay/s6-rc.d/init-plex-chown/run b/root/etc/s6-overlay/s6-rc.d/init-plex-chown/run new file mode 100755 index 00000000..f98e6699 --- /dev/null +++ b/root/etc/s6-overlay/s6-rc.d/init-plex-chown/run @@ -0,0 +1,46 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +mkdir -p /run/plex-temp + +# create folders +if [[ ! -d "${PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR}" ]]; then + mkdir -p "${PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR}" + if [[ -z ${LSIO_NON_ROOT_USER} ]]; then + lsiown -R abc:abc /config + fi +fi + +# remove plex pid after unclean stop +if [[ -f "/config/Library/Application Support/Plex Media Server/plexmediaserver.pid" ]]; then + rm -f "/config/Library/Application Support/Plex Media Server/plexmediaserver.pid" +fi + +if [[ -z ${LSIO_NON_ROOT_USER} ]]; then + # check Library permissions + PUID=${PUID:-911} + if [[ ! 
"$(stat -c %u /config/Library)" == "${PUID}" ]]; then + echo "Change in ownership detected, please be patient while we chown existing files" + echo "This could take some time" + lsiown -R abc:abc \ + /config/Library + fi + + # set permissions on Plex Transcoder Temp Directory + PLEX_MEDIA_SERVER_PREFERENCES="${PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR}/Plex Media Server/Preferences.xml" + if [[ -f "${PLEX_MEDIA_SERVER_PREFERENCES}" ]]; then + TranscoderTempDirectory='\bTranscoderTempDirectory="([^"]+)"' + while IFS= read -r line; do + if [[ ${line} =~ ${TranscoderTempDirectory} ]] && [[ -d "${BASH_REMATCH[1]}" ]]; then + echo "Setting permissions on ${BASH_REMATCH[1]}" + lsiown -R abc:abc "${BASH_REMATCH[1]}" + fi + done <"${PLEX_MEDIA_SERVER_PREFERENCES}" + fi + + # permissions (non-recursive) on config root and folders + lsiown abc:abc \ + /run/plex-temp \ + /config \ + /config/* +fi diff --git a/root/etc/s6-overlay/s6-rc.d/init-plex-chown/type b/root/etc/s6-overlay/s6-rc.d/init-plex-chown/type new file mode 100644 index 00000000..3d92b15f --- /dev/null +++ b/root/etc/s6-overlay/s6-rc.d/init-plex-chown/type @@ -0,0 +1 @@ +oneshot \ No newline at end of file diff --git a/root/etc/s6-overlay/s6-rc.d/init-plex-chown/up b/root/etc/s6-overlay/s6-rc.d/init-plex-chown/up new file mode 100644 index 00000000..2d5ad2fa --- /dev/null +++ b/root/etc/s6-overlay/s6-rc.d/init-plex-chown/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-plex-chown/run \ No newline at end of file diff --git a/root/etc/s6-overlay/s6-rc.d/init-plex-claim/dependencies.d/init-plex-chown b/root/etc/s6-overlay/s6-rc.d/init-plex-claim/dependencies.d/init-plex-chown new file mode 100644 index 00000000..e69de29b diff --git a/root/etc/s6-overlay/s6-rc.d/init-plex-claim/run b/root/etc/s6-overlay/s6-rc.d/init-plex-claim/run new file mode 100755 index 00000000..a94451e8 --- /dev/null +++ b/root/etc/s6-overlay/s6-rc.d/init-plex-claim/run @@ -0,0 +1,89 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +PLEX_MEDIA_SERVER_PREFERENCES="${PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR}/Plex Media Server/Preferences.xml" + +if grep -qs "PlexOnlineToken" "${PLEX_MEDIA_SERVER_PREFERENCES}"; then + echo "**** Server already claimed ****" + exit 0 +elif [[ -z "$PLEX_CLAIM" ]]; then + echo "**** Server is unclaimed, but no claim token has been set ****" + exit 0 +fi + +if [[ ! -f "${PLEX_MEDIA_SERVER_PREFERENCES}" ]]; then + UMASK_SET="${UMASK_SET:-022}" + umask "$UMASK_SET" + echo "Temporarily starting Plex Media Server." + PLEX_MEDIA_SERVER_INFO_MODEL=$(uname -m) + export PLEX_MEDIA_SERVER_INFO_MODEL + PLEX_MEDIA_SERVER_INFO_PLATFORM_VERSION=$(uname -r) + export PLEX_MEDIA_SERVER_INFO_PLATFORM_VERSION + s6-setuidgid abc /bin/bash -c \ + 'LD_LIBRARY_PATH=/usr/lib/plexmediaserver:/usr/lib/plexmediaserver/lib /usr/lib/plexmediaserver/Plex\ Media\ Server' & + PID=$! + echo "Waiting for Plex to generate its config" + DBNAME="/config/Library/Application Support/Plex Media Server/Plug-in Support/Databases/com.plexapp.plugins.library.db-wal" + until [[ -f "${DBNAME}" ]]; do + sleep 1 + done + while true; do + echo "Waiting for database creation to complete..." 
+ if [[ -z "${COMPARE_MD5+x}" ]]; then + COMPARE_MD5=$(md5sum "${DBNAME}" | cut -c1-8) + sleep 3 + else + sleep 3 + CURRENT_MD5=$(md5sum "${DBNAME}" | cut -c1-8) + if [[ "${CURRENT_MD5}" == "${COMPARE_MD5}" ]]; then + break + else + COMPARE_MD5=$(md5sum "${DBNAME}" | cut -c1-8) + fi + fi + done + until grep -qs "ProcessedMachineIdentifier" "${PLEX_MEDIA_SERVER_PREFERENCES}"; do + sleep 1 + done + while true; do + echo "Waiting for pref file creation to complete..." + if [[ -z "${PREF_COMPARE_MD5+x}" ]]; then + PREF_COMPARE_MD5=$(md5sum "${PLEX_MEDIA_SERVER_PREFERENCES}" | cut -c1-8) + sleep 3 + else + sleep 3 + PREF_CURRENT_MD5=$(md5sum "${PLEX_MEDIA_SERVER_PREFERENCES}" | cut -c1-8) + if [[ "${PREF_CURRENT_MD5}" == "${PREF_COMPARE_MD5}" ]]; then + break + else + PREF_COMPARE_MD5=$(md5sum "${PLEX_MEDIA_SERVER_PREFERENCES}" | cut -c1-8) + fi + fi + done + echo "Stopping Plex to claim server" + while ps -p $PID >/dev/null; do + kill $PID + sleep 1 + done + echo "Plex stopped" +fi + +ProcessedMachineIdentifier=$(sed -n "s/^.*ProcessedMachineIdentifier=\"\([^\"]*\)\".*$/\1/p" "${PLEX_MEDIA_SERVER_PREFERENCES}") +PlexOnlineToken="$(curl -X POST \ + -H 'X-Plex-Client-Identifier: '"${ProcessedMachineIdentifier}" \ + -H 'X-Plex-Product: Plex Media Server' \ + -H 'X-Plex-Version: 1.1' \ + -H 'X-Plex-Provides: server' \ + -H 'X-Plex-Platform: Linux' \ + -H 'X-Plex-Platform-Version: 1.0' \ + -H 'X-Plex-Device-Name: PlexMediaServer' \ + -H 'X-Plex-Device: Linux' \ + "https://plex.tv/api/claim/exchange?token=${PLEX_CLAIM}" | + sed -n 's/.*<authentication-token>\(.*\)<\/authentication-token>.*/\1/p')" + +if [[ -n "$PlexOnlineToken" ]]; then + echo "Server claimed successfully, navigate to http://serverip:32400/web to complete plex setup." + sed -i "s/\/>/ PlexOnlineToken=\"${PlexOnlineToken}\"\/>/g" "${PLEX_MEDIA_SERVER_PREFERENCES}" +else + echo "Unable to claim Plex server. Either manually claim by connecting to http://serverip:32400/web from the same network subnet, or recreate container with a new claim token." +fi diff --git a/root/etc/s6-overlay/s6-rc.d/init-plex-claim/type b/root/etc/s6-overlay/s6-rc.d/init-plex-claim/type new file mode 100644 index 00000000..3d92b15f --- /dev/null +++ b/root/etc/s6-overlay/s6-rc.d/init-plex-claim/type @@ -0,0 +1 @@ +oneshot \ No newline at end of file diff --git a/root/etc/s6-overlay/s6-rc.d/init-plex-claim/up b/root/etc/s6-overlay/s6-rc.d/init-plex-claim/up new file mode 100644 index 00000000..56750b8d --- /dev/null +++ b/root/etc/s6-overlay/s6-rc.d/init-plex-claim/up @@ -0,0 +1 @@ +/etc/s6-overlay/s6-rc.d/init-plex-claim/run \ No newline at end of file diff --git a/root/etc/s6-overlay/s6-rc.d/init-plex-update/dependencies.d/init-plex-claim b/root/etc/s6-overlay/s6-rc.d/init-plex-update/dependencies.d/init-plex-claim new file mode 100644 index 00000000..e69de29b diff --git a/root/etc/s6-overlay/s6-rc.d/init-plex-update/run b/root/etc/s6-overlay/s6-rc.d/init-plex-update/run new file mode 100755 index 00000000..44bab804 --- /dev/null +++ b/root/etc/s6-overlay/s6-rc.d/init-plex-update/run @@ -0,0 +1,142 @@ +#!/usr/bin/with-contenv bash +# shellcheck shell=bash + +if [[ -n ${LSIO_READ_ONLY_FS} ]] || [[ -n ${LSIO_NON_ROOT_USER} ]]; then + echo "Runtime update not supported with read-only or non-root operation, skipping." 
+    exit 0
+fi
+
+# If Docker manages versioning, exit
+if [[ -n "${VERSION}" ]] && [[ "${VERSION}" == "docker" ]]; then
+    echo "Docker is used for versioning, skipping update check"
+    exit 0
+fi
+
+# test if Plex is installed and try re-pulling it if not
+if (dpkg --get-selections plexmediaserver | grep -wq "install"); then
+    :
+else
+    echo "Plex does not appear to be installed, pulling a fresh copy and exiting the update script"
+    curl -o /tmp/plexmediaserver.deb -L \
+        "${PLEX_DOWNLOAD}/${REMOTE_VERSION}/debian/plexmediaserver_${REMOTE_VERSION}_${PLEX_ARCH}.deb"
+    dpkg -i --force-confold /tmp/plexmediaserver.deb
+    rm -f /tmp/plexmediaserver.deb
+    exit 0
+fi
+
+# set no update message
+if [[ -e /tmp/no-version.nfo ]]; then
+    rm /tmp/no-version.nfo
+fi
+NOVERSION_SET='/tmp/no-version.nfo'
+cat >"${NOVERSION_SET}" <<-EOFVERSION
+#######################################################
+# Update routine will not run because you haven't set #
+# the VERSION variable or you opted out of updates.   #
+# For more information check out:                     #
+# https://github.com/linuxserver/docker-plex          #
+#######################################################
+EOFVERSION
+
+# set update failed message
+if [[ -e /tmp/update_fail.nfo ]]; then
+    rm /tmp/update_fail.nfo
+fi
+UPGRADE_FAIL='/tmp/update_fail.nfo'
+cat >"${UPGRADE_FAIL}" <<-EOFFAIL
+########################################################
+# Upgrade attempt failed. This could be because the    #
+# Plex update site is down, there are local network    #
+# issues, or you requested a version that simply does  #
+# not exist. Check the VERSION variable thoroughly and #
+# correct it, or try again later.                      #
+########################################################
+EOFFAIL
+
+# test for no version set or opt-out of auto updates
+if [[ -z "${VERSION}" ]] || [[ "${VERSION}" == "0" ]] || [[ -n "${ADVANCED_DISABLEUPDATES}" ]]; then
+    printf '\n\n\n%s\n\n\n' "$(</tmp/no-version.nfo)"
+    exit 0
+fi
+
+# set header for no preferences/token message
+if [[ -e /tmp/no-token.nfo ]]; then
+    rm /tmp/no-token.nfo
+fi
+NOTOKEN_SET='/tmp/no-token.nfo'
+cat >"${NOTOKEN_SET}" <<-EOFTOKEN
+#####################################################
+# Log in via the web UI at http://<ip>:32400/web    #
+# and restart the container, because there was no   #
+EOFTOKEN
+
+# if the preferences file doesn't exist, exit out
+PLEX_MEDIA_SERVER_PREFERENCES="${PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR}/Plex Media Server/Preferences.xml"
+if [[ ! -e "${PLEX_MEDIA_SERVER_PREFERENCES}" ]]; then
+    cat >>"${NOTOKEN_SET}" <<-EOFTOKEN
+# preference file found, possibly first startup.   #
+#####################################################
+EOFTOKEN
+    printf '\n\n\n%s\n\n\n' "$(</tmp/no-token.nfo)"
+    exit 0
+fi
+
+# attempt to read the Plex token
+PLEX_TOKEN=$(sed -n 's/.*PlexOnlineToken="//p' \
+    "${PLEX_MEDIA_SERVER_PREFERENCES}" |
+    sed "s/\".*//")
+
+# if the Plex token isn't found, exit out
+if [[ -z "${PLEX_TOKEN}" ]]; then
+    cat >>"${NOTOKEN_SET}" <<-EOFTOKEN
+# Plex token found in the preference file           #
+#####################################################
+EOFTOKEN
+    printf '\n\n\n%s\n\n\n' "$(</tmp/no-token.nfo)"
+    exit 0
+fi
+
+# determine the installed version of Plex
+INSTALLED_VERSION=$(dpkg-query -W -f='${Version}' plexmediaserver)
+
+# start update routine
+if [[ "${VERSION,,}" == "latest" ]] || [[ "${VERSION,,}" == "plexpass" ]] || [[ "${PLEXPASS}" == "1" ]]; then
+    if [[ "${PLEX_ARCH}" == "amd64" ]]; then
+        PLEX_URL_ARCH="x86_64"
+    elif [[ "${PLEX_ARCH}" == "armhf" ]]; then
+        PLEX_URL_ARCH="armv7hf_neon"
+    elif [[ "${PLEX_ARCH}" == "arm64" ]]; then
+        PLEX_URL_ARCH="aarch64"
+    fi
+    REMOTE_VERSION=$(curl -s "https://plex.tv/downloads/details/5?distro=debian&build=linux-${PLEX_URL_ARCH}&channel=8&X-Plex-Token=${PLEX_TOKEN}" | grep -oP 'version="\K[^"]+' | tail -n 1)
+elif [[ "${VERSION,,}" == "public" ]]; then
+    REMOTE_VERSION=$(curl -s 'https://plex.tv/api/downloads/5.json' | jq -r '.computer.Linux.version')
+else
+    REMOTE_VERSION="${VERSION}"
+fi
+
+if [[ "${REMOTE_VERSION}" == "${INSTALLED_VERSION}" ]]; then
+    echo "No update required"
+    exit 0
+fi
+
+if [[ -z "${REMOTE_VERSION}" ]]; then
+    echo "*** Unable to fetch version from Plex remote endpoint or empty VERSION supplied, please check your configuration ***"
+    exit 0
+fi
+
+echo "Attempting to upgrade to: ${REMOTE_VERSION}"
+rm -f /tmp/plexmediaserver_*.deb
+wget -nv -P /tmp \
+    "${PLEX_DOWNLOAD}/${REMOTE_VERSION}/debian/plexmediaserver_${REMOTE_VERSION}_${PLEX_ARCH}.deb"
+last=$?
+
+# test if the deb file size is ok, or if the download failed
+if [[ "${last}" -gt "0" ]] || [[ $(stat -c %s "/tmp/plexmediaserver_${REMOTE_VERSION}_${PLEX_ARCH}.deb") -lt 10000 ]]; then
+    printf '\n\n\n%s\n\n\n' "$(</tmp/update_fail.nfo)"
+    exit 0
+# if ok, try to install it.
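+# (the 10000-byte size floor presumably guards against a truncated download or an error page saved in place of the .deb)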
+else
+    dpkg -i --force-confold "/tmp/plexmediaserver_${REMOTE_VERSION}_${PLEX_ARCH}.deb"
+    rm -f /tmp/plexmediaserver_*.deb
+fi
diff --git a/root/etc/s6-overlay/s6-rc.d/init-plex-update/type b/root/etc/s6-overlay/s6-rc.d/init-plex-update/type
new file mode 100644
index 00000000..3d92b15f
--- /dev/null
+++ b/root/etc/s6-overlay/s6-rc.d/init-plex-update/type
@@ -0,0 +1 @@
+oneshot
\ No newline at end of file
diff --git a/root/etc/s6-overlay/s6-rc.d/init-plex-update/up b/root/etc/s6-overlay/s6-rc.d/init-plex-update/up
new file mode 100644
index 00000000..6b6b1643
--- /dev/null
+++ b/root/etc/s6-overlay/s6-rc.d/init-plex-update/up
@@ -0,0 +1 @@
+/etc/s6-overlay/s6-rc.d/init-plex-update/run
\ No newline at end of file
diff --git a/root/etc/s6-overlay/s6-rc.d/svc-plex/dependencies.d/init-services b/root/etc/s6-overlay/s6-rc.d/svc-plex/dependencies.d/init-services
new file mode 100644
index 00000000..e69de29b
diff --git a/root/etc/s6-overlay/s6-rc.d/svc-plex/notification-fd b/root/etc/s6-overlay/s6-rc.d/svc-plex/notification-fd
new file mode 100644
index 00000000..e440e5c8
--- /dev/null
+++ b/root/etc/s6-overlay/s6-rc.d/svc-plex/notification-fd
@@ -0,0 +1 @@
+3
\ No newline at end of file
diff --git a/root/etc/s6-overlay/s6-rc.d/svc-plex/run b/root/etc/s6-overlay/s6-rc.d/svc-plex/run
new file mode 100755
index 00000000..bb607461
--- /dev/null
+++ b/root/etc/s6-overlay/s6-rc.d/svc-plex/run
@@ -0,0 +1,18 @@
+#!/usr/bin/with-contenv bash
+# shellcheck shell=bash
+
+echo "Starting Plex Media Server... (you can ignore the libusb_init error)"
+PLEX_MEDIA_SERVER_INFO_MODEL=$(uname -m)
+export PLEX_MEDIA_SERVER_INFO_MODEL
+PLEX_MEDIA_SERVER_INFO_PLATFORM_VERSION=$(uname -r)
+export PLEX_MEDIA_SERVER_INFO_PLATFORM_VERSION
+
+if [[ -z ${LSIO_NON_ROOT_USER} ]]; then
+    exec \
+        s6-notifyoncheck -d -n 300 -w 1000 -c "nc -z localhost 32400" \
+        s6-setuidgid abc "/usr/lib/plexmediaserver/Plex Media Server"
+else
+    exec \
+        s6-notifyoncheck -d -n 300 -w 1000 -c "nc -z localhost 32400" \
+        "/usr/lib/plexmediaserver/Plex Media Server"
+fi
diff --git a/root/etc/s6-overlay/s6-rc.d/svc-plex/type b/root/etc/s6-overlay/s6-rc.d/svc-plex/type
new file mode 100644
index 00000000..1780f9f4
--- /dev/null
+++ b/root/etc/s6-overlay/s6-rc.d/svc-plex/type
@@ -0,0 +1 @@
+longrun
\ No newline at end of file
diff --git a/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-plex-chown b/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-plex-chown
new file mode 100644
index 00000000..e69de29b
diff --git a/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-plex-claim b/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-plex-claim
new file mode 100644
index 00000000..e69de29b
diff --git a/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-plex-update b/root/etc/s6-overlay/s6-rc.d/user/contents.d/init-plex-update
new file mode 100644
index 00000000..e69de29b
diff --git a/root/etc/s6-overlay/s6-rc.d/user/contents.d/svc-plex b/root/etc/s6-overlay/s6-rc.d/user/contents.d/svc-plex
new file mode 100644
index 00000000..e69de29b
diff --git a/root/etc/services.d/plex/run b/root/etc/services.d/plex/run
deleted file mode 100644
index 6996fcd0..00000000
--- a/root/etc/services.d/plex/run
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/usr/bin/with-contenv bash
-
-
-UMASK_SET=${UMASK_SET:-022}
-
-umask "$UMASK_SET"
-
-echo "Starting Plex Media Server."
-export PLEX_MEDIA_SERVER_INFO_MODEL=$(uname -m)
-export PLEX_MEDIA_SERVER_INFO_PLATFORM_VERSION=$(uname -r)
-exec \
-    s6-setuidgid abc /bin/bash -c \
-    'LD_LIBRARY_PATH=/usr/lib/plexmediaserver:/usr/lib/plexmediaserver/lib /usr/lib/plexmediaserver/Plex\ Media\ Server'
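For reviewers, the least obvious part of the new scripts is the wait loop in `init-plex-claim/run`, which polls a file's md5 until it stops changing before treating it as complete. The sketch below restates that pattern as a standalone helper so it can be read or tested in isolation; the function name `wait_until_stable` and the 3-second interval are illustrative choices, not something shipped in the image.

```bash
#!/usr/bin/env bash
# Minimal sketch of the "wait until a file stops changing" pattern used by
# init-plex-claim/run. Hypothetical helper; not part of this patch.
wait_until_stable() {
    local file="$1" previous current
    previous=$(md5sum "${file}" | cut -c1-8)
    while true; do
        sleep 3
        current=$(md5sum "${file}" | cut -c1-8)
        if [[ "${current}" == "${previous}" ]]; then
            break # two consecutive identical samples: assume writes have finished
        fi
        previous="${current}"
    done
}

# Example: wait for the preferences file (path assumed) to settle before reading it.
wait_until_stable "/config/Library/Application Support/Plex Media Server/Preferences.xml"
```

If you want a quick local sanity check of the new service scripts before building the image, a plain syntax pass should be enough; `shellcheck` is optional and assumed to be installed.

```bash
# Syntax-check every s6 run script in the working copy (bash -n parses without executing).
find root/etc/s6-overlay/s6-rc.d -type f -name run -exec bash -n {} \;
# Optional: lint them as bash, which the "# shellcheck shell=bash" directives already allow.
find root/etc/s6-overlay/s6-rc.d -type f -name run -exec shellcheck {} +
```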