Pipeline hw (#150)
* adding multi arch logic and pipeline, first version of included hardware accel info, pulling debs from new endpoint
* removing web screenshotting from ci process
* adding update logic for arm variants and templating readme file
* Adding nvidia environment variables
* Adding information on nvidia container runtime
* updating pipeline logic to use static endpoint and disabling updates on those branches
* bug bashing escape characters in jenkins logic
* remove avahi service that is no longer needed by plex
* set new artifact download location
parent 391f0a773c
commit 88765a032a
@@ -0,0 +1,602 @@
|
||||
pipeline {
|
||||
agent {
|
||||
label 'X86-64-MULTI'
|
||||
}
|
||||
// Input to determine if this is a package check
|
||||
parameters {
|
||||
string(defaultValue: 'false', description: 'package check run', name: 'PACKAGE_CHECK')
|
||||
}
|
||||
// Configuration for the variables used for this specific repo
|
||||
environment {
|
||||
BUILDS_DISCORD=credentials('build_webhook_url')
|
||||
GITHUB_TOKEN=credentials('498b4638-2d02-4ce5-832d-8a57d01d97ab')
|
||||
BUILD_VERSION_ARG = 'PLEX_RELEASE'
|
||||
LS_USER = 'linuxserver'
|
||||
LS_REPO = 'docker-plex'
|
||||
CONTAINER_NAME = 'plex'
|
||||
DOCKERHUB_IMAGE = 'linuxserver/plex'
|
||||
DEV_DOCKERHUB_IMAGE = 'lsiodev/plex'
|
||||
PR_DOCKERHUB_IMAGE = 'lspipepr/plex'
|
||||
DIST_IMAGE = 'ubuntu'
|
||||
MULTIARCH='true'
|
||||
CI='true'
|
||||
CI_WEB='false'
|
||||
CI_PORT='32400'
|
||||
CI_SSL='false'
|
||||
CI_DELAY='120'
|
||||
CI_DOCKERENV='TZ=US/Pacific'
|
||||
CI_AUTH='user:password'
|
||||
CI_WEBPATH='/web'
|
||||
}
|
||||
stages {
|
||||
// Setup all the basic environment variables needed for the build
|
||||
stage("Set ENV Variables base"){
|
||||
steps{
|
||||
script{
|
||||
env.EXIT_STATUS = ''
|
||||
env.LS_RELEASE = sh(
|
||||
script: '''curl -s https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases/latest | jq -r '. | .tag_name' ''',
|
||||
returnStdout: true).trim()
|
||||
env.LS_RELEASE_NOTES = sh(
|
||||
script: '''git log -1 --pretty=%B | sed -E ':a;N;$!ba;s/\\r{0,1}\\n/\\\\n/g' ''',
|
||||
returnStdout: true).trim()
|
||||
env.GITHUB_DATE = sh(
|
||||
script: '''date '+%Y-%m-%dT%H:%M:%S%:z' ''',
|
||||
returnStdout: true).trim()
|
||||
env.COMMIT_SHA = sh(
|
||||
script: '''git rev-parse HEAD''',
|
||||
returnStdout: true).trim()
|
||||
env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/commit/' + env.GIT_COMMIT
|
||||
env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DOCKERHUB_IMAGE + '/tags/'
|
||||
env.PULL_REQUEST = env.CHANGE_ID
|
||||
}
|
||||
script{
|
||||
env.LS_RELEASE_NUMBER = sh(
|
||||
script: '''echo ${LS_RELEASE} |sed 's/^.*-ls//g' ''',
|
||||
returnStdout: true).trim()
|
||||
}
|
||||
script{
|
||||
env.LS_TAG_NUMBER = sh(
|
||||
script: '''#! /bin/bash
|
||||
tagsha=$(git rev-list -n 1 ${LS_RELEASE} 2>/dev/null)
|
||||
if [ "${tagsha}" == "${COMMIT_SHA}" ]; then
|
||||
echo ${LS_RELEASE_NUMBER}
|
||||
elif [ -z "${GIT_COMMIT}" ]; then
|
||||
echo ${LS_RELEASE_NUMBER}
|
||||
else
|
||||
echo $((${LS_RELEASE_NUMBER} + 1))
|
||||
fi''',
|
||||
returnStdout: true).trim()
|
||||
}
|
||||
}
|
||||
}
|
||||
/* #######################
|
||||
Package Version Tagging
|
||||
####################### */
|
||||
// Grab the current package versions in Git to determine package tag
|
||||
stage("Set Package tag"){
|
||||
steps{
|
||||
script{
|
||||
env.PACKAGE_TAG = sh(
|
||||
script: '''#!/bin/bash
|
||||
if [ -e package_versions.txt ] ; then
|
||||
cat package_versions.txt | md5sum | cut -c1-8
|
||||
else
|
||||
echo none
|
||||
fi''',
|
||||
returnStdout: true).trim()
|
||||
}
|
||||
}
|
||||
}
|
||||
/* ########################
|
||||
External Release Tagging
|
||||
######################## */
|
||||
// If this is a custom command to determine version use that command
|
||||
stage("Set tag custom bash"){
|
||||
steps{
|
||||
script{
|
||||
env.EXT_RELEASE = sh(
|
||||
script: ''' curl -s 'https://plex.tv/downloads/details/1?build=linux-ubuntu-x86_64&distro=ubuntu' |grep -oP 'version="\\K[^"]+' | tail -n 1 ''',
|
||||
returnStdout: true).trim()
|
||||
env.RELEASE_LINK = 'custom_command'
|
||||
}
|
||||
}
|
||||
}
|
||||
// Sanitize the release tag and strip illegal docker or github characters
|
||||
stage("Sanitize tag"){
|
||||
steps{
|
||||
script{
|
||||
env.EXT_RELEASE_CLEAN = sh(
|
||||
script: '''echo ${EXT_RELEASE} | sed 's/[~,%@+;:/]//g' ''',
|
||||
returnStdout: true).trim()
|
||||
}
|
||||
}
|
||||
}
|
||||
// If this is a master build use live docker endpoints
|
||||
stage("Set ENV live build"){
|
||||
when {
|
||||
branch "master"
|
||||
environment name: 'CHANGE_ID', value: ''
|
||||
}
|
||||
steps {
|
||||
script{
|
||||
env.IMAGE = env.DOCKERHUB_IMAGE
|
||||
if (env.MULTIARCH == 'true') {
|
||||
env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm32v6-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER
|
||||
} else {
|
||||
env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER
|
||||
}
|
||||
env.META_TAG = env.EXT_RELEASE_CLEAN + '-ls' + env.LS_TAG_NUMBER
|
||||
}
|
||||
}
|
||||
}
|
||||
// If this is a dev build use dev docker endpoints
|
||||
stage("Set ENV dev build"){
|
||||
when {
|
||||
not {branch "master"}
|
||||
environment name: 'CHANGE_ID', value: ''
|
||||
}
|
||||
steps {
|
||||
script{
|
||||
env.IMAGE = env.DEV_DOCKERHUB_IMAGE
|
||||
if (env.MULTIARCH == 'true') {
|
||||
env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm32v6-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA
|
||||
} else {
|
||||
env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA
|
||||
}
|
||||
env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-dev-' + env.COMMIT_SHA
|
||||
env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.DEV_DOCKERHUB_IMAGE + '/tags/'
|
||||
}
|
||||
}
|
||||
}
|
||||
// If this is a pull request build use dev docker endpoints
|
||||
stage("Set ENV PR build"){
|
||||
when {
|
||||
not {environment name: 'CHANGE_ID', value: ''}
|
||||
}
|
||||
steps {
|
||||
script{
|
||||
env.IMAGE = env.PR_DOCKERHUB_IMAGE
|
||||
if (env.MULTIARCH == 'true') {
|
||||
env.CI_TAGS = 'amd64-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST + '|arm32v6-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST + '|arm64v8-' + env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST
|
||||
} else {
|
||||
env.CI_TAGS = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST
|
||||
}
|
||||
env.META_TAG = env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-pr-' + env.PULL_REQUEST
|
||||
env.CODE_URL = 'https://github.com/' + env.LS_USER + '/' + env.LS_REPO + '/pull/' + env.PULL_REQUEST
|
||||
env.DOCKERHUB_LINK = 'https://hub.docker.com/r/' + env.PR_DOCKERHUB_IMAGE + '/tags/'
|
||||
}
|
||||
}
|
||||
}
|
||||
// Use helper containers to render templated files
|
||||
stage('Update-Templates') {
|
||||
when {
|
||||
branch "master"
|
||||
environment name: 'CHANGE_ID', value: ''
|
||||
expression {
|
||||
env.CONTAINER_NAME != null
|
||||
}
|
||||
}
|
||||
steps {
|
||||
sh '''#! /bin/bash
|
||||
set -e
|
||||
TEMPDIR=$(mktemp -d)
|
||||
docker pull linuxserver/jenkins-builder:latest
|
||||
docker run --rm -e CONTAINER_NAME=${CONTAINER_NAME} -e GITHUB_BRANCH=master -v ${TEMPDIR}:/ansible/jenkins linuxserver/jenkins-builder:latest
|
||||
docker pull linuxserver/doc-builder:latest
|
||||
docker run --rm -e CONTAINER_NAME=${CONTAINER_NAME} -e GITHUB_BRANCH=master -v ${TEMPDIR}:/ansible/readme linuxserver/doc-builder:latest
|
||||
if [ "$(md5sum ${TEMPDIR}/${LS_REPO}/Jenkinsfile | awk '{ print $1 }')" != "$(md5sum Jenkinsfile | awk '{ print $1 }')" ] || [ "$(md5sum ${TEMPDIR}/${CONTAINER_NAME}/README.md | awk '{ print $1 }')" != "$(md5sum README.md | awk '{ print $1 }')" ]; then
|
||||
mkdir -p ${TEMPDIR}/repo
|
||||
git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/repo/${LS_REPO}
|
||||
git --git-dir ${TEMPDIR}/repo/${LS_REPO}/.git checkout -f master
|
||||
cp ${TEMPDIR}/${CONTAINER_NAME}/README.md ${TEMPDIR}/repo/${LS_REPO}/
|
||||
cp ${TEMPDIR}/docker-${CONTAINER_NAME}/Jenkinsfile ${TEMPDIR}/repo/${LS_REPO}/
|
||||
cd ${TEMPDIR}/repo/${LS_REPO}/
|
||||
git --git-dir ${TEMPDIR}/repo/${LS_REPO}/.git add Jenkinsfile README.md
|
||||
git --git-dir ${TEMPDIR}/repo/${LS_REPO}/.git commit -m 'Bot Updating Templated Files'
|
||||
git --git-dir ${TEMPDIR}/repo/${LS_REPO}/.git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git --all
|
||||
echo "true" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER}
|
||||
else
|
||||
echo "false" > /tmp/${COMMIT_SHA}-${BUILD_NUMBER}
|
||||
fi
|
||||
mkdir -p ${TEMPDIR}/gitbook
|
||||
git clone https://github.com/linuxserver/docker-documentation.git ${TEMPDIR}/gitbook/docker-documentation
|
||||
if [ ! -f ${TEMPDIR}/gitbook/docker-documentation/images/docker-${CONTAINER_NAME}.md ] || [ "$(md5sum ${TEMPDIR}/gitbook/docker-documentation/images/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')" != "$(md5sum ${TEMPDIR}/${CONTAINER_NAME}/docker-${CONTAINER_NAME}.md | awk '{ print $1 }')" ]; then
|
||||
cp ${TEMPDIR}/${CONTAINER_NAME}/docker-${CONTAINER_NAME}.md ${TEMPDIR}/gitbook/docker-documentation/images/
|
||||
cd ${TEMPDIR}/gitbook/docker-documentation/
|
||||
git add images/docker-${CONTAINER_NAME}.md
|
||||
git commit -m 'Bot Updating Templated Files'
|
||||
git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/linuxserver/docker-documentation.git --all
|
||||
fi
|
||||
rm -Rf ${TEMPDIR}'''
|
||||
script{
|
||||
env.FILES_UPDATED = sh(
|
||||
script: '''cat /tmp/${COMMIT_SHA}-${BUILD_NUMBER}''',
|
||||
returnStdout: true).trim()
|
||||
}
|
||||
}
|
||||
}
|
||||
// Exit the build if the Templated files were just updated
|
||||
stage('Template-exit') {
|
||||
when {
|
||||
branch "master"
|
||||
environment name: 'CHANGE_ID', value: ''
|
||||
environment name: 'FILES_UPDATED', value: 'true'
|
||||
expression {
|
||||
env.CONTAINER_NAME != null
|
||||
}
|
||||
}
|
||||
steps {
|
||||
script{
|
||||
env.EXIT_STATUS = 'ABORTED'
|
||||
}
|
||||
}
|
||||
}
|
||||
/* ###############
|
||||
Build Container
|
||||
############### */
|
||||
// Build Docker container for push to LS Repo
|
||||
stage('Build-Single') {
|
||||
when {
|
||||
environment name: 'MULTIARCH', value: 'false'
|
||||
environment name: 'EXIT_STATUS', value: ''
|
||||
}
|
||||
steps {
|
||||
sh "docker build --no-cache -t ${IMAGE}:${META_TAG} \
|
||||
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
|
||||
}
|
||||
}
|
||||
// Build MultiArch Docker containers for push to LS Repo
|
||||
stage('Build-Multi') {
|
||||
when {
|
||||
environment name: 'MULTIARCH', value: 'true'
|
||||
environment name: 'EXIT_STATUS', value: ''
|
||||
}
|
||||
parallel {
|
||||
stage('Build X86') {
|
||||
steps {
|
||||
sh "docker build --no-cache -t ${IMAGE}:amd64-${META_TAG} \
|
||||
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
|
||||
}
|
||||
}
|
||||
stage('Build ARMHF') {
|
||||
agent {
|
||||
label 'ARMHF'
|
||||
}
|
||||
steps {
|
||||
withCredentials([
|
||||
[
|
||||
$class: 'UsernamePasswordMultiBinding',
|
||||
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
|
||||
usernameVariable: 'DOCKERUSER',
|
||||
passwordVariable: 'DOCKERPASS'
|
||||
]
|
||||
]) {
|
||||
echo 'Logging into DockerHub'
|
||||
sh '''#! /bin/bash
|
||||
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
|
||||
'''
|
||||
sh "curl https://lsio-ci.ams3.digitaloceanspaces.com/qemu-arm-static -o qemu-arm-static"
|
||||
sh "chmod +x qemu-*"
|
||||
sh "docker build --no-cache -f Dockerfile.armhf -t ${IMAGE}:arm32v6-${META_TAG} \
|
||||
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
|
||||
sh "docker tag ${IMAGE}:arm32v6-${META_TAG} lsiodev/buildcache:arm32v6-${COMMIT_SHA}-${BUILD_NUMBER}"
|
||||
sh "docker push lsiodev/buildcache:arm32v6-${COMMIT_SHA}-${BUILD_NUMBER}"
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('Build ARM64') {
|
||||
agent {
|
||||
label 'ARM64'
|
||||
}
|
||||
steps {
|
||||
withCredentials([
|
||||
[
|
||||
$class: 'UsernamePasswordMultiBinding',
|
||||
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
|
||||
usernameVariable: 'DOCKERUSER',
|
||||
passwordVariable: 'DOCKERPASS'
|
||||
]
|
||||
]) {
|
||||
echo 'Logging into DockerHub'
|
||||
sh '''#! /bin/bash
|
||||
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
|
||||
'''
|
||||
sh "curl https://lsio-ci.ams3.digitaloceanspaces.com/qemu-aarch64-static -o qemu-aarch64-static"
|
||||
sh "chmod +x qemu-*"
|
||||
sh "docker build --no-cache -f Dockerfile.aarch64 -t ${IMAGE}:arm64v8-${META_TAG} \
|
||||
--build-arg ${BUILD_VERSION_ARG}=${EXT_RELEASE} --build-arg VERSION=\"${META_TAG}\" --build-arg BUILD_DATE=${GITHUB_DATE} ."
|
||||
sh "docker tag ${IMAGE}:arm64v8-${META_TAG} lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}"
|
||||
sh "docker push lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Take the image we just built and dump package versions for comparison
|
||||
stage('Update-packages') {
|
||||
when {
|
||||
branch "master"
|
||||
environment name: 'CHANGE_ID', value: ''
|
||||
environment name: 'EXIT_STATUS', value: ''
|
||||
}
|
||||
steps {
|
||||
sh '''#! /bin/bash
|
||||
set -e
|
||||
TEMPDIR=$(mktemp -d)
|
||||
if [ "${MULTIARCH}" == "true" ]; then
|
||||
LOCAL_CONTAINER=${IMAGE}:amd64-${META_TAG}
|
||||
else
|
||||
LOCAL_CONTAINER=${IMAGE}:${META_TAG}
|
||||
fi
|
||||
if [ "${DIST_IMAGE}" == "alpine" ]; then
|
||||
docker run --rm --entrypoint '/bin/sh' -v ${TEMPDIR}:/tmp ${LOCAL_CONTAINER} -c '\
|
||||
apk info > packages && \
|
||||
apk info -v > versions && \
|
||||
paste -d " " packages versions > /tmp/package_versions.txt && \
|
||||
chmod 777 /tmp/package_versions.txt'
|
||||
elif [ "${DIST_IMAGE}" == "ubuntu" ]; then
|
||||
docker run --rm --entrypoint '/bin/sh' -v ${TEMPDIR}:/tmp ${LOCAL_CONTAINER} -c '\
|
||||
apt list -qq --installed > /tmp/package_versions.txt && \
|
||||
chmod 777 /tmp/package_versions.txt'
|
||||
fi
|
||||
NEW_PACKAGE_TAG=$(md5sum ${TEMPDIR}/package_versions.txt | cut -c1-8 )
|
||||
echo "Package tag sha from current packages in buit container is ${NEW_PACKAGE_TAG} comparing to old ${PACKAGE_TAG} from github"
|
||||
if [ "${NEW_PACKAGE_TAG}" != "${PACKAGE_TAG}" ]; then
|
||||
git clone https://github.com/${LS_USER}/${LS_REPO}.git ${TEMPDIR}/${LS_REPO}
|
||||
git --git-dir ${TEMPDIR}/${LS_REPO}/.git checkout -f master
|
||||
cp ${TEMPDIR}/package_versions.txt ${TEMPDIR}/${LS_REPO}/
|
||||
cd ${TEMPDIR}/${LS_REPO}/
|
||||
wait
|
||||
git add package_versions.txt
|
||||
git commit -m 'Bot Updating Package Versions'
|
||||
git push https://LinuxServer-CI:${GITHUB_TOKEN}@github.com/${LS_USER}/${LS_REPO}.git --all
|
||||
echo "true" > /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER}
|
||||
echo "Package tag updated, stopping build process"
|
||||
else
|
||||
echo "false" > /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER}
|
||||
echo "Package tag is same as previous continue with build process"
|
||||
fi
|
||||
rm -Rf ${TEMPDIR}'''
|
||||
script{
|
||||
env.PACKAGE_UPDATED = sh(
|
||||
script: '''cat /tmp/packages-${COMMIT_SHA}-${BUILD_NUMBER}''',
|
||||
returnStdout: true).trim()
|
||||
}
|
||||
}
|
||||
}
|
||||
// Exit the build if the package file was just updated
|
||||
stage('PACKAGE-exit') {
|
||||
when {
|
||||
branch "master"
|
||||
environment name: 'CHANGE_ID', value: ''
|
||||
environment name: 'PACKAGE_UPDATED', value: 'true'
|
||||
environment name: 'EXIT_STATUS', value: ''
|
||||
}
|
||||
steps {
|
||||
script{
|
||||
env.EXIT_STATUS = 'ABORTED'
|
||||
}
|
||||
}
|
||||
}
|
||||
// Exit the build if this is just a package check and there are no changes to push
|
||||
stage('PACKAGECHECK-exit') {
|
||||
when {
|
||||
branch "master"
|
||||
environment name: 'CHANGE_ID', value: ''
|
||||
environment name: 'PACKAGE_UPDATED', value: 'false'
|
||||
environment name: 'EXIT_STATUS', value: ''
|
||||
expression {
|
||||
params.PACKAGE_CHECK == 'true'
|
||||
}
|
||||
}
|
||||
steps {
|
||||
script{
|
||||
env.EXIT_STATUS = 'ABORTED'
|
||||
}
|
||||
}
|
||||
}
|
||||
/* #######
|
||||
Testing
|
||||
####### */
|
||||
// Run Container tests
|
||||
stage('Test') {
|
||||
when {
|
||||
environment name: 'CI', value: 'true'
|
||||
environment name: 'EXIT_STATUS', value: ''
|
||||
}
|
||||
steps {
|
||||
withCredentials([
|
||||
string(credentialsId: 'spaces-key', variable: 'DO_KEY'),
|
||||
string(credentialsId: 'spaces-secret', variable: 'DO_SECRET')
|
||||
]) {
|
||||
script{
|
||||
env.CI_URL = 'https://lsio-ci.ams3.digitaloceanspaces.com/' + env.IMAGE + '/' + env.META_TAG + '/index.html'
|
||||
}
|
||||
sh '''#! /bin/bash
|
||||
set -e
|
||||
docker pull lsiodev/ci:latest
|
||||
if [ "${MULTIARCH}" == "true" ]; then
|
||||
docker pull lsiodev/buildcache:arm32v6-${COMMIT_SHA}-${BUILD_NUMBER}
|
||||
docker pull lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}
|
||||
docker tag lsiodev/buildcache:arm32v6-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm32v6-${META_TAG}
|
||||
docker tag lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG}
|
||||
fi
|
||||
docker run --rm \
|
||||
-v /var/run/docker.sock:/var/run/docker.sock \
|
||||
-e IMAGE=\"${IMAGE}\" \
|
||||
-e DELAY_START=\"${CI_DELAY}\" \
|
||||
-e TAGS=\"${CI_TAGS}\" \
|
||||
-e META_TAG=\"${META_TAG}\" \
|
||||
-e PORT=\"${CI_PORT}\" \
|
||||
-e SSL=\"${CI_SSL}\" \
|
||||
-e BASE=\"${DIST_IMAGE}\" \
|
||||
-e SECRET_KEY=\"${DO_SECRET}\" \
|
||||
-e ACCESS_KEY=\"${DO_KEY}\" \
|
||||
-e DOCKER_ENV=\"${CI_DOCKERENV}\" \
|
||||
-e WEB_SCREENSHOT=\"${CI_WEB}\" \
|
||||
-e WEB_AUTH=\"${CI_AUTH}\" \
|
||||
-e WEB_PATH=\"${CI_WEBPATH}\" \
|
||||
-e DO_REGION="ams3" \
|
||||
-e DO_BUCKET="lsio-ci" \
|
||||
-t lsiodev/ci:latest \
|
||||
python /ci/ci.py'''
|
||||
}
|
||||
}
|
||||
}
|
||||
/* ##################
|
||||
Release Logic
|
||||
################## */
|
||||
// If this is an amd64 only image only push a single image
|
||||
stage('Docker-Push-Single') {
|
||||
when {
|
||||
environment name: 'MULTIARCH', value: 'false'
|
||||
environment name: 'EXIT_STATUS', value: ''
|
||||
}
|
||||
steps {
|
||||
withCredentials([
|
||||
[
|
||||
$class: 'UsernamePasswordMultiBinding',
|
||||
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
|
||||
usernameVariable: 'DOCKERUSER',
|
||||
passwordVariable: 'DOCKERPASS'
|
||||
]
|
||||
]) {
|
||||
echo 'Logging into DockerHub'
|
||||
sh '''#! /bin/bash
|
||||
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
|
||||
'''
|
||||
sh "docker tag ${IMAGE}:${META_TAG} ${IMAGE}:latest"
|
||||
sh "docker push ${IMAGE}:latest"
|
||||
sh "docker push ${IMAGE}:${META_TAG}"
|
||||
}
|
||||
}
|
||||
}
|
||||
// If this is a multi arch release push all images and define the manifest
|
||||
stage('Docker-Push-Multi') {
|
||||
when {
|
||||
environment name: 'MULTIARCH', value: 'true'
|
||||
environment name: 'EXIT_STATUS', value: ''
|
||||
}
|
||||
steps {
|
||||
withCredentials([
|
||||
[
|
||||
$class: 'UsernamePasswordMultiBinding',
|
||||
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
|
||||
usernameVariable: 'DOCKERUSER',
|
||||
passwordVariable: 'DOCKERPASS'
|
||||
]
|
||||
]) {
|
||||
sh '''#! /bin/bash
|
||||
echo $DOCKERPASS | docker login -u $DOCKERUSER --password-stdin
|
||||
'''
|
||||
sh '''#! /bin/bash
|
||||
if [ "${CI}" == "false" ]; then
|
||||
docker pull lsiodev/buildcache:arm32v6-${COMMIT_SHA}-${BUILD_NUMBER}
|
||||
docker pull lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER}
|
||||
docker tag lsiodev/buildcache:arm32v6-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm32v6-${META_TAG}
|
||||
docker tag lsiodev/buildcache:arm64v8-${COMMIT_SHA}-${BUILD_NUMBER} ${IMAGE}:arm64v8-${META_TAG}
|
||||
fi'''
|
||||
sh "docker tag ${IMAGE}:amd64-${META_TAG} ${IMAGE}:amd64-latest"
|
||||
sh "docker tag ${IMAGE}:arm32v6-${META_TAG} ${IMAGE}:arm32v6-latest"
|
||||
sh "docker tag ${IMAGE}:arm64v8-${META_TAG} ${IMAGE}:arm64v8-latest"
|
||||
sh "docker push ${IMAGE}:amd64-${META_TAG}"
|
||||
sh "docker push ${IMAGE}:arm32v6-${META_TAG}"
|
||||
sh "docker push ${IMAGE}:arm64v8-${META_TAG}"
|
||||
sh "docker push ${IMAGE}:amd64-latest"
|
||||
sh "docker push ${IMAGE}:arm32v6-latest"
|
||||
sh "docker push ${IMAGE}:arm64v8-latest"
|
||||
sh "docker manifest push --purge ${IMAGE}:latest || :"
|
||||
sh "docker manifest create ${IMAGE}:latest ${IMAGE}:amd64-latest ${IMAGE}:arm32v6-latest ${IMAGE}:arm64v8-latest"
|
||||
sh "docker manifest annotate ${IMAGE}:latest ${IMAGE}:arm32v6-latest --os linux --arch arm"
|
||||
sh "docker manifest annotate ${IMAGE}:latest ${IMAGE}:arm64v8-latest --os linux --arch arm64 --variant v8"
|
||||
sh "docker manifest push --purge ${IMAGE}:${META_TAG} || :"
|
||||
sh "docker manifest create ${IMAGE}:${META_TAG} ${IMAGE}:amd64-${META_TAG} ${IMAGE}:arm32v6-${META_TAG} ${IMAGE}:arm64v8-${META_TAG}"
|
||||
sh "docker manifest annotate ${IMAGE}:${META_TAG} ${IMAGE}:arm32v6-${META_TAG} --os linux --arch arm"
|
||||
sh "docker manifest annotate ${IMAGE}:${META_TAG} ${IMAGE}:arm64v8-${META_TAG} --os linux --arch arm64 --variant v8"
|
||||
sh "docker manifest push --purge ${IMAGE}:latest"
|
||||
sh "docker manifest push --purge ${IMAGE}:${META_TAG}"
|
||||
}
|
||||
}
|
||||
}
|
||||
// If this is a public release tag it in the LS Github
|
||||
stage('Github-Tag-Push-Release') {
|
||||
when {
|
||||
branch "master"
|
||||
expression {
|
||||
env.LS_RELEASE != env.EXT_RELEASE_CLEAN + '-pkg-' + env.PACKAGE_TAG + '-ls' + env.LS_TAG_NUMBER
|
||||
}
|
||||
environment name: 'CHANGE_ID', value: ''
|
||||
environment name: 'EXIT_STATUS', value: ''
|
||||
}
|
||||
steps {
|
||||
echo "Pushing New tag for current commit ${EXT_RELEASE_CLEAN}-pkg-${PACKAGE_TAG}-ls${LS_TAG_NUMBER}"
|
||||
sh '''curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/git/tags \
|
||||
-d '{"tag":"'${EXT_RELEASE_CLEAN}'-pkg-'${PACKAGE_TAG}'-ls'${LS_TAG_NUMBER}'",\
|
||||
"object": "'${COMMIT_SHA}'",\
|
||||
"message": "Tagging Release '${EXT_RELEASE_CLEAN}'-pkg-'${PACKAGE_TAG}'-ls'${LS_TAG_NUMBER}' to master",\
|
||||
"type": "commit",\
|
||||
"tagger": {"name": "LinuxServer Jenkins","email": "jenkins@linuxserver.io","date": "'${GITHUB_DATE}'"}}' '''
|
||||
echo "Pushing New release for Tag"
|
||||
sh '''#! /bin/bash
|
||||
echo "Updating to ${EXT_RELEASE_CLEAN}" > releasebody.json
|
||||
echo '{"tag_name":"'${EXT_RELEASE_CLEAN}'-pkg-'${PACKAGE_TAG}'-ls'${LS_TAG_NUMBER}'",\
|
||||
"target_commitish": "master",\
|
||||
"name": "'${EXT_RELEASE_CLEAN}'-pkg-'${PACKAGE_TAG}'-ls'${LS_TAG_NUMBER}'",\
|
||||
"body": "**LinuxServer Changes:**\\n\\n'${LS_RELEASE_NOTES}'\\n**Remote Changes:**\\n\\n' > start
|
||||
printf '","draft": false,"prerelease": false}' >> releasebody.json
|
||||
paste -d'\\0' start releasebody.json > releasebody.json.done
|
||||
curl -H "Authorization: token ${GITHUB_TOKEN}" -X POST https://api.github.com/repos/${LS_USER}/${LS_REPO}/releases -d @releasebody.json.done'''
|
||||
}
|
||||
}
|
||||
// Use helper container to sync the current README on master to the dockerhub endpoint
|
||||
stage('Sync-README') {
|
||||
when {
|
||||
environment name: 'CHANGE_ID', value: ''
|
||||
environment name: 'EXIT_STATUS', value: ''
|
||||
}
|
||||
steps {
|
||||
withCredentials([
|
||||
[
|
||||
$class: 'UsernamePasswordMultiBinding',
|
||||
credentialsId: '3f9ba4d5-100d-45b0-a3c4-633fd6061207',
|
||||
usernameVariable: 'DOCKERUSER',
|
||||
passwordVariable: 'DOCKERPASS'
|
||||
]
|
||||
]) {
|
||||
sh '''#! /bin/bash
|
||||
docker pull lsiodev/readme-sync
|
||||
docker run --rm=true \
|
||||
-e DOCKERHUB_USERNAME=$DOCKERUSER \
|
||||
-e DOCKERHUB_PASSWORD=$DOCKERPASS \
|
||||
-e GIT_REPOSITORY=${LS_USER}/${LS_REPO} \
|
||||
-e DOCKER_REPOSITORY=${IMAGE} \
|
||||
-e GIT_BRANCH=master \
|
||||
lsiodev/readme-sync bash -c 'node sync' '''
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
/* ######################
|
||||
Send status to Discord
|
||||
###################### */
|
||||
post {
|
||||
always {
|
||||
script{
|
||||
if (env.EXIT_STATUS == "ABORTED"){
|
||||
sh 'echo "build aborted"'
|
||||
}
|
||||
else if (currentBuild.currentResult == "SUCCESS"){
|
||||
sh ''' curl -X POST --data '{"avatar_url": "https://wiki.jenkins-ci.org/download/attachments/2916393/headshot.png","embeds": [{"color": 1681177,\
|
||||
"description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**Status:** Success\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\
|
||||
"username": "Jenkins"}' ${BUILDS_DISCORD} '''
|
||||
}
|
||||
else {
|
||||
sh ''' curl -X POST --data '{"avatar_url": "https://wiki.jenkins-ci.org/download/attachments/2916393/headshot.png","embeds": [{"color": 16711680,\
|
||||
"description": "**Build:** '${BUILD_NUMBER}'\\n**CI Results:** '${CI_URL}'\\n**Status:** failure\\n**Job:** '${RUN_DISPLAY_URL}'\\n**Change:** '${CODE_URL}'\\n**External Release:**: '${RELEASE_LINK}'\\n**DockerHub:** '${DOCKERHUB_LINK}'\\n"}],\
|
||||
"username": "Jenkins"}' ${BUILDS_DISCORD} '''
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@@ -0,0 +1,27 @@
|
||||
---
|
||||
|
||||
# jenkins variables
|
||||
project_name: docker-plex
|
||||
external_type: na
|
||||
custom_version_command: curl -s 'https://plex.tv/downloads/details/1?build=linux-ubuntu-x86_64&distro=ubuntu' |grep -oP 'version="\\K[^"]+' | tail -n 1
|
||||
release_type: stable
|
||||
release_tag: latest
|
||||
ls_branch: master
|
||||
repo_vars:
|
||||
- BUILD_VERSION_ARG = 'PLEX_RELEASE'
|
||||
- LS_USER = 'linuxserver'
|
||||
- LS_REPO = 'docker-plex'
|
||||
- CONTAINER_NAME = 'plex'
|
||||
- DOCKERHUB_IMAGE = 'linuxserver/plex'
|
||||
- DEV_DOCKERHUB_IMAGE = 'lsiodev/plex'
|
||||
- PR_DOCKERHUB_IMAGE = 'lspipepr/plex'
|
||||
- DIST_IMAGE = 'ubuntu'
|
||||
- MULTIARCH='true'
|
||||
- CI='true'
|
||||
- CI_WEB='false'
|
||||
- CI_PORT='32400'
|
||||
- CI_SSL='false'
|
||||
- CI_DELAY='120'
|
||||
- CI_DOCKERENV='TZ=US/Pacific'
|
||||
- CI_AUTH='user:password'
|
||||
- CI_WEBPATH='/web'
|
@@ -0,0 +1,107 @@
|
||||
---
|
||||
|
||||
# project information
|
||||
project_name: plex
|
||||
project_url: "https://plex.tv"
|
||||
project_logo: "http://the-gadgeteer.com/wp-content/uploads/2015/10/plex-logo-e1446990678679.png"
|
||||
project_blurb: "[{{ project_name|capitalize }}]({{ project_url }}) organizes video, music and photos from personal media libraries and streams them to smart TVs, streaming boxes and mobile devices. This container is packaged as a standalone Plex Media Server. has always been a top priority. Straightforward design and bulk actions mean getting things done faster.
|
||||
|
||||
*To All Arm Users* - Plex is currently transitioning to a new build system allowing everyone to ingest software releases for all popular architectures. In the mean time releases on this channel for armv7 and aarch64 will be frozen at a beta release version of 1.15, including plex pass users. For armv7 users that want a stable release please use our legacy repo [Here](https://hub.docker.com/r/lsioarmhf/plex) ."
|
||||
project_lsio_github_repo_url: "https://github.com/linuxserver/docker-{{ project_name }}"
|
||||
|
||||
# supported architectures
|
||||
available_architectures:
|
||||
- { arch: "{{ arch_x86_64 }}", tag: "amd64-latest"}
|
||||
- { arch: "{{ arch_arm64 }}", tag: "arm64v8-latest"}
|
||||
- { arch: "{{ arch_armhf }}", tag: "arm32v6-latest"}
|
||||
|
||||
# container parameters
|
||||
common_param_env_vars_enabled: true #PGID, PUID, etc
|
||||
param_container_name: "{{ project_name }}"
|
||||
param_usage_include_vols: true
|
||||
param_volumes:
|
||||
- { vol_path: "/config", vol_host_path: "</path/to/library>", desc: "Plex library location. *This can grow very large, 50gb+ is likely for a large collection.*" }
|
||||
- { vol_path: "/data/tvshows", vol_host_path: "<path/to/tvseries>", desc: "Media goes here. Add as many as needed e.g. `/data/movies`, `/data/tv`, etc." }
|
||||
- { vol_path: "/data/movies", vol_host_path: "</path/to/movies>", desc: "Media goes here. Add as many as needed e.g. `/data/movies`, `/data/tv`, etc." }
|
||||
- { vol_path: "/transcode", vol_host_path: "</path for transcoding>", desc: "Path for transcoding folder, *optional*." }
|
||||
param_usage_include_ports: false
|
||||
param_usage_include_net: true
|
||||
param_net: "host"
|
||||
param_net_desc: "Use Host Networking"
|
||||
param_usage_include_env: true
|
||||
param_env_vars:
|
||||
- { env_var: "VERSION", env_value: "docker", desc: "Set whether to update plex or not - see Application Setup section."}
|
||||
|
||||
optional_parameters: |
|
||||
*Special note* - If you'd like to run Plex without requiring `--net=host` (`NOT recommended`) then you will need the following ports in your `docker create` command:
|
||||
|
||||
```
|
||||
-p 32400:32400 \
|
||||
-p 32400:32400/udp \
|
||||
-p 32469:32469 \
|
||||
-p 32469:32469/udp \
|
||||
-p 5353:5353/udp \
|
||||
-p 1900:1900/udp
|
||||
```
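For context, here is a sketch of a complete `docker create` command using these port mappings; the paths, PUID/PGID values and container name are illustrative placeholders, not part of the original template:

```
docker create \
  --name=plex \
  -e PUID=1000 \
  -e PGID=1000 \
  -e VERSION=docker \
  -p 32400:32400 \
  -p 32400:32400/udp \
  -p 32469:32469 \
  -p 32469:32469/udp \
  -p 5353:5353/udp \
  -p 1900:1900/udp \
  -v </path/to/library>:/config \
  -v <path/to/tvseries>:/data/tvshows \
  -v </path/to/movies>:/data/movies \
  -v </path for transcoding>:/transcode \
  linuxserver/plex
```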
|
||||
|
||||
The application accepts a series of optional parameters to further customize itself on boot:
|
||||
|
||||
| Parameter | Function |
|
||||
| :---: | --- |
|
||||
| `-v /transcode` | Path for transcoding folder|
|
||||
| `--device=/dev/dri:/dev/dri` | Add this option to your run command if you plan on using Quicksync hardware acceleration - see Application Setup section.|
|
||||
|
||||
# application setup block
|
||||
app_setup_block_enabled: true
|
||||
app_setup_block: |
|
||||
Webui can be found at `<your-ip>:32400/web`
|
||||
|
||||
**Note about updates:** if there is no value set for the `VERSION` variable, then no updates will take place.

**For new users:** no updates will take place on the first run of the container, as there is no preferences file to read your token from; to update, restart the Docker container after logging in through the webui.
|
||||
|
||||
Valid settings for `VERSION` are listed below; a usage example follows the list.

`IMPORTANT NOTE: YOU CANNOT UPDATE TO A PLEXPASS ONLY VERSION IF YOU DO NOT HAVE PLEXPASS`
|
||||
|
||||
+ **`docker`**: Let Docker handle the Plex version; we keep our Docker Hub endpoint up to date with the latest public builds. This is the same as leaving this setting out of your create command.
+ **`latest`**: will update Plex to the latest version that you are entitled to.
+ **`public`**: will update Plex Pass users to the latest public version, useful for Plex Pass users who don't want to be on the bleeding edge but still want the latest public updates.
+ **`<specific-version>`**: will select a specific version of Plex to install (e.g. 0.9.12.4.1192-9a47d21); note that you cannot use this to access Plex Pass versions if you do not have Plex Pass.
|
||||
|
||||
Hardware acceleration users for Intel Quicksync will need to mount their /dev/dri video device inside of the container by passing the following option when running or creating the container:

```--device=/dev/dri:/dev/dri```

We will automatically ensure the abc user inside of the container has the proper permissions to access this device.
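For example (a sketch only; the paths, IDs and container name are placeholders), a Quicksync-enabled run command might look like:

```
docker run -d \
  --name=plex \
  --net=host \
  --device=/dev/dri:/dev/dri \
  -e PUID=1000 \
  -e PGID=1000 \
  -e VERSION=docker \
  -v </path/to/library>:/config \
  linuxserver/plex
```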
|
||||
|
||||
Hardware acceleration users for Nvidia will need to install the container runtime provided by Nvidia on their host; instructions can be found here:

https://github.com/NVIDIA/nvidia-docker

We automatically add the necessary environment variables to use all available GPUs on the host. Once nvidia-docker is installed on your host, you just need to start the container with the Nvidia container runtime ```--runtime=nvidia```. Nvidia automatically mounts the GPU and drivers from your host into the Plex container.
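A minimal sketch, assuming nvidia-docker is already installed on the host (the paths, IDs and container name are placeholders):

```
docker run -d \
  --name=plex \
  --net=host \
  --runtime=nvidia \
  -e PUID=1000 \
  -e PGID=1000 \
  -e VERSION=docker \
  -v </path/to/library>:/config \
  linuxserver/plex
```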
|
||||
|
||||
# changelog
|
||||
changelogs:
|
||||
- { date: "16.01.19:", desc: "Add pipeline logic, multi arch, and HW transcoding configuration; remove avahi service." }
|
||||
- { date: "07.09.18:", desc: "Rebase to ubuntu bionic, add udev package." }
|
||||
- { date: "09.12.17:", desc: "Fix continuation lines." }
|
||||
- { date: "12.07.17:", desc: "Add inspect commands to README, move to jenkins build and push." }
|
||||
- { date: "28.05.17:", desc: "Add unrar package as per requests, for subzero plugin." }
|
||||
- { date: "11.01.17:", desc: "Use Plex environment variables from pms docker,
|
||||
change abc home folder to /app to alleviate usermod chowning library" }
|
||||
- { date: "03.01.17:", desc: "Use case insensitive version variable matching rather than export and make lowercase." }
|
||||
- { date: "17.10.16:", desc: "Allow use of uppercase version variable" }
|
||||
- { date: "01.10.16:", desc: "Add TZ info to README." }
|
||||
- { date: "09.09.16:", desc: "Add layer badges to README." }
|
||||
- { date: "27.08.16:", desc: "Add badges to README." }
|
||||
- { date: "22.08.16:", desc: "Rebased to xenial and s6 overlay" }
|
||||
- { date: "07.04.16:", desc: "removed `/transcode` volume support (upstream Plex change) and modified PlexPass download method to prevent unauthorised usage of paid PMS" }
|
||||
- { date: "24.09.15:", desc: "added optional support for volume transcoding (/transcode), and various typo fixes." }
|
||||
- { date: "17.09.15:", desc: "Changed to run chmod only once" }
|
||||
- { date: "19.09.15:", desc: "Plex updated their download servers from http to https" }
|
||||
- { date: "28.08.15:", desc: "Removed plexpass from routine, and now uses VERSION as a combination fix." }
|
||||
- { date: "18.07.15:", desc: "Moved autoupdate to be hosted by linuxserver.io and implemented bugfix thanks to ljm42." }
|
||||
- { date: "09.07.15:", desc: "Now with ability to pick static version number." }
|
||||
- { date: "08.07.15:", desc: "Now with autoupdates. (Hosted by fanart.tv)" }
|
||||
- { date: "03.07.15:", desc: "Fixed a mistake that allowed plex to run as user plex rather than abc (99:100). Thanks to double16 for spotting this." }
|
@@ -0,0 +1,25 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
# Check for the existence of the Intel video device
|
||||
if [ -e /dev/dri ]; then
|
||||
VIDEO_GID=$(stat -c '%g' /dev/dri/* | head -n 1)
|
||||
else
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# Check if this GID matches the current abc user
|
||||
ABCGID=$(getent group abc | awk -F: '{print $3}')
|
||||
if [ "${ABCGID}" == "${VIDEO_GID}" ]; then
|
||||
exit 0
|
||||
fi
|
||||
|
||||
# If the video GID is already taken by another group, move that group to GID 65533 and claim the GID for video
|
||||
CURRENT=$(getent group ${VIDEO_GID} | awk -F: '{print $1}')
|
||||
if [ -z "${CURRENT}" ] || [ "${CURRENT}" == 'video' ]; then
|
||||
groupmod -g ${VIDEO_GID} video
|
||||
usermod -a -G video abc
|
||||
else
|
||||
groupmod -g 65533 ${CURRENT}
|
||||
groupmod -g ${VIDEO_GID} video
|
||||
usermod -a -G video abc
|
||||
fi
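A quick, hypothetical way to verify the remapping once the container is up (assumes a host with /dev/dri and a container named `plex`):

```
# GID owning the DRI devices on the host
stat -c '%g' /dev/dri/*
# inside the container, the video group should now carry that GID
# and the abc user should be a member of it
docker exec plex sh -c 'getent group video && id abc'
```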
|
@ -1,8 +0,0 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
until [[ -e /var/run/dbus/system_bus_socket ]]; do
|
||||
sleep 1s
|
||||
done
|
||||
|
||||
echo "Starting Avahi daemon"
|
||||
exec avahi-daemon --no-chroot
|
@ -1,4 +0,0 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
echo "Starting dbus-daemon"
|
||||
exec dbus-daemon --system --nofork
|