Remove docker overlay, update kubernetes
@@ -1,593 +0,0 @@
{
  callPackage,
  coreutils,
  docker,
  e2fsprogs,
  findutils,
  go,
  jshon,
  jq,
  lib,
  pkgs,
  pigz,
  nixUnstable,
  perl,
  runCommand,
  rsync,
  shadow,
  stdenv,
  storeDir ? builtins.storeDir,
  utillinux,
  vmTools,
  writeReferencesToFile,
  writeScript,
  writeText,
}:

# WARNING: this API is unstable and may be subject to backwards-incompatible changes in the future.

rec {

  examples = import ./examples.nix {
    inherit pkgs buildImage pullImage shadowSetup buildImageWithNixDb;
  };

  pullImage = callPackage ./pull.nix {};

  # We need to sum layer.tar, not a directory, hence tarsum instead of nix-hash.
  # And we cannot untar it, because then we cannot preserve permissions etc.
  tarsum = runCommand "tarsum" {
    buildInputs = [ go ];
  } ''
    mkdir tarsum
    cd tarsum

    cp ${./tarsum.go} tarsum.go
    export GOPATH=$(pwd)
    mkdir -p src/github.com/docker/docker/pkg
    ln -sT ${docker.src}/components/engine/pkg/tarsum src/github.com/docker/docker/pkg/tarsum
    go build

    cp tarsum $out
  '';

  # buildEnv creates symlinks to dirs, which is hard to edit inside the overlay VM
  mergeDrvs = {
    derivations,
    onlyDeps ? false
  }:
    runCommand "merge-drvs" {
      inherit derivations onlyDeps;
    } ''
      if [[ -n "$onlyDeps" ]]; then
        echo $derivations > $out
        exit 0
      fi

      mkdir $out
      for derivation in $derivations; do
        echo "Merging $derivation..."
        if [[ -d "$derivation" ]]; then
          # If it's a directory, copy all of its contents into $out.
          cp -drf --preserve=mode -f $derivation/* $out/
        else
          # Otherwise treat the derivation as a tarball and extract it
          # into $out.
          tar -C $out -xpf $derivation || true
        fi
      done
    '';

  # Helper for setting up the base files for managing users and
  # groups, only if such files don't exist already. It is suitable for
  # being used in a runAsRoot script.
  shadowSetup = ''
    export PATH=${shadow}/bin:$PATH
    mkdir -p /etc/pam.d
    if [[ ! -f /etc/passwd ]]; then
      echo "root:x:0:0::/root:${stdenv.shell}" > /etc/passwd
      echo "root:!x:::::::" > /etc/shadow
    fi
    if [[ ! -f /etc/group ]]; then
      echo "root:x:0:" > /etc/group
      echo "root:x::" > /etc/gshadow
    fi
    if [[ ! -f /etc/pam.d/other ]]; then
      cat > /etc/pam.d/other <<EOF
account sufficient pam_unix.so
auth sufficient pam_rootok.so
password requisite pam_unix.so nullok sha512
session required pam_unix.so
EOF
    fi
    if [[ ! -f /etc/login.defs ]]; then
      touch /etc/login.defs
    fi
  '';

  # Run commands in a virtual machine.
  runWithOverlay = {
    name,
    fromImage ? null,
    fromImageName ? null,
    fromImageTag ? null,
    diskSize ? 1024,
    preMount ? "",
    postMount ? "",
    postUmount ? ""
  }:
    vmTools.runInLinuxVM (
      runCommand name {
        preVM = vmTools.createEmptyImage {
          size = diskSize;
          fullName = "docker-run-disk";
        };
        inherit fromImage fromImageName fromImageTag;

        buildInputs = [ utillinux e2fsprogs jshon rsync ];
      } ''
        rm -rf $out

        mkdir disk
        mkfs /dev/${vmTools.hd}
        mount /dev/${vmTools.hd} disk
        cd disk

        if [[ -n "$fromImage" ]]; then
          echo "Unpacking base image..."
          mkdir image
          tar -C image -xpf "$fromImage"

          # If the image name isn't set, read it from the image repository json.
          if [[ -z "$fromImageName" ]]; then
            fromImageName=$(jshon -k < image/repositories | head -n 1)
            echo "From-image name wasn't set. Read $fromImageName."
          fi

          # If the tag isn't set, use the name as an index into the json
          # and read the first key found.
          if [[ -z "$fromImageTag" ]]; then
            fromImageTag=$(jshon -e $fromImageName -k < image/repositories \
                           | head -n1)
            echo "From-image tag wasn't set. Read $fromImageTag."
          fi

          # Use the name and tag to get the parent ID field.
          parentID=$(jshon -e $fromImageName -e $fromImageTag -u \
                     < image/repositories)
        fi

        # Unpack all of the parent layers into the image.
        lowerdir=""
        while [[ -n "$parentID" ]]; do
          echo "Unpacking layer $parentID"
          mkdir -p image/$parentID/layer
          tar -C image/$parentID/layer -xpf image/$parentID/layer.tar
          rm image/$parentID/layer.tar

          find image/$parentID/layer -name ".wh.*" -exec bash -c 'name="$(basename {}|sed "s/^.wh.//")"; mknod "$(dirname {})/$name" c 0 0; rm {}' \;

          # Get the next lower directory and continue the loop.
          lowerdir=$lowerdir''${lowerdir:+:}image/$parentID/layer
          parentID=$(cat image/$parentID/json \
                     | (jshon -e parent -u 2>/dev/null || true))
        done

        mkdir work
        mkdir layer
        mkdir mnt

        ${lib.optionalString (preMount != "") ''
          # Execute pre-mount steps
          echo "Executing pre-mount steps..."
          ${preMount}
        ''}

        if [ -n "$lowerdir" ]; then
          mount -t overlay overlay -olowerdir=$lowerdir,workdir=work,upperdir=layer mnt
        else
          mount --bind layer mnt
        fi

        ${lib.optionalString (postMount != "") ''
          # Execute post-mount steps
          echo "Executing post-mount steps..."
          ${postMount}
        ''}

        umount mnt

        (
          cd layer
          cmd='name="$(basename {})"; touch "$(dirname {})/.wh.$name"; rm "{}"'
          find . -type c -exec bash -c "$cmd" \;
        )

        ${postUmount}
      '');

  exportImage = { name ? fromImage.name, fromImage, fromImageName ? null, fromImageTag ? null, diskSize ? 1024 }:
    runWithOverlay {
      inherit name fromImage fromImageName fromImageTag diskSize;

      postMount = ''
        echo "Packing raw image..."
        tar -C mnt --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf $out .
      '';
    };


  # Create an executable shell script which has the coreutils in its
  # PATH. Since root scripts are executed in a blank environment, even
  # things like `ls` or `echo` will be missing.
  shellScript = name: text:
    writeScript name ''
      #!${stdenv.shell}
      set -e
      export PATH=${coreutils}/bin:/bin
      ${text}
    '';

  nixRegistration = contents: runCommand "nix-registration" {
    buildInputs = [ nixUnstable perl ];
    # For obtaining the closure of `contents'.
    exportReferencesGraph =
      let contentsList = if builtins.isList contents then contents else [ contents ];
      in map (x: [("closure-" + baseNameOf x) x]) contentsList;
    }
    ''
      mkdir $out
      printRegistration=1 perl ${pkgs.pathsFromGraph} closure-* > $out/db.dump
      perl ${pkgs.pathsFromGraph} closure-* > $out/storePaths
    '';

  # Create a "layer" (set of files).
  mkPureLayer = {
    # Name of the layer
    name,
    # JSON containing configuration and metadata for this layer.
    baseJson,
    # Files to add to the layer.
    contents ? null,
    # When copying the contents into the image, preserve symlinks to
    # directories (see `rsync -K`). Otherwise, transform those symlinks
    # into directories.
    keepContentsDirlinks ? false,
    # Additional commands to run on the layer before it is tar'd up.
    extraCommands ? "", uid ? 0, gid ? 0
  }:
    runCommand "docker-layer-${name}" {
      inherit baseJson contents extraCommands;
      buildInputs = [ jshon rsync ];
    }
    ''
      mkdir layer
      if [[ -n "$contents" ]]; then
        echo "Adding contents..."
        for item in $contents; do
          echo "Adding $item"
          rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
        done
      else
        echo "No contents to add to layer."
      fi

      chmod ug+w layer

      if [[ -n $extraCommands ]]; then
        (cd layer; eval "$extraCommands")
      fi

      # Tar up the layer and throw it into 'layer.tar'.
      echo "Packing layer..."
      mkdir $out
      tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=${toString uid} --group=${toString gid} -cf $out/layer.tar .

      # Compute a checksum of the tarball.
      echo "Computing layer checksum..."
      tarsum=$(${tarsum} < $out/layer.tar)

      # Add a 'checksum' field to the JSON, with the value set to the
      # checksum of the tarball.
      cat ${baseJson} | jshon -s "$tarsum" -i checksum > $out/json

      # Indicate to docker that we're using schema version 1.0.
      echo -n "1.0" > $out/VERSION

      echo "Finished building layer '${name}'"
    '';

  # Make a "root" layer; required if we need to execute commands as a
  # privileged user on the image. The commands themselves will be
  # performed in a virtual machine sandbox.
  mkRootLayer = {
    # Name of the image.
    name,
    # Script to run as root. Bash.
    runAsRoot,
    # Files to add to the layer. If null, an empty layer will be created.
    contents ? null,
    # When copying the contents into the image, preserve symlinks to
    # directories (see `rsync -K`). Otherwise, transform those symlinks
    # into directories.
    keepContentsDirlinks ? false,
    # JSON containing configuration and metadata for this layer.
    baseJson,
    # Existing image onto which to append the new layer.
    fromImage ? null,
    # Name of the image we're appending onto.
    fromImageName ? null,
    # Tag of the image we're appending onto.
    fromImageTag ? null,
    # How much disk to allocate for the temporary virtual machine.
    diskSize ? 1024,
    # Commands (bash) to run on the layer; these do not require sudo.
    extraCommands ? ""
  }:
    # Generate an executable script from the `runAsRoot` text.
    let runAsRootScript = shellScript "run-as-root.sh" runAsRoot;
    in runWithOverlay {
      name = "docker-layer-${name}";

      inherit fromImage fromImageName fromImageTag diskSize;

      preMount = lib.optionalString (contents != null && contents != []) ''
        echo "Adding contents..."
        for item in ${toString contents}; do
          echo "Adding $item..."
          rsync -a${if keepContentsDirlinks then "K" else "k"} --chown=0:0 $item/ layer/
        done

        chmod ug+w layer
      '';

      postMount = ''
        mkdir -p mnt/{dev,proc,sys} mnt${storeDir}

        # Mount /dev, /sys and the nix store as shared folders.
        mount --rbind /dev mnt/dev
        mount --rbind /sys mnt/sys
        mount --rbind ${storeDir} mnt${storeDir}

        # Execute the run as root script. See 'man unshare' for
        # details on what's going on here; basically this command
        # means that the runAsRootScript will be executed in a nearly
        # completely isolated environment.
        unshare -imnpuf --mount-proc chroot mnt ${runAsRootScript}

        # Unmount directories and remove them.
        umount -R mnt/dev mnt/sys mnt${storeDir}
        rmdir --ignore-fail-on-non-empty \
          mnt/dev mnt/proc mnt/sys mnt${storeDir} \
          mnt$(dirname ${storeDir})
      '';

      postUmount = ''
        (cd layer; eval "${extraCommands}")

        echo "Packing layer..."
        mkdir $out
        tar -C layer --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" -cf $out/layer.tar .

        # Compute the tar checksum and add it to the output json.
        echo "Computing checksum..."
        ts=$(${tarsum} < $out/layer.tar)
        cat ${baseJson} | jshon -s "$ts" -i checksum > $out/json
        # Indicate to docker that we're using schema version 1.0.
        echo -n "1.0" > $out/VERSION

        echo "Finished building layer '${name}'"
      '';
    };

  # 1. extract the base image
  # 2. create the layer
  # 3. add layer deps to the layer itself, diffing with the base image
  # 4. compute the layer id
  # 5. put the layer in the image
  # 6. repack the image
  buildImage = args@{
    # Image name.
    name,
    # Image tag.
    tag ? "latest",
    # Parent image, to append to.
    fromImage ? null,
    # Name of the parent image; will be read from the image otherwise.
    fromImageName ? null,
    # Tag of the parent image; will be read from the image otherwise.
    fromImageTag ? null,
    # Files to put on the image (a nix store path or list of paths).
    contents ? null,
    # When copying the contents into the image, preserve symlinks to
    # directories (see `rsync -K`). Otherwise, transform those symlinks
    # into directories.
    keepContentsDirlinks ? false,
    # Docker config; e.g. what command to run on the container.
    config ? null,
    # Optional bash script to run on the files prior to fixturizing the layer.
    extraCommands ? "", uid ? 0, gid ? 0,
    # Optional bash script to run as root on the image when provisioning.
    runAsRoot ? null,
    # Size of the virtual machine disk to provision when building the image.
    diskSize ? 1024,
    # Time of creation of the image.
    created ? "1970-01-01T00:00:01Z",
  }:

    let
      baseName = baseNameOf name;

      # Create a JSON blob of the configuration. Set the date to unix zero.
      baseJson = writeText "${baseName}-config.json" (builtins.toJSON {
        inherit created config;
        architecture = "amd64";
        os = "linux";
      });

      layer =
        if runAsRoot == null
        then mkPureLayer {
          name = baseName;
          inherit baseJson contents keepContentsDirlinks extraCommands uid gid;
        } else mkRootLayer {
          name = baseName;
          inherit baseJson fromImage fromImageName fromImageTag
                  contents keepContentsDirlinks runAsRoot diskSize
                  extraCommands;
        };
      result = runCommand "docker-image-${baseName}.tar.gz" {
        buildInputs = [ jshon pigz coreutils findutils jq ];
        # Image name and tag must be lowercase
        imageName = lib.toLower name;
        imageTag = lib.toLower tag;
        inherit fromImage baseJson;
        layerClosure = writeReferencesToFile layer;
        passthru.buildArgs = args;
        passthru.layer = layer;
      } ''
        # Print tar contents:
        # 1: Interpreted as relative to the root directory
        # 2: With no trailing slashes on directories
        # This is useful for ensuring that the output matches the
        # values generated by the "find" command
        ls_tar() {
          for f in $(tar -tf $1 | xargs realpath -ms --relative-to=.); do
            if [[ "$f" != "." ]]; then
              echo "/$f"
            fi
          done
        }

        mkdir image
        touch baseFiles
        if [[ -n "$fromImage" ]]; then
          echo "Unpacking base image..."
          tar -C image -xpf "$fromImage"
          # Do not import the base image configuration and manifest
          chmod a+w image image/*.json
          rm -f image/*.json

          if [[ -z "$fromImageName" ]]; then
            fromImageName=$(jshon -k < image/repositories|head -n1)
          fi
          if [[ -z "$fromImageTag" ]]; then
            fromImageTag=$(jshon -e $fromImageName -k \
                           < image/repositories|head -n1)
          fi
          parentID=$(jshon -e $fromImageName -e $fromImageTag -u \
                     < image/repositories)

          for l in image/*/layer.tar; do
            ls_tar $l >> baseFiles
          done
        fi

        chmod -R ug+rw image

        mkdir temp
        cp ${layer}/* temp/
        chmod ug+w temp/*

        for dep in $(cat $layerClosure); do
          find $dep >> layerFiles
        done

        echo "Adding layer..."
        # Record the contents of the tarball with ls_tar.
        ls_tar temp/layer.tar >> baseFiles

        # Get the files in the new layer which were *not* present in
        # the old layer, and record them as newFiles.
        comm <(sort -n baseFiles|uniq) \
             <(sort -n layerFiles|uniq|grep -v ${layer}) -1 -3 > newFiles
        # Append the new files to the layer.
        tar -rpf temp/layer.tar --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" \
            --owner=0 --group=0 --no-recursion --files-from newFiles

        echo "Adding meta..."

        # If we have a parentID, add it to the json metadata.
        if [[ -n "$parentID" ]]; then
          cat temp/json | jshon -s "$parentID" -i parent > tmpjson
          mv tmpjson temp/json
        fi

        # Take the sha256 sum of the generated json and use it as the layer ID.
        # Compute the size and add it to the json under the 'Size' field.
        layerID=$(sha256sum temp/json|cut -d ' ' -f 1)
        size=$(stat --printf="%s" temp/layer.tar)
        cat temp/json | jshon -s "$layerID" -i id -n $size -i Size > tmpjson
        mv tmpjson temp/json

        # Use the temp folder we've been working on to create a new image.
        mv temp image/$layerID

        # Create image json and image manifest
        imageJson=$(cat ${baseJson} | jq ". + {\"rootfs\": {\"diff_ids\": [], \"type\": \"layers\"}}")
        manifestJson=$(jq -n "[{\"RepoTags\":[\"$imageName:$imageTag\"]}]")
        currentID=$layerID
        while [[ -n "$currentID" ]]; do
          layerChecksum=$(sha256sum image/$currentID/layer.tar | cut -d ' ' -f1)
          imageJson=$(echo "$imageJson" | jq ".history |= [{\"created\": \"${created}\"}] + .")
          imageJson=$(echo "$imageJson" | jq ".rootfs.diff_ids |= [\"sha256:$layerChecksum\"] + .")
          manifestJson=$(echo "$manifestJson" | jq ".[0].Layers |= [\"$currentID/layer.tar\"] + .")

          currentID=$(cat image/$currentID/json | (jshon -e parent -u 2>/dev/null || true))
        done

        imageJsonChecksum=$(echo "$imageJson" | sha256sum | cut -d ' ' -f1)
        echo "$imageJson" > "image/$imageJsonChecksum.json"
        manifestJson=$(echo "$manifestJson" | jq ".[0].Config = \"$imageJsonChecksum.json\"")
        echo "$manifestJson" > image/manifest.json

        # Store the json under the name image/repositories.
        jshon -n object \
          -n object -s "$layerID" -i "$imageTag" \
          -i "$imageName" > image/repositories

        # Make the image read-only.
        chmod -R a-w image

        echo "Cooking the image..."
        tar -C image --hard-dereference --sort=name --mtime="@$SOURCE_DATE_EPOCH" --owner=0 --group=0 --xform s:'./':: -c . | pigz -nT > $out

        echo "Finished."
      '';

    in
      result;

  # Build an image and populate its nix database with the provided
  # contents. The main purpose is to be able to use nix commands in
  # the container.
  # Be careful since this doesn't work well with multilayer.
  buildImageWithNixDb = args@{ contents ? null, extraCommands ? "", ... }:
    buildImage (args // {
      extraCommands = ''
        echo "Generating the nix database..."
        echo "Warning: only the database of the deepest Nix layer is loaded."
        echo "         If you want to use nix commands in the container, it would"
        echo "         be better to only have one layer that contains a nix store."
        # This requires Nix 1.12 or higher
        export NIX_REMOTE=local?root=$PWD
        ${nixUnstable}/bin/nix-store --load-db < ${nixRegistration contents}/db.dump

        # We fill the store in order to run the 'verify' command that
        # generates hash and size of output paths.
        # Note that when Nix 1.12 becomes the stable one, the database dump
        # generated by the exportReferencesGraph function will
        # contain sha and size. See
        # https://github.com/NixOS/nix/commit/c2b0d8749f7e77afc1c4b3e8dd36b7ee9720af4a
        storePaths=$(cat ${nixRegistration contents}/storePaths)
        echo "Copying everything to /nix/store (will take a while)..."
        cp -prd $storePaths nix/store/
        ${nixUnstable}/bin/nix-store --verify --check-contents

        mkdir -p nix/var/nix/gcroots/docker/
        for i in ${lib.concatStringsSep " " contents}; do
          ln -s $i nix/var/nix/gcroots/docker/$(basename $i)
        done;
      '' + extraCommands;
    });
}
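As a usage sketch for the functions above (the attribute path assumes this file is wired in as pkgs.dockerTools, which is how the examples file below refers to it):

    # Build one of the example images defined below and load the
    # resulting gzipped tarball into a local Docker daemon.
    nix-build '<nixpkgs>' -A dockerTools.examples.bash
    docker load < result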
@@ -1,40 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Deterministic layer json: https://github.com/docker/hub-feedback/issues/488

import sys
reload(sys)
sys.setdefaultencoding('UTF8')
import json

# If any of the keys below are equal to a certain value
# then we can delete it because it's the default value
SAFEDELS = {
    "Size": 0,
    "config": {
        "ExposedPorts": None,
        "MacAddress": "",
        "NetworkDisabled": False,
        "PortSpecs": None,
        "VolumeDriver": ""
    }
}
SAFEDELS["container_config"] = SAFEDELS["config"]

def makedet(j, safedels):
    for k, v in safedels.items():
        if k not in j:
            continue
        if type(v) == dict:
            makedet(j[k], v)
        elif j[k] == v:
            del j[k]

def main():
    j = json.load(sys.stdin)
    makedet(j, SAFEDELS)
    json.dump(j, sys.stdout, sort_keys=True)

if __name__ == '__main__':
    main()
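A plausible invocation of this normalizer (a sketch; the script's file name is not shown in the diff, so makedet.py is illustrative). It reads a layer's json on stdin and writes the key-sorted, default-stripped JSON to stdout:

    # Hypothetical file name; the script filters stdin to stdout.
    python makedet.py < layer/json > json.tmp && mv json.tmp layer/json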
@@ -1,127 +0,0 @@
# Examples of using the docker tools to build packages.
#
# This file defines several docker images. In order to use an image,
# build its derivation with `nix-build`, and then load the result with
# `docker load`. For example:
#
#  $ nix-build '<nixpkgs>' -A dockerTools.examples.redis
#  $ docker load < result

{ pkgs, buildImage, pullImage, shadowSetup, buildImageWithNixDb }:

rec {
  # 1. basic example
  bash = buildImage {
    name = "bash";
    contents = pkgs.bashInteractive;
  };

  # 2. service example, layered on another image
  redis = buildImage {
    name = "redis";
    tag = "latest";

    # for example's sake, we can layer redis on top of bash or debian
    fromImage = bash;
    # fromImage = debian;

    contents = pkgs.redis;
    runAsRoot = ''
      mkdir -p /data
    '';

    config = {
      Cmd = [ "/bin/redis-server" ];
      WorkingDir = "/data";
      Volumes = {
        "/data" = {};
      };
    };
  };

  # 3. another service example
  nginx = let
    nginxPort = "80";
    nginxConf = pkgs.writeText "nginx.conf" ''
      user nginx nginx;
      daemon off;
      error_log /dev/stdout info;
      pid /dev/null;
      events {}
      http {
        access_log /dev/stdout;
        server {
          listen ${nginxPort};
          index index.html;
          location / {
            root ${nginxWebRoot};
          }
        }
      }
    '';
    nginxWebRoot = pkgs.writeTextDir "index.html" ''
      <html><body><h1>Hello from NGINX</h1></body></html>
    '';
  in
  buildImage {
    name = "nginx-container";
    contents = pkgs.nginx;

    runAsRoot = ''
      #!${pkgs.stdenv.shell}
      ${shadowSetup}
      groupadd --system nginx
      useradd --system --gid nginx nginx
    '';

    config = {
      Cmd = [ "nginx" "-c" nginxConf ];
      ExposedPorts = {
        "${nginxPort}/tcp" = {};
      };
    };
  };

  # 4. example of pulling an image. could be used as a base for other images
  nixFromDockerHub = pullImage {
    imageName = "nixos/nix";
    imageTag = "1.11";
    # this hash will need to change if the tag is updated at Docker Hub
    sha256 = "0nncn9pn5miygan51w34c2p9qssi96jgsaqv44dxxdprc8pg0g83";
  };

  # 5. example of multiple contents, emacs and vi happily coexisting
  editors = buildImage {
    name = "editors";
    contents = [
      pkgs.coreutils
      pkgs.bash
      pkgs.emacs
      pkgs.vim
      pkgs.nano
    ];
  };

  # 6. nix example to play with the container nix store
  # docker run -it --rm nix nix-store -qR $(nix-build '<nixpkgs>' -A nix)
  nix = buildImageWithNixDb {
    name = "nix";
    contents = [
      # nix-store uses the cat program to display results, as specified by
      # the image env variable NIX_PAGER.
      pkgs.coreutils
      pkgs.nix
    ];
    config = {
      Env = [ "NIX_PAGER=cat" ];
    };
  };

  # 7. example of adding something on top of an image pulled by our
  # dockerTools chain.
  onTopOfPulledImage = buildImage {
    name = "onTopOfPulledImage";
    fromImage = nixFromDockerHub;
    contents = [ pkgs.hello ];
  };
}
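To try the nginx example after building it (a sketch; the image name and the exposed port come from the nginx-container definition above):

    nix-build '<nixpkgs>' -A dockerTools.examples.nginx
    docker load < result
    # Publish the container's port 80 (declared in ExposedPorts) on the host.
    docker run -d -p 8080:80 nginx-container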
@@ -5,8 +5,36 @@ with lib;
 let
   cfg = config.services.kubernetes;
 
-  skipAttrs = attrs: map (filterAttrs (k: v: k != "enable"))
-    (filter (v: !(hasAttr "enable" v) || v.enable) attrs);
+  # YAML config; see:
+  #   https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/
+  #   https://github.com/kubernetes/kubernetes/blob/release-1.10/pkg/kubelet/apis/kubeletconfig/v1beta1/types.go
+  #
+  # TODO: migrate the following flags to this config file
+  #
+  #   --pod-manifest-path
+  #   --address
+  #   --port
+  #   --tls-cert-file
+  #   --tls-private-key-file
+  #   --client-ca-file
+  #   --authentication-token-webhook
+  #   --authentication-token-webhook-cache-ttl
+  #   --authorization-mode
+  #   --healthz-bind-address
+  #   --healthz-port
+  #   --allow-privileged
+  #   --cluster-dns
+  #   --cluster-domain
+  #   --hairpin-mode
+  #   --feature-gates
+  kubeletConfig = pkgs.runCommand "kubelet-config.yaml" { } ''
+    echo > $out ${pkgs.lib.escapeShellArg (builtins.toJSON {
+      kind = "KubeletConfiguration";
+      apiVersion = "kubelet.config.k8s.io/v1beta1";
+      ${if cfg.kubelet.applyManifests then "staticPodPath" else null} =
+        manifests;
+    })}
+  '';
 
   infraContainer = pkgs.dockerTools.buildImage {
     name = "pause";
@@ -15,8 +43,7 @@ let
     config.Cmd = "/bin/pause";
   };
 
-  mkKubeConfig = name: cfg: pkgs.writeText "${name}-kubeconfig" (
-    builtins.toJSON (
+  mkKubeConfig = name: cfg: pkgs.writeText "${name}-kubeconfig" (builtins.toJSON (
     let name' =
       if name == "kubelet"
       then "system:node:${config.services.kubernetes.kubelet.hostname}"
@@ -51,12 +78,14 @@ let
   mkKubeConfigOptions = prefix: {
     server = mkOption {
       description = "${prefix} kube-apiserver server address.";
-      default = "http://${cfg.apiserver.address}:${toString cfg.apiserver.port}";
+      default = "http://${if cfg.apiserver.advertiseAddress != null
+                          then cfg.apiserver.advertiseAddress
+                          else "127.0.0.1"}:${toString cfg.apiserver.port}";
       type = types.str;
     };
 
     caFile = mkOption {
-      description = "${prefix} certificate authrority file used to connect to kube-apiserver.";
+      description = "${prefix} certificate authority file used to connect to kube-apiserver.";
       type = types.nullOr types.path;
       default = cfg.caFile;
     };
@@ -81,12 +110,18 @@ let
     keyFile = mkDefault cfg.kubeconfig.keyFile;
   };
 
-  cniConfig = pkgs.buildEnv {
-    name = "kubernetes-cni-config";
-    paths = imap (i: entry:
-      pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
-    ) cfg.kubelet.cni.config;
-  };
+  cniConfig =
+    if cfg.kubelet.cni.config != [] && !(isNull cfg.kubelet.cni.configDir) then
+      throw "Verbatim CNI-config and CNI configDir cannot both be set."
+    else if !(isNull cfg.kubelet.cni.configDir) then
+      cfg.kubelet.cni.configDir
+    else
+      (pkgs.buildEnv {
+        name = "kubernetes-cni-config";
+        paths = imap (i: entry:
+          pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
+        ) cfg.kubelet.cni.config;
+      });
 
   manifests = pkgs.buildEnv {
     name = "kubernetes-manifests";
@@ -136,7 +171,6 @@ let
   '';
 in {
 
-
   ###### interface
 
   options.services.kubernetes = {
@@ -223,18 +257,13 @@ in {
       type = types.listOf types.str;
     };
 
-    address = mkOption {
-      description = "Kubernetes apiserver listening address.";
-      default = "127.0.0.1";
-      type = types.str;
-    };
-
-    publicAddress = mkOption {
+    bindAddress = mkOption {
       description = ''
-        Kubernetes apiserver public listening address used for read only and
-        secure port.
+        The IP address on which to listen for the --secure-port port.
+        The associated interface(s) must be reachable by the rest
+        of the cluster, and by CLI/web clients.
       '';
-      default = cfg.apiserver.address;
+      default = "0.0.0.0";
       type = types.str;
     };
 
@@ -289,7 +318,7 @@ in {
     tokenAuthFile = mkOption {
      description = ''
        Kubernetes apiserver token authentication file. See
-        <link xlink:href="https://kubernetes.io/docs/admin/authentication.html"/>
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
      '';
      default = null;
      type = types.nullOr types.path;
@@ -298,7 +327,7 @@ in {
     basicAuthFile = mkOption {
      description = ''
        Kubernetes apiserver basic authentication file. See
-        <link xlink:href="https://kubernetes.io/docs/admin/authentication.html"/>
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authentication"/>
      '';
      default = pkgs.writeText "users" ''
        kubernetes,admin,0
@@ -308,22 +337,31 @@ in {
 
     authorizationMode = mkOption {
       description = ''
-        Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/RBAC). See
-        <link xlink:href="https://kubernetes.io/docs/admin/authorization.html"/>
+        Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
       '';
       default = ["RBAC" "Node"];
-      type = types.listOf (types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "RBAC" "Node"]);
+      type = types.listOf (types.enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
     };
 
     authorizationPolicy = mkOption {
       description = ''
         Kubernetes apiserver authorization policy file. See
-        <link xlink:href="https://kubernetes.io/docs/admin/authorization.html"/>
+        <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/authorization/"/>
       '';
       default = [];
       type = types.listOf types.attrs;
     };
 
+    webhookConfig = mkOption {
+      description = ''
+        Kubernetes apiserver Webhook config file. It uses the kubeconfig file format.
+        See <link xlink:href="https://kubernetes.io/docs/reference/access-authn-authz/webhook/"/>
+      '';
+      default = null;
+      type = types.nullOr types.path;
+    };
+
     allowPrivileged = mkOption {
       description = "Whether to allow privileged containers on Kubernetes.";
       default = true;
@@ -342,16 +380,16 @@ in {
     runtimeConfig = mkOption {
       description = ''
         Api runtime configuration. See
-        <link xlink:href="https://kubernetes.io/docs/admin/cluster-management.html"/>
+        <link xlink:href="https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/"/>
       '';
       default = "authentication.k8s.io/v1beta1=true";
       example = "api/all=false,api/v1=true";
       type = types.str;
     };
 
-    admissionControl = mkOption {
+    enableAdmissionPlugins = mkOption {
       description = ''
-        Kubernetes admission control plugins to use. See
+        Kubernetes admission control plugins to enable. See
         <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
       '';
       default = ["NamespaceLifecycle" "LimitRanger" "ServiceAccount" "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds" "NodeRestriction"];
@@ -363,6 +401,15 @@ in {
       type = types.listOf types.str;
     };
 
+    disableAdmissionPlugins = mkOption {
+      description = ''
+        Kubernetes admission control plugins to disable. See
+        <link xlink:href="https://kubernetes.io/docs/admin/admission-controllers/"/>
+      '';
+      default = [];
+      type = types.listOf types.str;
+    };
+
     serviceAccountKeyFile = mkOption {
       description = ''
         Kubernetes apiserver PEM-encoded x509 RSA private or public key file,
@@ -583,6 +630,7 @@ in {
       type = types.bool;
     };
 
+    # TODO: remove this deprecated flag
     cadvisorPort = mkOption {
       description = "Kubernetes kubelet local cadvisor port.";
       default = 4194;
@@ -639,6 +687,12 @@ in {
         }]
       '';
     };
 
+    configDir = mkOption {
+      description = "Path to Kubernetes CNI configuration directory.";
+      type = types.nullOr types.path;
+      default = null;
+    };
+
   };
 
   manifests = mkOption {
@@ -792,13 +846,13 @@ in {
       path = with pkgs; [ gitMinimal openssh docker utillinux iproute ethtool thin-provisioning-tools iptables socat ] ++ cfg.path;
       serviceConfig = {
         Slice = "kubernetes.slice";
+        CPUAccounting = true;
+        MemoryAccounting = true;
         ExecStart = ''${cfg.package}/bin/kubelet \
-          ${optionalString cfg.kubelet.applyManifests
-            "--pod-manifest-path=${manifests}"} \
           ${optionalString (taints != "")
            "--register-with-taints=${taints}"} \
           --kubeconfig=${mkKubeConfig "kubelet" cfg.kubelet.kubeconfig} \
-          --require-kubeconfig \
+          --config=${kubeletConfig} \
           --address=${cfg.kubelet.address} \
           --port=${toString cfg.kubelet.port} \
           --register-node=${boolToString cfg.kubelet.registerNode} \
@@ -838,7 +892,7 @@ in {
     };
 
     # Always include cni plugins
-    services.kubernetes.kubelet.cni.packages = [pkgs.cni];
+    services.kubernetes.kubelet.cni.packages = [pkgs.cni-plugins];
 
     boot.kernelModules = ["br_netfilter"];
 
@@ -863,7 +917,7 @@ in {
 
    (mkIf cfg.apiserver.enable {
      systemd.services.kube-apiserver = {
-        description = "Kubernetes Kubelet Service";
+        description = "Kubernetes APIServer Service";
        wantedBy = [ "kubernetes.target" ];
        after = [ "network.target" "docker.service" ];
        serviceConfig = {
@@ -877,7 +931,7 @@ in {
          ${optionalString (cfg.etcd.keyFile != null)
            "--etcd-keyfile=${cfg.etcd.keyFile}"} \
          --insecure-port=${toString cfg.apiserver.port} \
-          --bind-address=0.0.0.0 \
+          --bind-address=${cfg.apiserver.bindAddress} \
          ${optionalString (cfg.apiserver.advertiseAddress != null)
            "--advertise-address=${cfg.apiserver.advertiseAddress}"} \
          --allow-privileged=${boolToString cfg.apiserver.allowPrivileged}\
@@ -905,11 +959,15 @@ in {
            (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.apiserver.authorizationPolicy)
          }"
          } \
+          ${optionalString (elem "Webhook" cfg.apiserver.authorizationMode)
+            "--authorization-webhook-config-file=${cfg.apiserver.webhookConfig}"
+          } \
          --secure-port=${toString cfg.apiserver.securePort} \
          --service-cluster-ip-range=${cfg.apiserver.serviceClusterIpRange} \
          ${optionalString (cfg.apiserver.runtimeConfig != "")
            "--runtime-config=${cfg.apiserver.runtimeConfig}"} \
-          --admission_control=${concatStringsSep "," cfg.apiserver.admissionControl} \
+          --enable-admission-plugins=${concatStringsSep "," cfg.apiserver.enableAdmissionPlugins} \
+          --disable-admission-plugins=${concatStringsSep "," cfg.apiserver.disableAdmissionPlugins} \
          ${optionalString (cfg.apiserver.serviceAccountKeyFile!=null)
            "--service-account-key-file=${cfg.apiserver.serviceAccountKeyFile}"} \
          ${optionalString cfg.verbose "--v=6"} \
@@ -1000,7 +1058,7 @@ in {
        description = "Kubernetes Proxy Service";
        wantedBy = [ "kubernetes.target" ];
        after = [ "kube-apiserver.service" ];
-        path = [pkgs.iptables pkgs.conntrack_tools pkgs.kmod];
+        path = [pkgs.iptables pkgs.conntrack_tools];
        serviceConfig = {
          Slice = "kubernetes.slice";
          ExecStart = ''${cfg.package}/bin/kube-proxy \
@@ -1065,6 +1123,7 @@ in {
        wantedBy = [ "kubernetes.target" ];
        after = [ "kube-apiserver.service" ];
        environment.ADDON_PATH = "/etc/kubernetes/addons/";
+        path = [ pkgs.gawk ];
        serviceConfig = {
          Slice = "kubernetes.slice";
          ExecStart = "${cfg.package}/bin/kube-addons";
@@ -1094,7 +1153,7 @@ in {
    ];
 
    environment.systemPackages = [ cfg.package ];
-    users.extraUsers = singleton {
+    users.users = singleton {
      name = "kubernetes";
      uid = config.ids.uids.kubernetes;
      description = "Kubernetes user";
@@ -1103,7 +1162,7 @@ in {
      home = cfg.dataDir;
      createHome = true;
    };
-    users.extraGroups.kubernetes.gid = config.ids.gids.kubernetes;
+    users.groups.kubernetes.gid = config.ids.gids.kubernetes;
 
    # dns addon is enabled by default
    services.kubernetes.addons.dns.enable = mkDefault true;
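For illustration, the kubelet-config.yaml produced by the new kubeletConfig derivation above is a single JSON document (JSON is valid YAML). Roughly, assuming applyManifests is enabled; the store paths here are illustrative:

    cat /nix/store/...-kubelet-config.yaml
    # {"apiVersion":"kubelet.config.k8s.io/v1beta1","kind":"KubeletConfiguration",
    #  "staticPodPath":"/nix/store/...-kubernetes-manifests"}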
@@ -1,7 +1,6 @@
 self: super: {
-  dockerTools = super.callPackage ./default.nix { go = self.go_1_9; };
   # glusterfs = super.glusterfs.overrideAttrs (old: {
   #   buildInputs = old.buildInputs ++ [ self.lvm2 ];
   # });
-  # super.config.services.kubernetes = super.callPackage ./overlay/kubernetes.nix {};
+  #super.config.services.kubernetes = super.callPackage ./overlay/kubernetes.nix {};
 }
@@ -1,32 +0,0 @@
{ stdenv, lib, docker, vmTools, utillinux, curl, kmod, dhcp, cacert, e2fsprogs }:
let
  nameReplace = name: builtins.replaceStrings ["/" ":"] ["-" "-"] name;
in
# For simplicity we only support sha256.
{ imageName, imageTag ? "latest", imageId ? "${imageName}:${imageTag}"
, sha256, name ? (nameReplace "docker-image-${imageName}-${imageTag}.tar") }:
let
  pullImage = vmTools.runInLinuxVM (
    stdenv.mkDerivation {
      inherit name imageId;

      certs = "${cacert}/etc/ssl/certs/ca-bundle.crt";

      builder = ./pull.sh;

      nativeBuildInputs = [ curl utillinux docker kmod dhcp cacert e2fsprogs ];

      outputHashAlgo = "sha256";
      outputHash = sha256;

      impureEnvVars = lib.fetchers.proxyImpureEnvVars;

      preVM = vmTools.createEmptyImage {
        size = 2048;
        fullName = "${name}-disk";
      };

      QEMU_OPTS = "-netdev user,id=net0 -device virtio-net-pci,netdev=net0";
    });
in
  pullImage
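A sketch of calling this fixed-output fetcher directly (it mirrors the nixFromDockerHub example above; the hash belongs to the 1.11 tag):

    nix-build -E 'with import <nixpkgs> {}; dockerTools.pullImage {
      imageName = "nixos/nix";
      imageTag = "1.11";
      sha256 = "0nncn9pn5miygan51w34c2p9qssi96jgsaqv44dxxdprc8pg0g83";
    }'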
@@ -1,36 +0,0 @@
source $stdenv/setup

mkdir -p /var/lib/docker
mkfs.ext4 /dev/vda
mount -t ext4 /dev/vda /var/lib/docker

modprobe virtio_net
dhclient eth0

mkdir -p /etc/ssl/certs/
cp "$certs" "/etc/ssl/certs/"

# from https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount
mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup
cd /sys/fs/cgroup
for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do
  mkdir -p $sys
  if ! mountpoint -q $sys; then
    if ! mount -n -t cgroup -o $sys cgroup $sys; then
      rmdir $sys || true
    fi
  fi
done

# run docker daemon
dockerd -H tcp://127.0.0.1:5555 -H unix:///var/run/docker.sock &

until docker ps 2>/dev/null; do
  printf '.'
  sleep 1
done

rm -r $out

docker pull ${imageId}
docker save ${imageId} > $out
@@ -1,24 +0,0 @@
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	ts, err := tarsum.NewTarSum(os.Stdin, true, tarsum.Version1)
	if err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	if _, err = io.Copy(ioutil.Discard, ts); err != nil {
		fmt.Println(err)
		os.Exit(1)
	}

	fmt.Println(ts.Sum(nil))
}
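The resulting binary reads a tarball on stdin and prints its tarsum; this is how the Nix code above invokes it (tarsum=$(${tarsum} < $out/layer.tar)):

    # Checksum a layer tarball the same way mkPureLayer does.
    ./tarsum < layer.tar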