Compare commits
288 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
d20bde221b | ||
|
|
91ab080476 | ||
|
|
3fb82b0fde | ||
|
|
730ee6952a | ||
|
|
76d2578b77 | ||
|
|
ca9bf8ce5c | ||
|
|
de7e563ad2 | ||
|
|
d102ba94e5 | ||
|
|
cf6d74a2be | ||
|
|
80cea505c4 | ||
|
|
c0c72219a9 | ||
|
|
6bf15d6577 | ||
|
|
2fdcf19223 | ||
|
|
c024dcd8d9 | ||
|
|
0570d264ed | ||
|
|
f0a9a9fb04 | ||
|
|
bf0a9190c8 | ||
|
|
1add81a1ce | ||
|
|
8bfe96968d | ||
|
|
aab53102db | ||
|
|
1265766b3a | ||
|
|
3765d41b5c | ||
|
|
939d2992f1 | ||
|
|
a92ea9c386 | ||
|
|
d880f69836 | ||
|
|
ea989a78c4 | ||
|
|
a615d551a5 | ||
|
|
6995122edd | ||
|
|
ad6c06d075 | ||
|
|
fe3c9d22d5 | ||
|
|
7d54465eb0 | ||
|
|
1505156bfa | ||
|
|
6ddcd3f716 | ||
|
|
2f2fb40c56 | ||
|
|
c3933ab7ae | ||
|
|
5c1503aff5 | ||
|
|
f34f2ce857 | ||
|
|
8d5724e70e | ||
|
|
23c4420630 | ||
|
|
09eae003cf | ||
|
|
4ba50052a5 | ||
|
|
9c5393dfa0 | ||
|
|
9c76d246f6 | ||
|
|
096c6b5848 | ||
|
|
8b51f53bfe | ||
|
|
8ab895b38a | ||
|
|
3a0b1b197c | ||
|
|
3d8fed70e7 | ||
|
|
c2bca952a4 | ||
|
|
0c5468ba29 | ||
|
|
18b1f446ab | ||
|
|
f23ef9b293 | ||
|
|
e74f8c12e4 | ||
|
|
4dffbf031c | ||
|
|
49ed8a0b2c | ||
|
|
05977b507d | ||
|
|
7a41d55e61 | ||
|
|
e7174adaf1 | ||
|
|
f38ac6ff0b | ||
|
|
17f87be607 | ||
|
|
658df9e905 | ||
|
|
aa31e6baf6 | ||
|
|
a6e70b653d | ||
|
|
e061deee3e | ||
|
|
79ecb74140 | ||
|
|
ce2b04e34e | ||
|
|
1111f81fbf | ||
|
|
f057c22649 | ||
|
|
f4742240c2 | ||
|
|
6dcb04d9d6 | ||
|
|
dbd8611288 | ||
|
|
b135d68b6f | ||
|
|
6987b36981 | ||
|
|
cec9ffb36b | ||
|
|
47298e7f37 | ||
|
|
c7519e31a2 | ||
|
|
226d7239e5 | ||
|
|
fa044d0129 | ||
|
|
7418b6adee | ||
|
|
4eb10fd63a | ||
|
|
cee3e47550 | ||
|
|
7459a75cac | ||
|
|
a2b28d03b8 | ||
|
|
f226e090fe | ||
|
|
0cc62575a7 | ||
|
|
e5a1a26a4a | ||
|
|
89717949c4 | ||
|
|
fd48454649 | ||
|
|
8fe0d7231e | ||
|
|
8607e26f5e | ||
|
|
9cf80692d2 | ||
|
|
798a27075c | ||
|
|
4b6c41f950 | ||
|
|
eebb82bb3f | ||
|
|
09131ad78f | ||
|
|
7c402d329a | ||
|
|
a80868209f | ||
|
|
113da00d0a | ||
|
|
187ba13f73 | ||
|
|
874c4b757b | ||
|
|
3d76736421 | ||
|
|
20b130a666 | ||
|
|
5eab0a9730 | ||
|
|
23f7feff20 | ||
|
|
c7adfe432a | ||
|
|
4f85381d8c | ||
|
|
d8cf7df70b | ||
|
|
c08698c11a | ||
|
|
8ce0aafcd3 | ||
|
|
41289bdd58 | ||
|
|
de74f56e87 | ||
|
|
bbb770b6c0 | ||
|
|
fea14ae5a4 | ||
|
|
7da8fa586b | ||
|
|
a4c4de3cf4 | ||
|
|
fded8bdcc4 | ||
|
|
49d17bded0 | ||
|
|
fea57446a5 | ||
|
|
41fc498166 | ||
|
|
3e76bbd51f | ||
|
|
7cb09b78db | ||
|
|
062f94d0f7 | ||
|
|
cb463d01d2 | ||
|
|
1322db7ec3 | ||
|
|
07bc55786b | ||
|
|
397ec03285 | ||
|
|
138e061c17 | ||
|
|
b72da80f13 | ||
|
|
3de08b138e | ||
|
|
040d9fcee6 | ||
|
|
f7f6949c53 | ||
|
|
f950671a1c | ||
|
|
c1fb17a645 | ||
|
|
e3f42c86f3 | ||
|
|
cf36276d21 | ||
|
|
7d36ab2ff9 | ||
|
|
5f8b01bce3 | ||
|
|
346213415d | ||
|
|
2a973474fd | ||
|
|
02e3ead7eb | ||
|
|
95a665c057 | ||
|
|
99eadced3a | ||
|
|
e23e0ef5bf | ||
|
|
f1f53e7b84 | ||
|
|
5dfa5398b8 | ||
|
|
669d91dfe3 | ||
|
|
e5ad7aea55 | ||
|
|
ceda92b0f2 | ||
|
|
0563a3c795 | ||
|
|
5f36094b21 | ||
|
|
f0e2d705dc | ||
|
|
ca3fcd6993 | ||
|
|
2040cc5978 | ||
|
|
1ccea93c16 | ||
|
|
9c7b02aba9 | ||
|
|
ca4fe84df2 | ||
|
|
f9a21d2ff9 | ||
|
|
1685ed342f | ||
|
|
f71ecf0802 | ||
|
|
236b2f1ed6 | ||
|
|
7ecf7c6f28 | ||
|
|
fca7769b33 | ||
|
|
6c3d02f049 | ||
|
|
6629aff889 | ||
|
|
68d6c343a5 | ||
|
|
607092ed32 | ||
|
|
1e2cdba3a3 | ||
|
|
0b8333f7bc | ||
|
|
a522eae6e9 | ||
|
|
cf1bb48fa8 | ||
|
|
c3bfdfb5de | ||
|
|
d6642fc191 | ||
|
|
e6e8d0015d | ||
|
|
18de6b3883 | ||
|
|
f0b8561990 | ||
|
|
7e2986f51c | ||
|
|
7aab18e0a4 | ||
|
|
8ece19132d | ||
|
|
d3309ebb1a | ||
|
|
38d76d7ec8 | ||
|
|
08c2ac2e48 | ||
|
|
440a7a19a5 | ||
|
|
7ec893ae3f | ||
|
|
d43675afa3 | ||
|
|
7e21322167 | ||
|
|
17c185b40a | ||
|
|
3ba80f1071 | ||
|
|
83cd82906d | ||
|
|
403a205b2a | ||
|
|
1c34c2a1ee | ||
|
|
a8e1746476 | ||
|
|
98faf1e5f9 | ||
|
|
2c2b8dfab9 | ||
|
|
d25b9adff1 | ||
|
|
c9050fef72 | ||
|
|
5c53f1c26d | ||
|
|
a40fcbd990 | ||
|
|
f1f3e35086 | ||
|
|
4119893f76 | ||
|
|
b2aecc497a | ||
|
|
8d8f990db8 | ||
|
|
27ed5bdc98 | ||
|
|
5a0a3dd959 | ||
|
|
c7d9bc7dea | ||
|
|
d4a7059bef | ||
|
|
a7a5ef516b | ||
|
|
f5699865da | ||
|
|
187a04f501 | ||
|
|
547d1f6b9b | ||
|
|
4a4a2e769f | ||
|
|
210d272ffe | ||
|
|
f3cf5dc858 | ||
|
|
6ab7d727e2 | ||
|
|
19fee6fa5e | ||
|
|
aac258ad1c | ||
|
|
a5211510db | ||
|
|
2336174036 | ||
|
|
d9ca4187b9 | ||
|
|
26e1c82fd0 | ||
|
|
d0b35cec63 | ||
|
|
9a581e0a3e | ||
|
|
b792c7df88 | ||
|
|
5b3b0009bc | ||
|
|
319856b52f | ||
|
|
23e65017c1 | ||
|
|
1d379d07bd | ||
|
|
5ded3accab | ||
|
|
93bc8e3a29 | ||
|
|
e8b23bc582 | ||
|
|
b3beb91b52 | ||
|
|
b39a2ad69a | ||
|
|
62c1fb49ee | ||
|
|
d7a125a706 | ||
|
|
94331af79c | ||
|
|
ed26f5d76c | ||
|
|
d2651d8613 | ||
|
|
c45bf4c8dd | ||
|
|
17cc8a2fb7 | ||
|
|
1b2adcf02c | ||
|
|
61f701be7a | ||
|
|
10c44c9374 | ||
|
|
b095881d6f | ||
|
|
497ae17bf0 | ||
|
|
4907bc533d | ||
|
|
a47f278df0 | ||
|
|
bceef5e4f6 | ||
|
|
88bec8766a | ||
|
|
0c2ba8f5b2 | ||
|
|
e87b2ff6f6 | ||
|
|
88229a5f26 | ||
|
|
b931633808 | ||
|
|
3870154ae3 | ||
|
|
ebfb3cda79 | ||
|
|
89a7c7f022 | ||
|
|
d25aefbf8d | ||
|
|
775fa06102 | ||
|
|
e995ed2f45 | ||
|
|
cb87448371 | ||
|
|
2d1c515d25 | ||
|
|
c290324185 | ||
|
|
f1b34887f8 | ||
|
|
7c12efc851 | ||
|
|
e410f69f52 | ||
|
|
173707fb2d | ||
|
|
38ab3afae1 | ||
|
|
710f7bff27 | ||
|
|
6466099ada | ||
|
|
72fc43bd10 | ||
|
|
148fd8579e | ||
|
|
ecb9ecee37 | ||
|
|
b249633b5a | ||
|
|
7515c10a69 | ||
|
|
cf551180e7 | ||
|
|
9f18ae0e4b | ||
|
|
ca1a8e9117 | ||
|
|
5bef5f15ff | ||
|
|
d57adabf0c | ||
|
|
dcd70b62eb | ||
|
|
63f2c4ea00 | ||
|
|
0dec8ad9f4 | ||
|
|
cbc650a9aa | ||
|
|
5e559df1d0 | ||
|
|
d7ad16e6d6 | ||
|
|
008bee5d50 | ||
|
|
375c6605b6 | ||
|
|
fcae03a955 | ||
|
|
5eaa3207b7 | ||
|
|
d1c0ed9bf3 |
65
.circleci/config.yml
Normal file
65
.circleci/config.yml
Normal file
@@ -0,0 +1,65 @@
|
||||
version: 2
|
||||
|
||||
.job_template: &job_template
|
||||
machine:
|
||||
enabled: true
|
||||
steps:
|
||||
- checkout
|
||||
- run:
|
||||
command: ./circle-test.sh
|
||||
- persist_to_workspace:
|
||||
root: .
|
||||
paths: [ 'ci-workspace' ]
|
||||
|
||||
jobs:
|
||||
amd64:
|
||||
<<: *job_template
|
||||
arm64:
|
||||
<<: *job_template
|
||||
armhf:
|
||||
<<: *job_template
|
||||
armel:
|
||||
<<: *job_template
|
||||
deploy:
|
||||
docker:
|
||||
- image: circleci/python:latest
|
||||
steps:
|
||||
- setup_remote_docker:
|
||||
version: 18.06.0-ce
|
||||
- checkout
|
||||
- attach_workspace:
|
||||
at: .
|
||||
- run:
|
||||
command: ./circle-deploy.sh
|
||||
|
||||
|
||||
|
||||
workflows:
|
||||
version: 2
|
||||
build:
|
||||
jobs:
|
||||
- amd64:
|
||||
filters:
|
||||
tags:
|
||||
only: /^v.*/
|
||||
- arm64:
|
||||
filters:
|
||||
tags:
|
||||
only: /^v.*/
|
||||
- armhf:
|
||||
filters:
|
||||
tags:
|
||||
only: /^v.*/
|
||||
- armel:
|
||||
filters:
|
||||
tags:
|
||||
only: /^v.*/
|
||||
- deploy:
|
||||
requires:
|
||||
- amd64
|
||||
- arm64
|
||||
- armhf
|
||||
- armel
|
||||
filters:
|
||||
tags:
|
||||
only: /^v.*/
|
||||
@@ -1 +1,5 @@
|
||||
**/*.sw*
|
||||
.tox
|
||||
.git
|
||||
**/__pycache__
|
||||
.pipenv
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -3,8 +3,11 @@
|
||||
.cache
|
||||
__pycache__
|
||||
.tox
|
||||
.pipenv
|
||||
.eggs
|
||||
UNKNOWN.egg-info
|
||||
.env
|
||||
ci-workspace
|
||||
|
||||
# WIP/test stuff
|
||||
doco.yml
|
||||
|
||||
24
.travis.yml
24
.travis.yml
@@ -1,24 +0,0 @@
|
||||
sudo: required
|
||||
services:
|
||||
- docker
|
||||
language: python
|
||||
env:
|
||||
global:
|
||||
- QEMU_VER=v2.9.1
|
||||
matrix:
|
||||
- ARCH=amd64
|
||||
- ARCH=armhf
|
||||
- ARCH=aarch64
|
||||
python:
|
||||
- "2.7"
|
||||
install:
|
||||
- pip install -r requirements.txt
|
||||
script:
|
||||
# prepare qemu
|
||||
- docker run --rm --privileged multiarch/qemu-user-static:register --reset
|
||||
# generate and build dockerfile
|
||||
- ./Dockerfile.py --arch=${ARCH} -v
|
||||
- docker images
|
||||
# run docker build & tests
|
||||
# 2 parallel max b/c race condition with docker fixture (I think?)
|
||||
- py.test -vv -n 2 -k "${ARCH}" ./test/
|
||||
5
CHANGELOG.md
Normal file
5
CHANGELOG.md
Normal file
@@ -0,0 +1,5 @@
|
||||
# Docker Pi-Hole changelog
|
||||
|
||||
Notes about releases will be documented on [docker-pi-hole's github releases page](https://github.com/pi-hole/docker-pi-hole/releases). Breaking changes will be copied to the top of the docker repo's README file to assist with common upgrade issues.
|
||||
|
||||
See the [Pi-hole releases](https://github.com/pi-hole/pi-hole/releases) for details on updates unreleated to docker image releases
|
||||
117
Dockerfile.py
117
Dockerfile.py
@@ -1,33 +1,34 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
#!/usr/bin/env python3
|
||||
""" Dockerfile.py - generates and build dockerfiles
|
||||
|
||||
Usage:
|
||||
Dockerfile.py [--arch=<arch> ...] [--skip=<arch> ...] [-v] [--no-build | --no-generate] [--no-cache]
|
||||
Dockerfile.py [--hub_tag=<tag>] [--arch=<arch> ...] [-v] [-t] [--no-build | --no-generate] [--no-cache]
|
||||
|
||||
Options:
|
||||
--no-build Skip building the docker images
|
||||
--no-cache Build without using any cache data
|
||||
--no-generate Skip generating Dockerfiles from template
|
||||
--arch=<arch> What Architecture(s) to build [default: amd64 armel armhf aarch64]
|
||||
--skip=<arch> What Architectures(s) to skip [default: None]
|
||||
--hub_tag=<tag> What the Docker Hub Image should be tagged as [default: None]
|
||||
--arch=<arch> What Architecture(s) to build [default: amd64 armel armhf arm64]
|
||||
-v Print docker's command output [default: False]
|
||||
-t Print docker's build time [default: False]
|
||||
|
||||
Examples:
|
||||
"""
|
||||
|
||||
from docopt import docopt
|
||||
|
||||
from jinja2 import Environment, FileSystemLoader
|
||||
from docopt import docopt
|
||||
import os
|
||||
import testinfra
|
||||
import subprocess
|
||||
import sys
|
||||
|
||||
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
|
||||
|
||||
base_vars = {
|
||||
'name': 'pihole/pihole',
|
||||
'maintainer' : 'adam@diginc.us',
|
||||
's6_version' : 'v1.21.4.0',
|
||||
's6_version' : 'v1.22.1.0',
|
||||
}
|
||||
|
||||
os_base_vars = {
|
||||
@@ -35,45 +36,53 @@ os_base_vars = {
|
||||
'php_error_log': '/var/log/lighttpd/error.log'
|
||||
}
|
||||
|
||||
__version__ = None
|
||||
dot = os.path.abspath('.')
|
||||
with open('{}/VERSION'.format(dot), 'r') as v:
|
||||
raw_version = v.read().strip()
|
||||
__version__ = raw_version.replace('release/', 'release-')
|
||||
|
||||
images = {
|
||||
'v4.0': [
|
||||
__version__: [
|
||||
{
|
||||
'base': 'debian:stretch',
|
||||
'arch': 'amd64'
|
||||
'base': 'pihole/debian-base:latest',
|
||||
'arch': 'amd64',
|
||||
's6arch': 'amd64',
|
||||
},
|
||||
{
|
||||
'base': 'multiarch/debian-debootstrap:armel-stretch-slim',
|
||||
'arch': 'armel'
|
||||
'arch': 'armel',
|
||||
's6arch': 'arm',
|
||||
},
|
||||
{
|
||||
'base': 'multiarch/debian-debootstrap:armhf-stretch-slim',
|
||||
'arch': 'armhf'
|
||||
'arch': 'arm',
|
||||
's6arch' : 'arm',
|
||||
},
|
||||
{
|
||||
'base': 'multiarch/debian-debootstrap:arm64-stretch-slim',
|
||||
'arch': 'aarch64'
|
||||
'arch': 'arm64',
|
||||
's6arch' : 'aarch64',
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
def generate_dockerfiles(args):
|
||||
if args['--no-generate']:
|
||||
print " ::: Skipping Dockerfile generation"
|
||||
print(" ::: Skipping Dockerfile generation")
|
||||
return
|
||||
|
||||
for version, archs in images.iteritems():
|
||||
for version, archs in images.items():
|
||||
for image in archs:
|
||||
if image['arch'] not in args['--arch'] or image['arch'] in args['--skip']:
|
||||
return
|
||||
s6arch = image['arch']
|
||||
if image['arch'] == 'armel':
|
||||
s6arch = 'arm'
|
||||
if image['arch'] not in args['--arch']:
|
||||
continue
|
||||
s6arch = image['s6arch'] if image['s6arch'] else image['arch']
|
||||
merged_data = dict(
|
||||
{ 'version': version }.items() +
|
||||
base_vars.items() +
|
||||
os_base_vars.items() +
|
||||
image.items() +
|
||||
{ 's6arch': s6arch }.items()
|
||||
list({ 'version': version }.items()) +
|
||||
list(base_vars.items()) +
|
||||
list(os_base_vars.items()) +
|
||||
list(image.items()) +
|
||||
list({ 's6arch': s6arch }.items())
|
||||
)
|
||||
j2_env = Environment(loader=FileSystemLoader(THIS_DIR),
|
||||
trim_blocks=True)
|
||||
@@ -86,43 +95,55 @@ def generate_dockerfiles(args):
|
||||
|
||||
def build_dockerfiles(args):
|
||||
if args['--no-build']:
|
||||
print " ::: Skipping Dockerfile building"
|
||||
print(" ::: Skipping Dockerfile building")
|
||||
return
|
||||
|
||||
for arch in args['--arch']:
|
||||
# TODO: include from external .py that can be shared with Dockerfile.py / Tests / deploy scripts '''
|
||||
build('pihole', 'v4.0', arch, args)
|
||||
build('pihole', arch, args)
|
||||
|
||||
|
||||
def build(docker_repo, version, arch, args):
|
||||
run_local = testinfra.get_backend(
|
||||
"local://"
|
||||
).get_module("Command").run
|
||||
def run_and_stream_command_output(command, args):
|
||||
print("Running", command)
|
||||
build_result = subprocess.Popen(command.split(), stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
|
||||
bufsize=1, universal_newlines=True)
|
||||
if args['-v']:
|
||||
while build_result.poll() is None:
|
||||
for line in build_result.stdout:
|
||||
print(line, end='')
|
||||
build_result.wait()
|
||||
if build_result.returncode != 0:
|
||||
print(" ::: Error running".format(command))
|
||||
print(build_result.stderr)
|
||||
|
||||
|
||||
def build(docker_repo, arch, args):
|
||||
dockerfile = 'Dockerfile_{}'.format(arch)
|
||||
repo_tag = '{}:{}_{}'.format(docker_repo, version, arch)
|
||||
repo_tag = '{}:{}_{}'.format(docker_repo, __version__, arch)
|
||||
cached_image = '{}/{}'.format('pihole', repo_tag)
|
||||
print(" ::: Building {}".format(repo_tag))
|
||||
time=''
|
||||
if args['-t']:
|
||||
time='time '
|
||||
no_cache = ''
|
||||
if args['--no-cache']:
|
||||
no_cache = '--no-cache'
|
||||
build_command = 'docker build {no_cache} --pull --cache-from="{cache},{create_tag}" -f {dockerfile} -t {create_tag} .'\
|
||||
.format(no_cache=no_cache, cache=cached_image, dockerfile=dockerfile, create_tag=repo_tag)
|
||||
print " ::: Building {} into {}".format(dockerfile, repo_tag)
|
||||
build_command = '{time}docker build {no_cache} --pull --cache-from="{cache},{create_tag}" -f {dockerfile} -t {create_tag} .'\
|
||||
.format(time=time, no_cache=no_cache, cache=cached_image, dockerfile=dockerfile, create_tag=repo_tag)
|
||||
print(" ::: Building {} into {}".format(dockerfile, repo_tag))
|
||||
run_and_stream_command_output(build_command, args)
|
||||
if args['-v']:
|
||||
print build_command, '\n'
|
||||
build_result = run_local(build_command)
|
||||
if args['-v']:
|
||||
print build_result.stdout
|
||||
print build_result.stderr
|
||||
if build_result.rc != 0:
|
||||
print " ::: Building {} encountered an error".format(dockerfile)
|
||||
print build_result.stderr
|
||||
assert build_result.rc == 0
|
||||
print(build_command, '\n')
|
||||
if args['--hub_tag']:
|
||||
hub_tag_command = "{time}docker tag {create_tag} {hub_tag}"\
|
||||
.format(time=time, create_tag=repo_tag, hub_tag=args['--hub_tag'])
|
||||
print(" ::: Tagging {} into {}".format(repo_tag, args['--hub_tag']))
|
||||
run_and_stream_command_output(hub_tag_command, args)
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
args = docopt(__doc__, version='Dockerfile 1.0')
|
||||
# print args
|
||||
args = docopt(__doc__, version='Dockerfile 1.1')
|
||||
if args['-v']:
|
||||
print(args)
|
||||
|
||||
generate_dockerfiles(args)
|
||||
build_dockerfiles(args)
|
||||
|
||||
8
Dockerfile.sh
Executable file
8
Dockerfile.sh
Executable file
@@ -0,0 +1,8 @@
|
||||
#!/usr/bin/env sh
|
||||
# alpine sh only
|
||||
|
||||
set -eux
|
||||
./Dockerfile.py -v --arch="${ARCH}" --hub_tag="${ARCH_IMAGE}"
|
||||
# TODO: Add junitxml output and have circleci consume it
|
||||
# 2 parallel max b/c race condition with docker fixture (I think?)
|
||||
py.test -vv -n 2 -k "${ARCH}" ./test/
|
||||
@@ -1,16 +1,14 @@
|
||||
FROM {{ pihole.base }}
|
||||
|
||||
COPY install.sh /usr/local/bin/docker-install.sh
|
||||
ENV PIHOLE_INSTALL /root/ph_install.sh
|
||||
ENV ARCH {{ pihole.arch }}
|
||||
ENV S6OVERLAY_RELEASE https://github.com/just-containers/s6-overlay/releases/download/{{ pihole.s6_version }}/s6-overlay-{{ pihole.s6arch }}.tar.gz
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl procps && \
|
||||
curl -L -s $S6OVERLAY_RELEASE | tar xvzf - -C / && \
|
||||
rm -rf /var/cache/apt/archives /var/lib/apt/lists/* && \
|
||||
mv /init /s6-init
|
||||
COPY install.sh /usr/local/bin/install.sh
|
||||
COPY VERSION /etc/docker-pi-hole-version
|
||||
ENV PIHOLE_INSTALL /root/ph_install.sh
|
||||
|
||||
RUN apt-get update && bash -ex docker-install.sh 2>&1
|
||||
RUN bash -ex install.sh 2>&1 && \
|
||||
rm -rf /var/cache/apt/archives /var/lib/apt/lists/*
|
||||
|
||||
ENTRYPOINT [ "/s6-init" ]
|
||||
|
||||
@@ -34,16 +32,18 @@ EXPOSE 443
|
||||
ENV S6_LOGGING 0
|
||||
ENV S6_KEEP_ENV 1
|
||||
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS 2
|
||||
|
||||
ENV ServerIP 0.0.0.0
|
||||
ENV FTL_CMD no-daemon
|
||||
ENV DNSMASQ_USER root
|
||||
|
||||
ENV VERSION {{ pihole.version }}
|
||||
ENV ARCH {{ pihole.arch }}
|
||||
ENV PATH /opt/pihole:${PATH}
|
||||
|
||||
LABEL image="{{ pihole.name }}:{{ pihole.version }}_{{ pihole.arch }}"
|
||||
LABEL maintainer="{{ pihole.maintainer }}"
|
||||
LABEL url="https://www.github.com/pi-hole/docker-pi-hole"
|
||||
|
||||
HEALTHCHECK CMD dig @127.0.0.1 pi.hole || exit 1
|
||||
HEALTHCHECK CMD dig +norecurse +retry=0 @127.0.0.1 pi.hole || exit 1
|
||||
|
||||
SHELL ["/bin/bash", "-c"]
|
||||
|
||||
@@ -1,16 +1,14 @@
|
||||
FROM debian:stretch
|
||||
FROM pihole/debian-base:latest
|
||||
|
||||
COPY install.sh /usr/local/bin/docker-install.sh
|
||||
ENV ARCH amd64
|
||||
ENV S6OVERLAY_RELEASE https://github.com/just-containers/s6-overlay/releases/download/v1.22.1.0/s6-overlay-amd64.tar.gz
|
||||
|
||||
COPY install.sh /usr/local/bin/install.sh
|
||||
COPY VERSION /etc/docker-pi-hole-version
|
||||
ENV PIHOLE_INSTALL /root/ph_install.sh
|
||||
ENV S6OVERLAY_RELEASE https://github.com/just-containers/s6-overlay/releases/download/v1.21.4.0/s6-overlay-amd64.tar.gz
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl procps && \
|
||||
curl -L -s $S6OVERLAY_RELEASE | tar xvzf - -C / && \
|
||||
rm -rf /var/cache/apt/archives /var/lib/apt/lists/* && \
|
||||
mv /init /s6-init
|
||||
|
||||
RUN apt-get update && bash -ex docker-install.sh 2>&1
|
||||
RUN bash -ex install.sh 2>&1 && \
|
||||
rm -rf /var/cache/apt/archives /var/lib/apt/lists/*
|
||||
|
||||
ENTRYPOINT [ "/s6-init" ]
|
||||
|
||||
@@ -34,13 +32,15 @@ EXPOSE 443
|
||||
ENV S6_LOGGING 0
|
||||
ENV S6_KEEP_ENV 1
|
||||
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS 2
|
||||
ENV FTL_CMD no-daemon
|
||||
|
||||
ENV VERSION v4.0
|
||||
ENV ARCH amd64
|
||||
ENV ServerIP 0.0.0.0
|
||||
ENV FTL_CMD no-daemon
|
||||
ENV DNSMASQ_USER root
|
||||
|
||||
ENV VERSION v4.4
|
||||
ENV PATH /opt/pihole:${PATH}
|
||||
|
||||
LABEL image="pihole/pihole:v4.0_amd64"
|
||||
LABEL image="pihole/pihole:v4.4_amd64"
|
||||
LABEL maintainer="adam@diginc.us"
|
||||
LABEL url="https://www.github.com/pi-hole/docker-pi-hole"
|
||||
|
||||
|
||||
@@ -1,16 +1,14 @@
|
||||
FROM multiarch/debian-debootstrap:arm64-stretch-slim
|
||||
|
||||
COPY install.sh /usr/local/bin/docker-install.sh
|
||||
ENV ARCH arm64
|
||||
ENV S6OVERLAY_RELEASE https://github.com/just-containers/s6-overlay/releases/download/v1.22.1.0/s6-overlay-aarch64.tar.gz
|
||||
|
||||
COPY install.sh /usr/local/bin/install.sh
|
||||
COPY VERSION /etc/docker-pi-hole-version
|
||||
ENV PIHOLE_INSTALL /root/ph_install.sh
|
||||
ENV S6OVERLAY_RELEASE https://github.com/just-containers/s6-overlay/releases/download/v1.21.4.0/s6-overlay-aarch64.tar.gz
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl procps && \
|
||||
curl -L -s $S6OVERLAY_RELEASE | tar xvzf - -C / && \
|
||||
rm -rf /var/cache/apt/archives /var/lib/apt/lists/* && \
|
||||
mv /init /s6-init
|
||||
|
||||
RUN apt-get update && bash -ex docker-install.sh 2>&1
|
||||
RUN bash -ex install.sh 2>&1 && \
|
||||
rm -rf /var/cache/apt/archives /var/lib/apt/lists/*
|
||||
|
||||
ENTRYPOINT [ "/s6-init" ]
|
||||
|
||||
@@ -34,13 +32,15 @@ EXPOSE 443
|
||||
ENV S6_LOGGING 0
|
||||
ENV S6_KEEP_ENV 1
|
||||
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS 2
|
||||
ENV FTL_CMD no-daemon
|
||||
|
||||
ENV VERSION v4.0
|
||||
ENV ARCH aarch64
|
||||
ENV ServerIP 0.0.0.0
|
||||
ENV FTL_CMD no-daemon
|
||||
ENV DNSMASQ_USER root
|
||||
|
||||
ENV VERSION v4.4
|
||||
ENV PATH /opt/pihole:${PATH}
|
||||
|
||||
LABEL image="pihole/pihole:v4.0_aarch64"
|
||||
LABEL image="pihole/pihole:v4.4_arm64"
|
||||
LABEL maintainer="adam@diginc.us"
|
||||
LABEL url="https://www.github.com/pi-hole/docker-pi-hole"
|
||||
|
||||
@@ -1,16 +1,14 @@
|
||||
FROM multiarch/debian-debootstrap:armel-stretch-slim
|
||||
|
||||
COPY install.sh /usr/local/bin/docker-install.sh
|
||||
ENV ARCH armel
|
||||
ENV S6OVERLAY_RELEASE https://github.com/just-containers/s6-overlay/releases/download/v1.22.1.0/s6-overlay-arm.tar.gz
|
||||
|
||||
COPY install.sh /usr/local/bin/install.sh
|
||||
COPY VERSION /etc/docker-pi-hole-version
|
||||
ENV PIHOLE_INSTALL /root/ph_install.sh
|
||||
ENV S6OVERLAY_RELEASE https://github.com/just-containers/s6-overlay/releases/download/v1.21.4.0/s6-overlay-arm.tar.gz
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl procps && \
|
||||
curl -L -s $S6OVERLAY_RELEASE | tar xvzf - -C / && \
|
||||
rm -rf /var/cache/apt/archives /var/lib/apt/lists/* && \
|
||||
mv /init /s6-init
|
||||
|
||||
RUN apt-get update && bash -ex docker-install.sh 2>&1
|
||||
RUN bash -ex install.sh 2>&1 && \
|
||||
rm -rf /var/cache/apt/archives /var/lib/apt/lists/*
|
||||
|
||||
ENTRYPOINT [ "/s6-init" ]
|
||||
|
||||
@@ -34,13 +32,15 @@ EXPOSE 443
|
||||
ENV S6_LOGGING 0
|
||||
ENV S6_KEEP_ENV 1
|
||||
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS 2
|
||||
ENV FTL_CMD no-daemon
|
||||
|
||||
ENV VERSION v4.0
|
||||
ENV ARCH armel
|
||||
ENV ServerIP 0.0.0.0
|
||||
ENV FTL_CMD no-daemon
|
||||
ENV DNSMASQ_USER root
|
||||
|
||||
ENV VERSION v4.4
|
||||
ENV PATH /opt/pihole:${PATH}
|
||||
|
||||
LABEL image="pihole/pihole:v4.0_armel"
|
||||
LABEL image="pihole/pihole:v4.4_armel"
|
||||
LABEL maintainer="adam@diginc.us"
|
||||
LABEL url="https://www.github.com/pi-hole/docker-pi-hole"
|
||||
|
||||
|
||||
@@ -1,16 +1,14 @@
|
||||
FROM multiarch/debian-debootstrap:armhf-stretch-slim
|
||||
|
||||
COPY install.sh /usr/local/bin/docker-install.sh
|
||||
ENV ARCH armhf
|
||||
ENV S6OVERLAY_RELEASE https://github.com/just-containers/s6-overlay/releases/download/v1.21.7.0/s6-overlay-armhf.tar.gz
|
||||
|
||||
COPY install.sh /usr/local/bin/install.sh
|
||||
COPY VERSION /etc/docker-pi-hole-version
|
||||
ENV PIHOLE_INSTALL /root/ph_install.sh
|
||||
ENV S6OVERLAY_RELEASE https://github.com/just-containers/s6-overlay/releases/download/v1.21.4.0/s6-overlay-armhf.tar.gz
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y curl procps && \
|
||||
curl -L -s $S6OVERLAY_RELEASE | tar xvzf - -C / && \
|
||||
rm -rf /var/cache/apt/archives /var/lib/apt/lists/* && \
|
||||
mv /init /s6-init
|
||||
|
||||
RUN apt-get update && bash -ex docker-install.sh 2>&1
|
||||
RUN bash -ex install.sh 2>&1 && \
|
||||
rm -rf /var/cache/apt/archives /var/lib/apt/lists/*
|
||||
|
||||
ENTRYPOINT [ "/s6-init" ]
|
||||
|
||||
@@ -34,13 +32,15 @@ EXPOSE 443
|
||||
ENV S6_LOGGING 0
|
||||
ENV S6_KEEP_ENV 1
|
||||
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS 2
|
||||
ENV FTL_CMD no-daemon
|
||||
|
||||
ENV VERSION v4.0
|
||||
ENV ARCH armhf
|
||||
ENV ServerIP 0.0.0.0
|
||||
ENV FTL_CMD no-daemon
|
||||
ENV DNSMASQ_USER root
|
||||
|
||||
ENV VERSION v4.4
|
||||
ENV PATH /opt/pihole:${PATH}
|
||||
|
||||
LABEL image="pihole/pihole:v4.0_armhf"
|
||||
LABEL image="pihole/pihole:v4.4_armhf"
|
||||
LABEL maintainer="adam@diginc.us"
|
||||
LABEL url="https://www.github.com/pi-hole/docker-pi-hole"
|
||||
|
||||
|
||||
23
Dockerfile_build
Normal file
23
Dockerfile_build
Normal file
@@ -0,0 +1,23 @@
|
||||
FROM docker:latest
|
||||
|
||||
# Based on https://github.com/Ilhicas/alpine-pipenv
|
||||
ARG packages
|
||||
RUN apk --update add python3 python3-dev curl gcc make \
|
||||
musl-dev libffi-dev openssl-dev ${packages} \
|
||||
&& rm -rf /var/cache/apk/* \
|
||||
&& pip3 install -U pip pipenv
|
||||
|
||||
|
||||
# -v "$(pwd):/$(pwd)" -w "$(pwd)" to prevent nested docker path confusion
|
||||
COPY ./Dockerfile.sh /usr/local/bin/
|
||||
COPY Pipfile* /root/
|
||||
WORKDIR /root
|
||||
|
||||
RUN pipenv install --system \
|
||||
&& sed -i 's|/bin/sh|/bin/bash|g' /usr/lib/python3.8/site-packages/testinfra/backend/docker.py
|
||||
|
||||
|
||||
RUN echo "set -ex && Dockerfile.sh && \$@" > /usr/local/bin/entrypoint.sh
|
||||
RUN chmod +x /usr/local/bin/entrypoint.sh
|
||||
ENTRYPOINT entrypoint.sh
|
||||
CMD Dockerfile.sh
|
||||
63
Pipfile
Normal file
63
Pipfile
Normal file
@@ -0,0 +1,63 @@
|
||||
[[source]]
|
||||
name = "pypi"
|
||||
url = "https://pypi.org/simple"
|
||||
verify_ssl = true
|
||||
|
||||
[dev-packages]
|
||||
|
||||
[packages]
|
||||
apipkg = "==1.5"
|
||||
atomicwrites = "==1.3.0"
|
||||
attrs = "==19.3.0"
|
||||
bcrypt = "==3.1.7"
|
||||
cached-property = "==1.5.1"
|
||||
certifi = "==2019.11.28"
|
||||
cffi = "==1.13.2"
|
||||
chardet = "==3.0.4"
|
||||
configparser = "==4.0.2"
|
||||
contextlib2 = "==0.6.0.post1"
|
||||
coverage = "==5.0.1"
|
||||
cryptography = "==2.8"
|
||||
docker = "==4.1.0"
|
||||
dockerpty = "==0.4.1"
|
||||
docopt = "==0.6.2"
|
||||
enum34 = "==1.1.6"
|
||||
execnet = "==1.7.1"
|
||||
filelock = "==3.0.12"
|
||||
funcsigs = "==1.0.2"
|
||||
idna = "==2.8"
|
||||
importlib-metadata = "==1.3.0"
|
||||
ipaddress = "==1.0.23"
|
||||
jsonschema = "==3.2.0"
|
||||
more-itertools = "==5.0.0"
|
||||
pathlib2 = "==2.3.5"
|
||||
pluggy = "==0.13.1"
|
||||
py = "==1.8.1"
|
||||
pycparser = "==2.19"
|
||||
pyparsing = "==2.4.6"
|
||||
pyrsistent = "==0.15.6"
|
||||
pytest = "==4.6.8"
|
||||
pytest-cov = "==2.8.1"
|
||||
pytest-forked = "==1.1.3"
|
||||
pytest-xdist = "==1.31.0"
|
||||
requests = "==2.22.0"
|
||||
scandir = "==1.10.0"
|
||||
six = "==1.13.0"
|
||||
subprocess32 = "==3.5.4"
|
||||
testinfra = "==3.3.0"
|
||||
texttable = "==1.6.2"
|
||||
toml = "==0.10.0"
|
||||
tox = "==3.14.3"
|
||||
urllib3 = "==1.25.7"
|
||||
virtualenv = "==16.7.9"
|
||||
wcwidth = "==0.1.7"
|
||||
zipp = "==0.6.0"
|
||||
"backports.shutil_get_terminal_size" = "==1.0.0"
|
||||
"backports.ssl_match_hostname" = "==3.7.0.1"
|
||||
Jinja2 = "==2.10.3"
|
||||
MarkupSafe = "==1.1.1"
|
||||
PyYAML = "==5.2"
|
||||
websocket_client = "==0.57.0"
|
||||
|
||||
[requires]
|
||||
python_version = "3.8"
|
||||
581
Pipfile.lock
generated
Normal file
581
Pipfile.lock
generated
Normal file
@@ -0,0 +1,581 @@
|
||||
{
|
||||
"_meta": {
|
||||
"hash": {
|
||||
"sha256": "ee7705112b315cad899e08bd6eac8f47e9a200a0d47a1920cc192995b79f8673"
|
||||
},
|
||||
"pipfile-spec": 6,
|
||||
"requires": {
|
||||
"python_version": "3.8"
|
||||
},
|
||||
"sources": [
|
||||
{
|
||||
"name": "pypi",
|
||||
"url": "https://pypi.org/simple",
|
||||
"verify_ssl": true
|
||||
}
|
||||
]
|
||||
},
|
||||
"default": {
|
||||
"apipkg": {
|
||||
"hashes": [
|
||||
"sha256:37228cda29411948b422fae072f57e31d3396d2ee1c9783775980ee9c9990af6",
|
||||
"sha256:58587dd4dc3daefad0487f6d9ae32b4542b185e1c36db6993290e7c41ca2b47c"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.5"
|
||||
},
|
||||
"atomicwrites": {
|
||||
"hashes": [
|
||||
"sha256:03472c30eb2c5d1ba9227e4c2ca66ab8287fbfbbda3888aa93dc2e28fc6811b4",
|
||||
"sha256:75a9445bac02d8d058d5e1fe689654ba5a6556a1dfd8ce6ec55a0ed79866cfa6"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.3.0"
|
||||
},
|
||||
"attrs": {
|
||||
"hashes": [
|
||||
"sha256:08a96c641c3a74e44eb59afb61a24f2cb9f4d7188748e76ba4bb5edfa3cb7d1c",
|
||||
"sha256:f7b7ce16570fe9965acd6d30101a28f62fb4a7f9e926b3bbc9b61f8b04247e72"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==19.3.0"
|
||||
},
|
||||
"backports.shutil-get-terminal-size": {
|
||||
"hashes": [
|
||||
"sha256:0975ba55054c15e346944b38956a4c9cbee9009391e41b86c68990effb8c1f64",
|
||||
"sha256:713e7a8228ae80341c70586d1cc0a8caa5207346927e23d09dcbcaf18eadec80"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.0.0"
|
||||
},
|
||||
"backports.ssl-match-hostname": {
|
||||
"hashes": [
|
||||
"sha256:bb82e60f9fbf4c080eabd957c39f0641f0fc247d9a16e31e26d594d8f42b9fd2"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.7.0.1"
|
||||
},
|
||||
"bcrypt": {
|
||||
"hashes": [
|
||||
"sha256:0258f143f3de96b7c14f762c770f5fc56ccd72f8a1857a451c1cd9a655d9ac89",
|
||||
"sha256:0b0069c752ec14172c5f78208f1863d7ad6755a6fae6fe76ec2c80d13be41e42",
|
||||
"sha256:19a4b72a6ae5bb467fea018b825f0a7d917789bcfe893e53f15c92805d187294",
|
||||
"sha256:5432dd7b34107ae8ed6c10a71b4397f1c853bd39a4d6ffa7e35f40584cffd161",
|
||||
"sha256:6305557019906466fc42dbc53b46da004e72fd7a551c044a827e572c82191752",
|
||||
"sha256:69361315039878c0680be456640f8705d76cb4a3a3fe1e057e0f261b74be4b31",
|
||||
"sha256:6fe49a60b25b584e2f4ef175b29d3a83ba63b3a4df1b4c0605b826668d1b6be5",
|
||||
"sha256:74a015102e877d0ccd02cdeaa18b32aa7273746914a6c5d0456dd442cb65b99c",
|
||||
"sha256:763669a367869786bb4c8fcf731f4175775a5b43f070f50f46f0b59da45375d0",
|
||||
"sha256:8b10acde4e1919d6015e1df86d4c217d3b5b01bb7744c36113ea43d529e1c3de",
|
||||
"sha256:9fe92406c857409b70a38729dbdf6578caf9228de0aef5bc44f859ffe971a39e",
|
||||
"sha256:a190f2a5dbbdbff4b74e3103cef44344bc30e61255beb27310e2aec407766052",
|
||||
"sha256:a595c12c618119255c90deb4b046e1ca3bcfad64667c43d1166f2b04bc72db09",
|
||||
"sha256:c9457fa5c121e94a58d6505cadca8bed1c64444b83b3204928a866ca2e599105",
|
||||
"sha256:cb93f6b2ab0f6853550b74e051d297c27a638719753eb9ff66d1e4072be67133",
|
||||
"sha256:ce4e4f0deb51d38b1611a27f330426154f2980e66582dc5f438aad38b5f24fc1",
|
||||
"sha256:d7bdc26475679dd073ba0ed2766445bb5b20ca4793ca0db32b399dccc6bc84b7",
|
||||
"sha256:ff032765bb8716d9387fd5376d987a937254b0619eff0972779515b5c98820bc"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.1.7"
|
||||
},
|
||||
"cached-property": {
|
||||
"hashes": [
|
||||
"sha256:3a026f1a54135677e7da5ce819b0c690f156f37976f3e30c5430740725203d7f",
|
||||
"sha256:9217a59f14a5682da7c4b8829deadbfc194ac22e9908ccf7c8820234e80a1504"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.5.1"
|
||||
},
|
||||
"certifi": {
|
||||
"hashes": [
|
||||
"sha256:017c25db2a153ce562900032d5bc68e9f191e44e9a0f762f373977de9df1fbb3",
|
||||
"sha256:25b64c7da4cd7479594d035c08c2d809eb4aab3a26e5a990ea98cc450c320f1f"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2019.11.28"
|
||||
},
|
||||
"cffi": {
|
||||
"hashes": [
|
||||
"sha256:0b49274afc941c626b605fb59b59c3485c17dc776dc3cc7cc14aca74cc19cc42",
|
||||
"sha256:0e3ea92942cb1168e38c05c1d56b0527ce31f1a370f6117f1d490b8dcd6b3a04",
|
||||
"sha256:135f69aecbf4517d5b3d6429207b2dff49c876be724ac0c8bf8e1ea99df3d7e5",
|
||||
"sha256:19db0cdd6e516f13329cba4903368bff9bb5a9331d3410b1b448daaadc495e54",
|
||||
"sha256:2781e9ad0e9d47173c0093321bb5435a9dfae0ed6a762aabafa13108f5f7b2ba",
|
||||
"sha256:291f7c42e21d72144bb1c1b2e825ec60f46d0a7468f5346841860454c7aa8f57",
|
||||
"sha256:2c5e309ec482556397cb21ede0350c5e82f0eb2621de04b2633588d118da4396",
|
||||
"sha256:2e9c80a8c3344a92cb04661115898a9129c074f7ab82011ef4b612f645939f12",
|
||||
"sha256:32a262e2b90ffcfdd97c7a5e24a6012a43c61f1f5a57789ad80af1d26c6acd97",
|
||||
"sha256:3c9fff570f13480b201e9ab69453108f6d98244a7f495e91b6c654a47486ba43",
|
||||
"sha256:415bdc7ca8c1c634a6d7163d43fb0ea885a07e9618a64bda407e04b04333b7db",
|
||||
"sha256:42194f54c11abc8583417a7cf4eaff544ce0de8187abaf5d29029c91b1725ad3",
|
||||
"sha256:4424e42199e86b21fc4db83bd76909a6fc2a2aefb352cb5414833c030f6ed71b",
|
||||
"sha256:4a43c91840bda5f55249413037b7a9b79c90b1184ed504883b72c4df70778579",
|
||||
"sha256:599a1e8ff057ac530c9ad1778293c665cb81a791421f46922d80a86473c13346",
|
||||
"sha256:5c4fae4e9cdd18c82ba3a134be256e98dc0596af1e7285a3d2602c97dcfa5159",
|
||||
"sha256:5ecfa867dea6fabe2a58f03ac9186ea64da1386af2159196da51c4904e11d652",
|
||||
"sha256:62f2578358d3a92e4ab2d830cd1c2049c9c0d0e6d3c58322993cc341bdeac22e",
|
||||
"sha256:6471a82d5abea994e38d2c2abc77164b4f7fbaaf80261cb98394d5793f11b12a",
|
||||
"sha256:6d4f18483d040e18546108eb13b1dfa1000a089bcf8529e30346116ea6240506",
|
||||
"sha256:71a608532ab3bd26223c8d841dde43f3516aa5d2bf37b50ac410bb5e99053e8f",
|
||||
"sha256:74a1d8c85fb6ff0b30fbfa8ad0ac23cd601a138f7509dc617ebc65ef305bb98d",
|
||||
"sha256:7b93a885bb13073afb0aa73ad82059a4c41f4b7d8eb8368980448b52d4c7dc2c",
|
||||
"sha256:7d4751da932caaec419d514eaa4215eaf14b612cff66398dd51129ac22680b20",
|
||||
"sha256:7f627141a26b551bdebbc4855c1157feeef18241b4b8366ed22a5c7d672ef858",
|
||||
"sha256:8169cf44dd8f9071b2b9248c35fc35e8677451c52f795daa2bb4643f32a540bc",
|
||||
"sha256:aa00d66c0fab27373ae44ae26a66a9e43ff2a678bf63a9c7c1a9a4d61172827a",
|
||||
"sha256:ccb032fda0873254380aa2bfad2582aedc2959186cce61e3a17abc1a55ff89c3",
|
||||
"sha256:d754f39e0d1603b5b24a7f8484b22d2904fa551fe865fd0d4c3332f078d20d4e",
|
||||
"sha256:d75c461e20e29afc0aee7172a0950157c704ff0dd51613506bd7d82b718e7410",
|
||||
"sha256:dcd65317dd15bc0451f3e01c80da2216a31916bdcffd6221ca1202d96584aa25",
|
||||
"sha256:e570d3ab32e2c2861c4ebe6ffcad6a8abf9347432a37608fe1fbd157b3f0036b",
|
||||
"sha256:fd43a88e045cf992ed09fa724b5315b790525f2676883a6ea64e3263bae6549d"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.13.2"
|
||||
},
|
||||
"chardet": {
|
||||
"hashes": [
|
||||
"sha256:84ab92ed1c4d4f16916e05906b6b75a6c0fb5db821cc65e70cbd64a3e2a5eaae",
|
||||
"sha256:fc323ffcaeaed0e0a02bf4d117757b98aed530d9ed4531e3e15460124c106691"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.0.4"
|
||||
},
|
||||
"configparser": {
|
||||
"hashes": [
|
||||
"sha256:254c1d9c79f60c45dfde850850883d5aaa7f19a23f13561243a050d5a7c3fe4c",
|
||||
"sha256:c7d282687a5308319bf3d2e7706e575c635b0a470342641c93bea0ea3b5331df"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==4.0.2"
|
||||
},
|
||||
"contextlib2": {
|
||||
"hashes": [
|
||||
"sha256:01f490098c18b19d2bd5bb5dc445b2054d2fa97f09a4280ba2c5f3c394c8162e",
|
||||
"sha256:3355078a159fbb44ee60ea80abd0d87b80b78c248643b49aa6d94673b413609b"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.6.0.post1"
|
||||
},
|
||||
"coverage": {
|
||||
"hashes": [
|
||||
"sha256:0101888bd1592a20ccadae081ba10e8b204d20235d18d05c6f7d5e904a38fc10",
|
||||
"sha256:04b961862334687549eb91cd5178a6fbe977ad365bddc7c60f2227f2f9880cf4",
|
||||
"sha256:1ca43dbd739c0fc30b0a3637a003a0d2c7edc1dd618359d58cc1e211742f8bd1",
|
||||
"sha256:1cbb88b34187bdb841f2599770b7e6ff8e259dc3bb64fc7893acf44998acf5f8",
|
||||
"sha256:232f0b52a5b978288f0bbc282a6c03fe48cd19a04202df44309919c142b3bb9c",
|
||||
"sha256:24bcfa86fd9ce86b73a8368383c39d919c497a06eebb888b6f0c12f13e920b1a",
|
||||
"sha256:25b8f60b5c7da71e64c18888f3067d5b6f1334b9681876b2fb41eea26de881ae",
|
||||
"sha256:2714160a63da18aed9340c70ed514973971ee7e665e6b336917ff4cca81a25b1",
|
||||
"sha256:2ca2cd5264e84b2cafc73f0045437f70c6378c0d7dbcddc9ee3fe192c1e29e5d",
|
||||
"sha256:2cc707fc9aad2592fc686d63ef72dc0031fc98b6fb921d2f5395d9ab84fbc3ef",
|
||||
"sha256:348630edea485f4228233c2f310a598abf8afa5f8c716c02a9698089687b6085",
|
||||
"sha256:40fbfd6b044c9db13aeec1daf5887d322c710d811f944011757526ef6e323fd9",
|
||||
"sha256:46c9c6a1d1190c0b75ec7c0f339088309952b82ae8d67a79ff1319eb4e749b96",
|
||||
"sha256:591506e088901bdc25620c37aec885e82cc896528f28c57e113751e3471fc314",
|
||||
"sha256:5ac71bba1e07eab403b082c4428f868c1c9e26a21041436b4905c4c3d4e49b08",
|
||||
"sha256:5f622f19abda4e934938e24f1d67599249abc201844933a6f01aaa8663094489",
|
||||
"sha256:65bead1ac8c8930cf92a1ccaedcce19a57298547d5d1db5c9d4d068a0675c38b",
|
||||
"sha256:7362a7f829feda10c7265b553455de596b83d1623b3d436b6d3c51c688c57bf6",
|
||||
"sha256:7f2675750c50151f806070ec11258edf4c328340916c53bac0adbc465abd6b1e",
|
||||
"sha256:960d7f42277391e8b1c0b0ae427a214e1b31a1278de6b73f8807b20c2e913bba",
|
||||
"sha256:a50b0888d8a021a3342d36a6086501e30de7d840ab68fca44913e97d14487dc1",
|
||||
"sha256:b7dbc5e8c39ea3ad3db22715f1b5401cd698a621218680c6daf42c2f9d36e205",
|
||||
"sha256:bb3d29df5d07d5399d58a394d0ef50adf303ab4fbf66dfd25b9ef258effcb692",
|
||||
"sha256:c0fff2733f7c2950f58a4fd09b5db257b00c6fec57bf3f68c5bae004d804b407",
|
||||
"sha256:c792d3707a86c01c02607ae74364854220fb3e82735f631cd0a345dea6b4cee5",
|
||||
"sha256:c90bda74e16bcd03861b09b1d37c0a4158feda5d5a036bb2d6e58de6ff65793e",
|
||||
"sha256:cfce79ce41cc1a1dc7fc85bb41eeeb32d34a4cf39a645c717c0550287e30ff06",
|
||||
"sha256:eeafb646f374988c22c8e6da5ab9fb81367ecfe81c70c292623373d2a021b1a1",
|
||||
"sha256:f425f50a6dd807cb9043d15a4fcfba3b5874a54d9587ccbb748899f70dc18c47",
|
||||
"sha256:fcd4459fe35a400b8f416bc57906862693c9f88b66dc925e7f2a933e77f6b18b",
|
||||
"sha256:ff3936dd5feaefb4f91c8c1f50a06c588b5dc69fba4f7d9c79a6617ad80bb7df"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==5.0.1"
|
||||
},
|
||||
"cryptography": {
|
||||
"hashes": [
|
||||
"sha256:02079a6addc7b5140ba0825f542c0869ff4df9a69c360e339ecead5baefa843c",
|
||||
"sha256:1df22371fbf2004c6f64e927668734070a8953362cd8370ddd336774d6743595",
|
||||
"sha256:369d2346db5934345787451504853ad9d342d7f721ae82d098083e1f49a582ad",
|
||||
"sha256:3cda1f0ed8747339bbdf71b9f38ca74c7b592f24f65cdb3ab3765e4b02871651",
|
||||
"sha256:44ff04138935882fef7c686878e1c8fd80a723161ad6a98da31e14b7553170c2",
|
||||
"sha256:4b1030728872c59687badcca1e225a9103440e467c17d6d1730ab3d2d64bfeff",
|
||||
"sha256:58363dbd966afb4f89b3b11dfb8ff200058fbc3b947507675c19ceb46104b48d",
|
||||
"sha256:6ec280fb24d27e3d97aa731e16207d58bd8ae94ef6eab97249a2afe4ba643d42",
|
||||
"sha256:7270a6c29199adc1297776937a05b59720e8a782531f1f122f2eb8467f9aab4d",
|
||||
"sha256:73fd30c57fa2d0a1d7a49c561c40c2f79c7d6c374cc7750e9ac7c99176f6428e",
|
||||
"sha256:7f09806ed4fbea8f51585231ba742b58cbcfbfe823ea197d8c89a5e433c7e912",
|
||||
"sha256:90df0cc93e1f8d2fba8365fb59a858f51a11a394d64dbf3ef844f783844cc793",
|
||||
"sha256:971221ed40f058f5662a604bd1ae6e4521d84e6cad0b7b170564cc34169c8f13",
|
||||
"sha256:a518c153a2b5ed6b8cc03f7ae79d5ffad7315ad4569b2d5333a13c38d64bd8d7",
|
||||
"sha256:b0de590a8b0979649ebeef8bb9f54394d3a41f66c5584fff4220901739b6b2f0",
|
||||
"sha256:b43f53f29816ba1db8525f006fa6f49292e9b029554b3eb56a189a70f2a40879",
|
||||
"sha256:d31402aad60ed889c7e57934a03477b572a03af7794fa8fb1780f21ea8f6551f",
|
||||
"sha256:de96157ec73458a7f14e3d26f17f8128c959084931e8997b9e655a39c8fde9f9",
|
||||
"sha256:df6b4dca2e11865e6cfbfb708e800efb18370f5a46fd601d3755bc7f85b3a8a2",
|
||||
"sha256:ecadccc7ba52193963c0475ac9f6fa28ac01e01349a2ca48509667ef41ffd2cf",
|
||||
"sha256:fb81c17e0ebe3358486cd8cc3ad78adbae58af12fc2bf2bc0bb84e8090fa5ce8"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.8"
|
||||
},
|
||||
"docker": {
|
||||
"hashes": [
|
||||
"sha256:6e06c5e70ba4fad73e35f00c55a895a448398f3ada7faae072e2bb01348bafc1",
|
||||
"sha256:8f93775b8bdae3a2df6bc9a5312cce564cade58d6555f2c2570165a1270cd8a7"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==4.1.0"
|
||||
},
|
||||
"dockerpty": {
|
||||
"hashes": [
|
||||
"sha256:69a9d69d573a0daa31bcd1c0774eeed5c15c295fe719c61aca550ed1393156ce"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.4.1"
|
||||
},
|
||||
"docopt": {
|
||||
"hashes": [
|
||||
"sha256:49b3a825280bd66b3aa83585ef59c4a8c82f2c8a522dbe754a8bc8d08c85c491"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.6.2"
|
||||
},
|
||||
"enum34": {
|
||||
"hashes": [
|
||||
"sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850",
|
||||
"sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a",
|
||||
"sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79",
|
||||
"sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.1.6"
|
||||
},
|
||||
"execnet": {
|
||||
"hashes": [
|
||||
"sha256:cacb9df31c9680ec5f95553976c4da484d407e85e41c83cb812aa014f0eddc50",
|
||||
"sha256:d4efd397930c46415f62f8a31388d6be4f27a91d7550eb79bc64a756e0056547"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.7.1"
|
||||
},
|
||||
"filelock": {
|
||||
"hashes": [
|
||||
"sha256:18d82244ee114f543149c66a6e0c14e9c4f8a1044b5cdaadd0f82159d6a6ff59",
|
||||
"sha256:929b7d63ec5b7d6b71b0fa5ac14e030b3f70b75747cef1b10da9b879fef15836"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.0.12"
|
||||
},
|
||||
"funcsigs": {
|
||||
"hashes": [
|
||||
"sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca",
|
||||
"sha256:a7bb0f2cf3a3fd1ab2732cb49eba4252c2af4240442415b4abce3b87022a8f50"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.0.2"
|
||||
},
|
||||
"idna": {
|
||||
"hashes": [
|
||||
"sha256:c357b3f628cf53ae2c4c05627ecc484553142ca23264e593d327bcde5e9c3407",
|
||||
"sha256:ea8b7f6188e6fa117537c3df7da9fc686d485087abf6ac197f9c46432f7e4a3c"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.8"
|
||||
},
|
||||
"importlib-metadata": {
|
||||
"hashes": [
|
||||
"sha256:073a852570f92da5f744a3472af1b61e28e9f78ccf0c9117658dc32b15de7b45",
|
||||
"sha256:d95141fbfa7ef2ec65cfd945e2af7e5a6ddbd7c8d9a25e66ff3be8e3daf9f60f"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.3.0"
|
||||
},
|
||||
"ipaddress": {
|
||||
"hashes": [
|
||||
"sha256:6e0f4a39e66cb5bb9a137b00276a2eff74f93b71dcbdad6f10ff7df9d3557fcc",
|
||||
"sha256:b7f8e0369580bb4a24d5ba1d7cc29660a4a6987763faf1d8a8046830e020e7e2"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.0.23"
|
||||
},
|
||||
"jinja2": {
|
||||
"hashes": [
|
||||
"sha256:74320bb91f31270f9551d46522e33af46a80c3d619f4a4bf42b3164d30b5911f",
|
||||
"sha256:9fe95f19286cfefaa917656583d020be14e7859c6b0252588391e47db34527de"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.10.3"
|
||||
},
|
||||
"jsonschema": {
|
||||
"hashes": [
|
||||
"sha256:4e5b3cf8216f577bee9ce139cbe72eca3ea4f292ec60928ff24758ce626cd163",
|
||||
"sha256:c8a85b28d377cc7737e46e2d9f2b4f44ee3c0e1deac6bf46ddefc7187d30797a"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.2.0"
|
||||
},
|
||||
"markupsafe": {
|
||||
"hashes": [
|
||||
"sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473",
|
||||
"sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161",
|
||||
"sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235",
|
||||
"sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5",
|
||||
"sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff",
|
||||
"sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b",
|
||||
"sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1",
|
||||
"sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e",
|
||||
"sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183",
|
||||
"sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66",
|
||||
"sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1",
|
||||
"sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1",
|
||||
"sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e",
|
||||
"sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b",
|
||||
"sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905",
|
||||
"sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735",
|
||||
"sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d",
|
||||
"sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e",
|
||||
"sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d",
|
||||
"sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c",
|
||||
"sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21",
|
||||
"sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2",
|
||||
"sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5",
|
||||
"sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b",
|
||||
"sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6",
|
||||
"sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f",
|
||||
"sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f",
|
||||
"sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.1.1"
|
||||
},
|
||||
"more-itertools": {
|
||||
"hashes": [
|
||||
"sha256:38a936c0a6d98a38bcc2d03fdaaedaba9f412879461dd2ceff8d37564d6522e4",
|
||||
"sha256:c0a5785b1109a6bd7fac76d6837fd1feca158e54e521ccd2ae8bfe393cc9d4fc",
|
||||
"sha256:fe7a7cae1ccb57d33952113ff4fa1bc5f879963600ed74918f1236e212ee50b9"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==5.0.0"
|
||||
},
|
||||
"packaging": {
|
||||
"hashes": [
|
||||
"sha256:aec3fdbb8bc9e4bb65f0634b9f551ced63983a529d6a8931817d52fdd0816ddb",
|
||||
"sha256:fe1d8331dfa7cc0a883b49d75fc76380b2ab2734b220fbb87d774e4fd4b851f8"
|
||||
],
|
||||
"version": "==20.0"
|
||||
},
|
||||
"pathlib2": {
|
||||
"hashes": [
|
||||
"sha256:0ec8205a157c80d7acc301c0b18fbd5d44fe655968f5d947b6ecef5290fc35db",
|
||||
"sha256:6cd9a47b597b37cc57de1c05e56fb1a1c9cc9fab04fe78c29acd090418529868"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.3.5"
|
||||
},
|
||||
"pluggy": {
|
||||
"hashes": [
|
||||
"sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0",
|
||||
"sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.13.1"
|
||||
},
|
||||
"py": {
|
||||
"hashes": [
|
||||
"sha256:5e27081401262157467ad6e7f851b7aa402c5852dbcb3dae06768434de5752aa",
|
||||
"sha256:c20fdd83a5dbc0af9efd622bee9a5564e278f6380fffcacc43ba6f43db2813b0"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.8.1"
|
||||
},
|
||||
"pycparser": {
|
||||
"hashes": [
|
||||
"sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.19"
|
||||
},
|
||||
"pyparsing": {
|
||||
"hashes": [
|
||||
"sha256:4c830582a84fb022400b85429791bc551f1f4871c33f23e44f353119e92f969f",
|
||||
"sha256:c342dccb5250c08d45fd6f8b4a559613ca603b57498511740e65cd11a2e7dcec"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.4.6"
|
||||
},
|
||||
"pyrsistent": {
|
||||
"hashes": [
|
||||
"sha256:f3b280d030afb652f79d67c5586157c5c1355c9a58dfc7940566e28d28f3df1b"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.15.6"
|
||||
},
|
||||
"pytest": {
|
||||
"hashes": [
|
||||
"sha256:6192875be8af57b694b7c4904e909680102befcb99e610ef3d9f786952f795aa",
|
||||
"sha256:f8447ebf8fd3d362868a5d3f43a9df786dfdfe9608843bd9002a2d47a104808f"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==4.6.8"
|
||||
},
|
||||
"pytest-cov": {
|
||||
"hashes": [
|
||||
"sha256:cc6742d8bac45070217169f5f72ceee1e0e55b0221f54bcf24845972d3a47f2b",
|
||||
"sha256:cdbdef4f870408ebdbfeb44e63e07eb18bb4619fae852f6e760645fa36172626"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.8.1"
|
||||
},
|
||||
"pytest-forked": {
|
||||
"hashes": [
|
||||
"sha256:1805699ed9c9e60cb7a8179b8d4fa2b8898098e82d229b0825d8095f0f261100",
|
||||
"sha256:1ae25dba8ee2e56fb47311c9638f9e58552691da87e82d25b0ce0e4bf52b7d87"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.1.3"
|
||||
},
|
||||
"pytest-xdist": {
|
||||
"hashes": [
|
||||
"sha256:0f46020d3d9619e6d17a65b5b989c1ebbb58fc7b1da8fb126d70f4bac4dfeed1",
|
||||
"sha256:7dc0d027d258cd0defc618fb97055fbd1002735ca7a6d17037018cf870e24011"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.31.0"
|
||||
},
|
||||
"pyyaml": {
|
||||
"hashes": [
|
||||
"sha256:0e7f69397d53155e55d10ff68fdfb2cf630a35e6daf65cf0bdeaf04f127c09dc",
|
||||
"sha256:2e9f0b7c5914367b0916c3c104a024bb68f269a486b9d04a2e8ac6f6597b7803",
|
||||
"sha256:35ace9b4147848cafac3db142795ee42deebe9d0dad885ce643928e88daebdcc",
|
||||
"sha256:38a4f0d114101c58c0f3a88aeaa44d63efd588845c5a2df5290b73db8f246d15",
|
||||
"sha256:483eb6a33b671408c8529106df3707270bfacb2447bf8ad856a4b4f57f6e3075",
|
||||
"sha256:4b6be5edb9f6bb73680f5bf4ee08ff25416d1400fbd4535fe0069b2994da07cd",
|
||||
"sha256:7f38e35c00e160db592091751d385cd7b3046d6d51f578b29943225178257b31",
|
||||
"sha256:8100c896ecb361794d8bfdb9c11fce618c7cf83d624d73d5ab38aef3bc82d43f",
|
||||
"sha256:c0ee8eca2c582d29c3c2ec6e2c4f703d1b7f1fb10bc72317355a746057e7346c",
|
||||
"sha256:e4c015484ff0ff197564917b4b4246ca03f411b9bd7f16e02a2f586eb48b6d04",
|
||||
"sha256:ebc4ed52dcc93eeebeae5cf5deb2ae4347b3a81c3fa12b0b8c976544829396a4"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==5.2"
|
||||
},
|
||||
"requests": {
|
||||
"hashes": [
|
||||
"sha256:11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4",
|
||||
"sha256:9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.22.0"
|
||||
},
|
||||
"scandir": {
|
||||
"hashes": [
|
||||
"sha256:2586c94e907d99617887daed6c1d102b5ca28f1085f90446554abf1faf73123e",
|
||||
"sha256:2ae41f43797ca0c11591c0c35f2f5875fa99f8797cb1a1fd440497ec0ae4b022",
|
||||
"sha256:2b8e3888b11abb2217a32af0766bc06b65cc4a928d8727828ee68af5a967fa6f",
|
||||
"sha256:2c712840c2e2ee8dfaf36034080108d30060d759c7b73a01a52251cc8989f11f",
|
||||
"sha256:4d4631f6062e658e9007ab3149a9b914f3548cb38bfb021c64f39a025ce578ae",
|
||||
"sha256:67f15b6f83e6507fdc6fca22fedf6ef8b334b399ca27c6b568cbfaa82a364173",
|
||||
"sha256:7d2d7a06a252764061a020407b997dd036f7bd6a175a5ba2b345f0a357f0b3f4",
|
||||
"sha256:8c5922863e44ffc00c5c693190648daa6d15e7c1207ed02d6f46a8dcc2869d32",
|
||||
"sha256:92c85ac42f41ffdc35b6da57ed991575bdbe69db895507af88b9f499b701c188",
|
||||
"sha256:b24086f2375c4a094a6b51e78b4cf7ca16c721dcee2eddd7aa6494b42d6d519d",
|
||||
"sha256:cb925555f43060a1745d0a321cca94bcea927c50114b623d73179189a4e100ac"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.10.0"
|
||||
},
|
||||
"six": {
|
||||
"hashes": [
|
||||
"sha256:1f1b7d42e254082a9db6279deae68afb421ceba6158efa6131de7b3003ee93fd",
|
||||
"sha256:30f610279e8b2578cab6db20741130331735c781b56053c59c4076da27f06b66"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.13.0"
|
||||
},
|
||||
"subprocess32": {
|
||||
"hashes": [
|
||||
"sha256:88e37c1aac5388df41cc8a8456bb49ebffd321a3ad4d70358e3518176de3a56b",
|
||||
"sha256:eb2937c80497978d181efa1b839ec2d9622cf9600a039a79d0e108d1f9aec79d"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.5.4"
|
||||
},
|
||||
"testinfra": {
|
||||
"hashes": [
|
||||
"sha256:780e6c2ab392ea93c26cee1777c968a144c2189a56b3e239a3a66e6d256925b5",
|
||||
"sha256:c3492b39c8d2c98d8419ce1a91d7fe348213f9b98b91198d2e7e88b3954b050b"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.3.0"
|
||||
},
|
||||
"texttable": {
|
||||
"hashes": [
|
||||
"sha256:7dc282a5b22564fe0fdc1c771382d5dd9a54742047c61558e071c8cd595add86",
|
||||
"sha256:eff3703781fbc7750125f50e10f001195174f13825a92a45e9403037d539b4f4"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.6.2"
|
||||
},
|
||||
"toml": {
|
||||
"hashes": [
|
||||
"sha256:229f81c57791a41d65e399fc06bf0848bab550a9dfd5ed66df18ce5f05e73d5c",
|
||||
"sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.10.0"
|
||||
},
|
||||
"tox": {
|
||||
"hashes": [
|
||||
"sha256:06ba73b149bf838d5cd25dc30c2dd2671ae5b2757cf98e5c41a35fe449f131b3",
|
||||
"sha256:806d0a9217584558cc93747a945a9d9bff10b141a5287f0c8429a08828a22192"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.14.3"
|
||||
},
|
||||
"urllib3": {
|
||||
"hashes": [
|
||||
"sha256:a8a318824cc77d1fd4b2bec2ded92646630d7fe8619497b142c84a9e6f5a7293",
|
||||
"sha256:f3c5fd51747d450d4dcf6f923c81f78f811aab8205fda64b0aba34a4e48b0745"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.25.7"
|
||||
},
|
||||
"virtualenv": {
|
||||
"hashes": [
|
||||
"sha256:0d62c70883c0342d59c11d0ddac0d954d0431321a41ab20851facf2b222598f3",
|
||||
"sha256:55059a7a676e4e19498f1aad09b8313a38fcc0cdbe4fdddc0e9b06946d21b4bb"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==16.7.9"
|
||||
},
|
||||
"wcwidth": {
|
||||
"hashes": [
|
||||
"sha256:3df37372226d6e63e1b1e1eda15c594bca98a22d33a23832a90998faa96bc65e",
|
||||
"sha256:f4ebe71925af7b40a864553f761ed559b43544f8f71746c2d756c7fe788ade7c"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.1.7"
|
||||
},
|
||||
"websocket-client": {
|
||||
"hashes": [
|
||||
"sha256:0fc45c961324d79c781bab301359d5a1b00b13ad1b10415a4780229ef71a5549",
|
||||
"sha256:d735b91d6d1692a6a181f2a8c9e0238e5f6373356f561bb9dc4c7af36f452010"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.57.0"
|
||||
},
|
||||
"zipp": {
|
||||
"hashes": [
|
||||
"sha256:3718b1cbcd963c7d4c5511a8240812904164b7f381b647143a89d3b98f9bcd8e",
|
||||
"sha256:f06903e9f1f43b12d371004b4ac7b06ab39a44adc747266928ae6debfa7b3335"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.6.0"
|
||||
}
|
||||
},
|
||||
"develop": {}
|
||||
}
|
||||
221
README.md
221
README.md
@@ -1,90 +1,138 @@
|
||||
# Docker Pi-hole
|
||||
|
||||
<p align="center">
|
||||
<a href="https://pi-hole.net"><img src="https://pi-hole.github.io/graphics/Vortex/Vortex_with_text.png" width="150" height="255" alt="Pi-hole"></a><br/>
|
||||
</p>
|
||||
<!-- Delete above HTML and insert markdown for dockerhub :  -->
|
||||
|
||||
## Quick Start
|
||||
|
||||
[Docker-compose](https://docs.docker.com/compose/install/) example:
|
||||
|
||||
```yaml
|
||||
version: "3"
|
||||
|
||||
# More info at https://github.com/pi-hole/docker-pi-hole/ and https://docs.pi-hole.net/
|
||||
services:
|
||||
pihole:
|
||||
container_name: pihole
|
||||
image: pihole/pihole:latest
|
||||
ports:
|
||||
- "53:53/tcp"
|
||||
- "53:53/udp"
|
||||
- "67:67/udp"
|
||||
- "80:80/tcp"
|
||||
- "443:443/tcp"
|
||||
environment:
|
||||
TZ: 'America/Chicago'
|
||||
# WEBPASSWORD: 'set a secure password here or it will be random'
|
||||
# Volumes store your data between container upgrades
|
||||
volumes:
|
||||
- './etc-pihole/:/etc/pihole/'
|
||||
- './etc-dnsmasq.d/:/etc/dnsmasq.d/'
|
||||
dns:
|
||||
- 127.0.0.1
|
||||
- 1.1.1.1
|
||||
# Recommended but not required (DHCP needs NET_ADMIN)
|
||||
# https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
restart: unless-stopped
|
||||
```
|
||||
|
||||
[Here is an equivalent docker run script](https://github.com/pi-hole/docker-pi-hole/blob/master/docker_run.sh).
|
||||
|
||||
## Upgrade Notices:
|
||||
|
||||
### Docker Pi-Hole v4.2.2
|
||||
|
||||
- ServerIP no longer a required enviroment variable **unless you run network 'host' mode**! Feel free to remove it unless you need it to customize lighttpd
|
||||
- --cap-add NET_ADMIN no longer required unless using DHCP, leaving in examples for consistency
|
||||
|
||||
### Docker Pi-Hole v4.1.1+
|
||||
|
||||
Starting with the v4.1.1 release your Pi-hole container may encounter issues starting the DNS service unless ran with the following setting:
|
||||
|
||||
- `--dns=127.0.0.1 --dns=1.1.1.1` The second server can be any DNS IP of your choosing, but the **first dns must be 127.0.0.1**
|
||||
- A WARNING stating "Misconfigured DNS in /etc/resolv.conf" may show in docker logs without this.
|
||||
- 4.1 required --cap-add NET_ADMIN until 4.2.1-1
|
||||
|
||||
These are the raw [docker run cli](https://docs.docker.com/engine/reference/commandline/cli/) versions of the commands. We provide no official support for docker GUIs but the community forums may be able to help if you do not see a place for these settings. Remember, always consult your manual too!
|
||||
|
||||
## Overview
|
||||
|
||||
#### Renamed from `diginc/pi-hole` to `pihole/pihole`
|
||||
|
||||
A [Docker](https://www.docker.com/what-docker) project to make a lightweight x86 and ARM container with [Pi-hole](https://pi-hole.net) functionality.
|
||||
|
||||
1) Install docker for your [x86-64 system](https://www.docker.com/community-edition) or [ARMv7 system](https://www.raspberrypi.org/blog/docker-comes-to-raspberry-pi/) using those links.
|
||||
2) Use the appropriate tag (x86 can use default tag, ARM users need to use images from [pihole/pihole:v4.0_armhf](https://store.docker.com/community/images/pihole/pihole/tags)) in the below `docker run` command
|
||||
1) Install docker for your [x86-64 system](https://www.docker.com/community-edition) or [ARMv7 system](https://www.raspberrypi.org/blog/docker-comes-to-raspberry-pi/) using those links. [Docker-compose](https://docs.docker.com/compose/install/) is also recommended.
|
||||
2) Use the above quick start example, customize if desired.
|
||||
3) Enjoy!
|
||||
|
||||
[](https://travis-ci.org/pi-hole/docker-pi-hole) [](https://store.docker.com/community/images/pihole/pihole) [](https://store.docker.com/community/images/pihole/pihole)
|
||||
|
||||
[](https://gitter.im/pihole/docker-pi-hole?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
|
||||
|
||||
## Running Pi-hole Docker
|
||||
|
||||
[DockerCloud](https://store.docker.com/community/images/pihole/pihole) automatically builds the latest docker-pi-hole changes into images which can easily be pulled and ran with a simple `docker run` command. Changes and updates under development or testing can be found in the [dev tags](#development) section.
|
||||
This container uses 2 popular ports, port 53 and port 80, so **may conflict with existing applications ports**. If you have no other services or docker containers using port 53/80 (if you do, keep reading below for a reverse proxy example), the minimum arguments required to run this container are in the script [docker_run.sh](https://github.com/pi-hole/docker-pi-hole/blob/master/docker_run.sh)
|
||||
|
||||
One crucial thing to know before starting is this container needs port 53 and port 80, two very popular ports that may conflict with existing applications. If you have no other services or docker containers using port 53/80 (if you do, keep reading below for a reverse proxy example), the minimum arguments required to run this container are in the script [docker_run.sh](https://github.com/pi-hole/docker-pi-hole/blob/master/docker_run.sh) or summarized here:
|
||||
If you're using a Red Hat based distribution with an SELinux Enforcing policy add `:z` to line with volumes like so:
|
||||
|
||||
```
|
||||
IP_LOOKUP="$(ip route get 8.8.8.8 | awk '{ print $NF; exit }')" # May not work for VPN / tun0
|
||||
IPv6_LOOKUP="$(ip -6 route get 2001:4860:4860::8888 | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')" # May not work for VPN / tun0
|
||||
IP="${IP:-$IP_LOOKUP}" # use $IP, if set, otherwise IP_LOOKUP
|
||||
IPv6="${IPv6:-$IPv6_LOOKUP}" # use $IPv6, if set, otherwise IP_LOOKUP
|
||||
DOCKER_CONFIGS="$(pwd)" # Default of directory you run this from, update to where ever.
|
||||
|
||||
echo "### Make sure your IPs are correct, hard code ServerIP ENV VARs if necessary\nIP: ${IP}\nIPv6: ${IPv6}"
|
||||
docker run -d \
|
||||
--name pihole \
|
||||
-p 53:53/tcp -p 53:53/udp \
|
||||
-p 67:67/udp \
|
||||
-p 80:80 \
|
||||
-p 443:443 \
|
||||
-v "${DOCKER_CONFIGS}/pihole/:/etc/pihole/" \
|
||||
-v "${DOCKER_CONFIGS}/dnsmasq.d/:/etc/dnsmasq.d/" \
|
||||
-e ServerIP="${IP}" \
|
||||
-e ServerIPv6="${IPv6}" \
|
||||
--restart=unless-stopped \
|
||||
--cap-add=NET_ADMIN \
|
||||
pihole/pihole:latest
|
||||
|
||||
echo -n "Your password for https://${IP}/admin/ is "
|
||||
docker logs pihole 2> /dev/null | grep 'password:'
|
||||
-v "$(pwd)/etc-pihole/:/etc/pihole/:z" \
|
||||
-v "$(pwd)/etc-dnsmasq.d/:/etc/dnsmasq.d/:z" \
|
||||
```
|
||||
|
||||
**This is just an example and might need changing.** Volumes are stored in the directory `$DOCKER_CONFIGS` and are recommended for persisting data across docker re-creations for updating images. As mentioned on line 2, the auto `IP_LOOKUP` variable may not work for VPN tunnel interfaces.
|
||||
Volumes are recommended for persisting data across container re-creations for updating images. The IP lookup variables may not work for everyone, please review their values and hard code IP and IPv6 if necessary.
|
||||
|
||||
Two recently added ports to the `docker run` and `docker-compose` examples are port 67 and 443. Port 67 is for users who wish to have Pi-hole run a DHCP server. Port 443 is to provide a sinkhole for ads that use SSL. If only port 80 is used, then blocked HTTPS queries will fail to connect to port 443 and may cause long loading times. Rejecting 443 on your firewall can also serve this same purpose. Ubuntu firewall example: `sudo ufw reject https`
|
||||
Port 443 is to provide a sinkhole for ads that use SSL. If only port 80 is used, then blocked HTTPS queries will fail to connect to port 443 and may cause long loading times. Rejecting 443 on your firewall can also serve this same purpose. Ubuntu firewall example: `sudo ufw reject https`
|
||||
|
||||
**Automatic Ad List Updates** - since the 3.0+ release, `cron` is baked into the container and will grab the newest versions of your lists and flush your logs. **Set your TZ** environment variable to make sure the midnight log rotation syncs up with your timezone's midnight.
|
||||
|
||||
## Running DHCP from Docker Pi-Hole
|
||||
|
||||
There are multiple different ways to run DHCP from within your Docker Pi-hole container but it is slightly more advanced and one size does not fit all. DHCP and Docker's multiple network modes are covered in detail on our docs site: [Docker DHCP and Network Modes](https://docs.pi-hole.net/docker/DHCP/)
|
||||
|
||||
## Environment Variables
|
||||
|
||||
There are other environment variables if you want to customize various things inside the docker container:
|
||||
|
||||
| Docker Environment Var. | Description |
|
||||
| ----------------------- | ----------- |
|
||||
| `-e ServerIP=<Host's IP>`<br/> **Required** | Set to your server's external IP to block ads fully
|
||||
| `-e ServerIPv6=<Host's IPv6>`<br/> *Required if using IPv6* | **If you have a v6 network** set to your server's external IPv6 to block IPv6 ads fully
|
||||
| `-e TZ=<Timezone>`<br/> **Recommended** *Default: UTC* | Set your [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) to make sure logs rotate at local midnight instead of at UTC midnight.
|
||||
| `-e WEBPASSWORD=<Admin password>`<br/> **Recommended** *Default: random* | http://pi.hole/admin password. Run `docker logs pihole \| grep random` to find your random pass.
|
||||
| `-e DNS1=<IP>`<br/> *Optional* *Default: 8.8.8.8* | Primary upstream DNS provider, default is google DNS
|
||||
| `-e DNS2=<IP>`<br/> *Optional* *Default: 8.8.4.4* | Secondary upstream DNS provider, default is google DNS, `no` if only one DNS should used
|
||||
| `-e VIRTUAL_HOST=<Custom Hostname>`<br/> *Optional* *Default: $ServerIP* | What your web server 'virtual host' is, accessing admin through this Hostname/IP allows you to make changes to the whitelist / blacklists in addition to the default 'http://pi.hole/admin/' address
|
||||
| `-e IPv6=<True\|False>`<br/> *Optional* *Default: True* | For unraid compatibility, strips out all the IPv6 configuration from DNS/Web services when false.
|
||||
| `-e INTERFACE=<NIC>`<br/> *Advanced/Optional* | The default works fine with our basic example docker run commands. If you're trying to use DHCP with `--net host` mode then you may have to customize this or DNSMASQ_LISTENING.
|
||||
| `-e DNSMASQ_LISTENING=<local\|all\|NIC>`<br/> *Advanced/Optional* | `local` listens on all local subnets, `all` permits listening on internet origin subnets in addition to local.
|
||||
| `-e WEB_PORT=<PORT>`<br/> *Advanced/Optional* | **This will break the 'webpage blocked' functionality of Pi-hole** however it may help advanced setups like those running synology or `--net=host` docker argument. This guide explains how to restore webpage blocked functionality using a linux router DNAT rule: [Alternagtive Synology installation method](https://discourse.pi-hole.net/t/alternative-synology-installation-method/5454?u=diginc)
|
||||
| `TZ: <Timezone>`<br/> **Recommended** *Default: UTC* | Set your [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) to make sure logs rotate at local midnight instead of at UTC midnight.
|
||||
| `WEBPASSWORD: <Admin password>`<br/> **Recommended** *Default: random* | http://pi.hole/admin password. Run `docker logs pihole \| grep random` to find your random pass.
|
||||
| `DNS1: <IP>`<br/> *Optional* *Default: 8.8.8.8* | Primary upstream DNS provider, default is google DNS
|
||||
| `DNS2: <IP>`<br/> *Optional* *Default: 8.8.4.4* | Secondary upstream DNS provider, default is google DNS, `no` if only one DNS should used
|
||||
| `DNSSEC: <True\|False>`<br/> *Optional* *Default: false* | Enable DNSSEC support
|
||||
| `DNS_BOGUS_PRIV: <True\|False>`<br/> *Optional* *Default: true* | Enable forwarding of reverse lookups for private ranges
|
||||
| `DNS_FQDN_REQUIRED: <True\|False>`<br/> *Optional* *Default: true* | Never forward non-FQDNs
|
||||
| `CONDITIONAL_FORWARDING: <True\|False>`<br/> *Optional* *Default: False* | Enable DNS conditional forwarding for device name resolution
|
||||
| `CONDITIONAL_FORWARDING_IP: <Router's IP>`<br/> *Optional* | If conditional forwarding is enabled, set the IP of the local network router
|
||||
| `CONDITIONAL_FORWARDING_DOMAIN: <Network Domain>`<br/> *Optional* | If conditional forwarding is enabled, set the domain of the local network router
|
||||
| `CONDITIONAL_FORWARDING_REVERSE: <Reverse DNS>`<br/> *Optional* | If conditional forwarding is enabled, set the reverse DNS of the local network router (e.g. `0.168.192.in-addr.arpa`)
|
||||
| `ServerIP: <Host's IP>`<br/> **Recommended** | **--net=host mode requires** Set to your server's LAN IP, used by web block modes and lighttpd bind address
|
||||
| `ServerIPv6: <Host's IPv6>`<br/> *Required if using IPv6* | **If you have a v6 network** set to your server's LAN IPv6 to block IPv6 ads fully
|
||||
| `VIRTUAL_HOST: <Custom Hostname>`<br/> *Optional* *Default: $ServerIP* | What your web server 'virtual host' is, accessing admin through this Hostname/IP allows you to make changes to the whitelist / blacklists in addition to the default 'http://pi.hole/admin/' address
|
||||
| `IPv6: <True\|False>`<br/> *Optional* *Default: True* | For unraid compatibility, strips out all the IPv6 configuration from DNS/Web services when false.
|
||||
| `INTERFACE: <NIC>`<br/> *Advanced/Optional* | The default works fine with our basic example docker run commands. If you're trying to use DHCP with `--net host` mode then you may have to customize this or DNSMASQ_LISTENING.
|
||||
| `DNSMASQ_LISTENING: <local\|all\|NIC>`<br/> *Advanced/Optional* | `local` listens on all local subnets, `all` permits listening on internet origin subnets in addition to local.
|
||||
| `WEB_PORT: <PORT>`<br/> *Advanced/Optional* | **This will break the 'webpage blocked' functionality of Pi-hole** however it may help advanced setups like those running synology or `--net=host` docker argument. This guide explains how to restore webpage blocked functionality using a linux router DNAT rule: [Alternative Synology installation method](https://discourse.pi-hole.net/t/alternative-synology-installation-method/5454?u=diginc)
|
||||
| `DNSMASQ_USER: <pihole\|root>`<br/> *Experimental Default: root* | Allows running FTLDNS as non-root.
|
||||
|
||||
Here is a rundown of the other arguments passed into the example `docker run`:
|
||||
To use these env vars in docker run format style them like: `-e DNS1=1.1.1.1`
|
||||
|
||||
Here is a rundown of other arguments for your docker-compose / docker run.
|
||||
|
||||
| Docker Arguments | Description |
|
||||
| ---------------- | ----------- |
|
||||
| `-p 80:80`<br/>`-p 53:53/tcp -p 53:53/udp`<br/> **Recommended** | Ports to expose, the bare minimum ports required for Pi-holes HTTP and DNS services
|
||||
| `-p <port>:<port>` **Recommended** | Ports to expose (53, 80, 67, 443), the bare minimum ports required for Pi-holes HTTP and DNS services
|
||||
| `--restart=unless-stopped`<br/> **Recommended** | Automatically (re)start your Pi-hole on boot or in the event of a crash
|
||||
| `-v /dir/for/pihole:/etc/pihole`<br/> **Recommended** | Volumes for your Pi-hole configs help persist changes across docker image updates
|
||||
| `-v /dir/for/dnsmasq.d:/etc/dnsmasq.d`<br/> **Recommended** | Volumes for your dnsmasq configs help persist changes across docker image updates
|
||||
| `--net=host`<br/> *Optional* | Alternative to `-p <port>:<port>` arguments (Cannot be used at same time as -p) if you don't run any other web application
|
||||
| `--cap-add=NET_ADMIN`<br/> *Optional* | If you're forwarding port 67 you will also needs this for DHCP to work. (DHCP Reportedly works, I have not used however)
|
||||
|
||||
If you're a fan of [docker-compose](https://docs.docker.com/compose/install/) I have [example docker-compose.yml files](https://github.com/pi-hole/docker-pi-hole/blob/master/doco-example.yml) in github which I think are a nicer way to represent such long run commands.
|
||||
| `-v $(pwd)/etc-pihole:/etc/pihole`<br/> **Recommended** | Volumes for your Pi-hole configs help persist changes across docker image updates
|
||||
| `-v $(pwd)/etc-dnsmasq.d:/etc/dnsmasq.d`<br/> **Recommended** | Volumes for your dnsmasq configs help persist changes across docker image updates
|
||||
| `--net=host`<br/> *Optional* | Alternative to `-p <port>:<port>` arguments (Cannot be used at same time as -p) if you don't run any other web application. DHCP runs best with --net=host, otherwise your router must support dhcp-relay settings.
|
||||
| `--cap-add=NET_ADMIN`<br/> *Recommended* | Commonly added capability for DHCP, see [Note on Capabilities](#note-on-capabilities) below for other capabilities.
|
||||
| `--dns=127.0.0.1`<br/> *Recommended* | Sets your container's resolve settings to localhost so it can resolve DHCP hostnames from Pi-hole's DNSMasq, also fixes common resolution errors on container restart.
|
||||
| `--dns=1.1.1.1`<br/> *Optional* | Sets a backup server of your choosing in case DNSMasq has problems starting
|
||||
| `--env-file .env` <br/> *Optional* | File to store environment variables for docker replacing `-e key=value` settings. Here for convenience
|
||||
|
||||
## Tips and Tricks
|
||||
|
||||
@@ -92,52 +140,74 @@ If you're a fan of [docker-compose](https://docs.docker.com/compose/install/) I
|
||||
* [How do I set or reset the Web interface Password?](https://discourse.pi-hole.net/t/how-do-i-set-or-reset-the-web-interface-password/1328)
|
||||
* `docker exec -it pihole_container_name pihole -a -p` - then enter your password into the prompt
|
||||
* Port conflicts? Stop your server's existing DNS / Web services.
|
||||
* Ubuntu users especially may need to shut off dns on your docker server so it can run in the container on port 53
|
||||
* 17.04 and later should disable dnsmasq.
|
||||
* 17.10 should disable systemd-resolved service. See this page: [How to disable systemd-resolved in Ubuntu](https://askubuntu.com/questions/907246/how-to-disable-systemd-resolved-in-ubuntu)
|
||||
* Don't forget to stop your services from auto-starting again after you reboot
|
||||
* Ubuntu users see below for more detailed information
|
||||
* Port 80 is highly recommended because if you have another site/service using port 80 by default then the ads may not transform into blank ads correctly. To make sure docker-pi-hole plays nicely with an existing webserver you run you'll probably need a reverse proxy webserver config if you don't have one already. Pi-hole must be the default web app on the proxy e.g. if you go to your host by IP instead of domain then Pi-hole is served out instead of any other sites hosted by the proxy. This is the '[default_server](http://nginx.org/en/docs/http/ngx_http_core_module.html#listen)' in nginx or ['_default_' virtual host](https://httpd.apache.org/docs/2.4/vhosts/examples.html#default) in Apache and is taken advantage of so any undefined ad domain can be directed to your webserver and get a 'blocked' response instead of ads.
|
||||
* You can still map other ports to Pi-hole port 80 using docker's port forwarding like this `-p 8080:80`, but again the ads won't render properly. Changing the inner port 80 shouldn't be required unless you run docker host networking mode.
|
||||
* [Here is an example of running with jwilder/proxy](https://github.com/pi-hole/docker-pi-hole/blob/master/jwilder-proxy-example-doco.yml) (an nginx auto-configuring docker reverse proxy for docker) on my port 80 with Pi-hole on another port. Pi-hole needs to be `DEFAULT_HOST` env in jwilder/proxy and you need to set the matching `VIRTUAL_HOST` for the Pi-hole's container. Please read jwilder/proxy readme for more info if you have trouble. I tested this basic example which is based off what I run.
|
||||
* [Here is an example of running with jwilder/proxy](https://github.com/pi-hole/docker-pi-hole/blob/master/docker-compose-jwilder-proxy.yml) (an nginx auto-configuring docker reverse proxy for docker) on my port 80 with Pi-hole on another port. Pi-hole needs to be `DEFAULT_HOST` env in jwilder/proxy and you need to set the matching `VIRTUAL_HOST` for the Pi-hole's container. Please read jwilder/proxy readme for more info if you have trouble.
|
||||
|
||||
### Installing on Ubuntu
|
||||
Modern releases of Ubuntu (17.10+) include [`systemd-resolved`](http://manpages.ubuntu.com/manpages/bionic/man8/systemd-resolved.service.8.html) which is configured by default to implement a caching DNS stub resolver. This will prevent pi-hole from listening on port 53.
|
||||
The stub resolver should be disabled with: `sudo sed -r -i.orig 's/#?DNSStubListener=yes/DNSStubListener=no/g' /etc/systemd/resolved.conf`
|
||||
|
||||
This will not change the nameserver settings, which point to the stub resolver thus preventing DNS resolution. Change the `/etc/resolv.conf` symlink to point to `/run/systemd/resolve/resolv.conf`, which is automatically updated to follow the system's [`netplan`](https://netplan.io/):
|
||||
`sudo sh -c 'rm /etc/resolv.conf && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf'`
|
||||
|
||||
Once pi-hole is installed, you'll want to configure your clients to use it ([see here](https://discourse.pi-hole.net/t/how-do-i-configure-my-devices-to-use-pi-hole-as-their-dns-server/245)). If you used the symlink above, your docker host will either use whatever is served by DHCP, or whatever static setting you've configured. If you want to explicitly set your docker host's nameservers you can edit the netplan(s) found at `/etc/netplan`, then run `sudo netplan apply`.
|
||||
Example netplan:
|
||||
```yaml
|
||||
network:
|
||||
ethernets:
|
||||
ens160:
|
||||
dhcp4: true
|
||||
dhcp4-overrides:
|
||||
use-dns: false
|
||||
nameservers:
|
||||
addresses: [127.0.0.1]
|
||||
version: 2
|
||||
```
|
||||
|
||||
Note that it is also possible to disable `systemd-resolved` entirely. However, this can cause problems with name resolution in vpns ([see bug report](https://bugs.launchpad.net/network-manager/+bug/1624317)). It also disables the functionality of netplan since systemd-resolved is used as the default renderer ([see `man netplan`](http://manpages.ubuntu.com/manpages/bionic/man5/netplan.5.html#description)). If you choose to disable the service, you will need to manually set the nameservers, for example by creating a new `/etc/resolv.conf`.
|
||||
|
||||
Users of older Ubuntu releases (circa 17.04) will need to disable dnsmasq.
|
||||
|
||||
## Docker tags and versioning
|
||||
|
||||
The primary docker tags / versions are explained in the following table. [Click here to see the full list of x86 tags](https://store.docker.com/community/images/pihole/pihole/tags) ([arm tags are here](https://store.docker.com/community/images/pihole/pihole/tags)), I also try to tag with the specific version of Pi-hole Core for version archival purposes, the web version that comes with the core releases should be in the [GitHub Release notes](https://github.com/pi-hole/docker-pi-hole/releases).
|
||||
The primary docker tags / versions are explained in the following table. [Click here to see the full list of tags](https://store.docker.com/community/images/pihole/pihole/tags) ([arm tags are here](https://store.docker.com/community/images/pihole/pihole/tags)), I also try to tag with the specific version of Pi-hole Core for version archival purposes, the web version that comes with the core releases should be in the [GitHub Release notes](https://github.com/pi-hole/docker-pi-hole/releases).
|
||||
|
||||
| tag | architecture | description | Dockerfile |
|
||||
| --- | ------------ | ----------- | ---------- |
|
||||
| `latest` / `v4.0` | x86 | Debian x86 image, container running lighttpd and dnsmasq | [Dockerfile](https://github.com/pi-hole/docker-pi-hole/blob/master/Dockerfile_amd64) |
|
||||
| `latest` | auto detect | x86, arm, or arm64 container, docker auto detects your architecture. | [Dockerfile](https://github.com/pi-hole/docker-pi-hole/blob/master/Dockerfile_amd64) |
|
||||
| `v4.0.0-1` | auto detect | Versioned tags, if you want to pin against a specific version, use one of these | |
|
||||
| `v4.0.0-1_<arch>` | based on tag | Specific architectures tags | |
|
||||
| `dev` | auto detect | like latest tag, but for the development branch (pushed occasionally) | |
|
||||
|
||||
### `pihole/pihole:latest` [](https://microbadger.com/images/pihole/pihole "Get your own image badge on microbadger.com") [](https://microbadger.com/images/pihole/pihole "Get your own version badge on microbadger.com") [](https://microbadger.com/images/pihole/pihole "Get your own version badge on microbadger.com")
|
||||
|
||||
This version of the docker aims to be as close to a standard Pi-hole installation by using the recommended base OS and the exact configs and scripts (minimally modified to get them working). This enables fast updating when an update comes from Pi-hole.
|
||||
|
||||
### `pihole/pihole:v4.0_armhf` [](https://microbadger.com/images/pihole/pihole "Get your own image badge on microbadger.com")
|
||||
Latest version of ARMv7-compatible pihole image
|
||||
|
||||
https://hub.docker.com/r/pihole/pihole/tags/
|
||||
|
||||
### `pihole/pihole:v4.0_aarch64` [](https://microbadger.com/images/pihole/pihole "Get your own image badge on microbadger.com")
|
||||
Latest version of ARM64-compatible pihole image
|
||||
|
||||
https://hub.docker.com/r/pihole/pihole/tags/
|
||||
|
||||
## Upgrading, Persistence, and Customizations
|
||||
|
||||
The standard Pi-hole customization abilities apply to this docker, but with docker twists such as using docker volume mounts to map host stored file configurations over the container defaults. Volumes are also important to persist the configuration in case you have removed the Pi-hole container which is a typical docker upgrade pattern.
|
||||
|
||||
### Upgrading
|
||||
### Upgrading / Reconfiguring
|
||||
|
||||
`pihole -up` is disabled. Upgrade the docker way instead, please. Long-living docker containers are not the docker way since they aim to be portable and reproducible, why not re-create them often! Just to prove you can.
|
||||
Do not attempt to upgrade (`pihole -up`) or reconfigure (`pihole -r`). New images will be released for upgrades, upgrading by replacing your old container with a fresh upgraded image is the 'docker way'. Long-living docker containers are not the docker way since they aim to be portable and reproducible, why not re-create them often! Just to prove you can.
|
||||
|
||||
0. Read the release notes for both this Docker release and the Pi-hole release
|
||||
* This will help you avoid common problems due to any known issues with upgrading or newly required arguments or variables
|
||||
* We will try to put common break/fixes at the top of this readme too
|
||||
1. Download the latest version of the image: `docker pull pihole/pihole`
|
||||
2. Throw away your container: `docker rm -f pihole`
|
||||
* **Warning** When removing your pihole container you may be stuck without DNS until step 3; **docker pull** before **docker rm -f** to avoid DNS inturruption **OR** always have a fallback DNS server configured in DHCP to avoid this problem altogether.
|
||||
* If you care about your data (logs/customizations), make sure you have it volume-mapped or it will be deleted in this step.
|
||||
* **Warning** When removing your pihole container you may be stuck without DNS until step 3; **docker pull** before **docker rm -f** to avoid DNS inturruption **OR** always have a fallback DNS server configured in DHCP to avoid this problem altogether.
|
||||
* If you care about your data (logs/customizations), make sure you have it volume-mapped or it will be deleted in this step.
|
||||
3. Start your container with the newer base image: `docker run <args> pihole/pihole` (`<args>` being your preferred run volumes and env vars)
|
||||
|
||||
Why is this style of upgrading good? A couple reasons: Everyone is starting from the same base image which has been tested to know it works. No worrying about upgrading from A to B, B to C, or A to C is required when rolling out updates, it reducing complexity, and simply allows a 'fresh start' every time while preserving customizations with volumes. Basically I'm encouraging [phoenix servers](https://www.google.com/?q=phoenix+servers) principles for your containers.
|
||||
Why is this style of upgrading good? A couple reasons: Everyone is starting from the same base image which has been tested to known it works. No worrying about upgrading from A to B, B to C, or A to C is required when rolling out updates, it reducing complexity, and simply allows a 'fresh start' every time while preserving customizations with volumes. Basically I'm encouraging [phoenix server](https://www.google.com/?q=phoenix+servers) principles for your containers.
|
||||
|
||||
To reconfigure Pi-hole you'll either need to use an existing container environment variables or if there is no a variable for what you need, use the web UI or CLI commands.
|
||||
|
||||
### Pi-hole features
|
||||
|
||||
@@ -161,10 +231,17 @@ As long as your docker system service auto starts on boot and you run your conta
|
||||
|
||||
NOTE: After initial run you may need to manually stop the docker container with "docker stop pihole" before the systemctl can start controlling the container.
|
||||
|
||||
## Development
|
||||
## Note on Capabilities
|
||||
|
||||
Development image tags coming soon
|
||||
DNSMasq / [FTLDNS](https://docs.pi-hole.net/ftldns/in-depth/#linux-capabilities) expects to have the following capabilities available:
|
||||
- `CAP_NET_BIND_SERVICE`: Allows FTLDNS binding to TCP/UDP sockets below 1024 (specifically DNS service on port 53)
|
||||
- `CAP_NET_RAW`: use raw and packet sockets (needed for handling DHCPv6 requests, and verifying that an IP is not in use before leasing it)
|
||||
- `CAP_NET_ADMIN`: modify routing tables and other network-related operations (in particular inserting an entry in the neighbor table to answer DHCP requests using unicast packets)
|
||||
|
||||
This image automatically grants those capabilities, if available, to the FTLDNS process, even when run as non-root.\
|
||||
By default, docker does not include the `NET_ADMIN` capability for non-privileged containers, and it is recommended to explicitly add it to the container using `--cap-add=NET_ADMIN`.\
|
||||
However, if DHCP and IPv6 Router Advertisements are not in use, it should be safe to skip it. For the most paranoid, it should even be possible to explicitly drop the `NET_RAW` capability to prevent FTLDNS from automatically gaining it.
|
||||
|
||||
# User Feedback
|
||||
|
||||
Please report issues on the [GitHub project](https://github.com/pi-hole/docker-pi-hole) when you suspect something docker related. Pi-hole questions are best answered on our [user forums](https://github.com/pi-hole/pi-hole/blob/master/README.md#get-help-or-connect-with-us-on-the-web). Ping me (@diginc) on the forums if it's a docker container and you're not sure if it's docker related.
|
||||
Please report issues on the [GitHub project](https://github.com/pi-hole/docker-pi-hole) when you suspect something docker related. Pi-hole or general docker questions are best answered on our [user forums](https://github.com/pi-hole/pi-hole/blob/master/README.md#get-help-or-connect-with-us-on-the-web). Ping me (@diginc) on the forums if it's a docker container and you're not sure if it's docker related.
|
||||
|
||||
@@ -14,7 +14,7 @@ To run the Dockerfile templating, image build, and tests all in one command just
|
||||
|
||||
Docker images built by `tox` or `python Dockerfile.py` are named the same but stripped of the `pihole/` docker repository namespace.
|
||||
|
||||
e.g. `pi-hole:debian_amd64` or `pi-hole-multiarch:debian_aarch64`
|
||||
e.g. `pi-hole:debian_amd64` or `pi-hole-multiarch:debian_arm64`
|
||||
|
||||
You can run the multiarch images on an amd64 development system if you [enable binfmt-support as described in the multiarch image docs](https://hub.docker.com/r/multiarch/multiarch/debian-debootstrap/)
|
||||
|
||||
|
||||
@@ -1,4 +1,40 @@
|
||||
#!/bin/bash
|
||||
# Some of the bash_functions use variables from these core pi-hole/web scripts
|
||||
. /opt/pihole/webpage.sh
|
||||
|
||||
docker_checks() {
    # Warn about common /etc/resolv.conf misconfigurations inside the
    # container, then dump the file so the user can see the active resolvers.
    warn_msg='WARNING Misconfigured DNS in /etc/resolv.conf'
    ns_count="$(grep -c nameserver /etc/resolv.conf)"
    ns_primary="$(grep nameserver /etc/resolv.conf | head -1)"
    ns_primary="${ns_primary/nameserver /}"
    warned=false

    # Fewer than two nameserver entries leaves no backup resolver.
    if [ "$ns_count" -lt 2 ]; then
        echo "$warn_msg: Two DNS servers are recommended, 127.0.0.1 and any backup server"
        warned=true
    fi

    # The container's first resolver should be the local Pi-hole itself.
    if [ "$ns_primary" != "127.0.0.1" ]; then
        echo "$warn_msg: Primary DNS should be 127.0.0.1 (found ${ns_primary})"
        warned=true
    fi

    if [ "$warned" = "false" ]; then
        echo "OK: Checks passed for /etc/resolv.conf DNS servers"
    fi

    echo
    cat /etc/resolv.conf
}
|
||||
|
||||
fix_capabilities() {
    # Grant pihole-FTL the capabilities it needs (bind privileged ports,
    # raw sockets, net admin) so it can run as a non-root DNSMASQ_USER.
    # Initialize ret: without this, a value inherited from the environment or
    # a previous command could falsely trigger the failure branch even when
    # setcap succeeds.
    local ret=0
    setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN+ei "$(which pihole-FTL)" || ret=$?

    # setcap failure is only fatal when FTL must run unprivileged; as root it
    # does not need file capabilities to bind port 53.
    if [[ $ret -ne 0 && "${DNSMASQ_USER:-root}" != "root" ]]; then
        echo "ERROR: Failed to set capabilities for pihole-FTL. Cannot run as non-root."
        exit 1
    fi
}
|
||||
|
||||
prepare_configs() {
|
||||
# Done in /start.sh, don't do twice
|
||||
@@ -9,6 +45,7 @@ prepare_configs() {
|
||||
set +e
|
||||
mkdir -p /var/run/pihole /var/log/pihole
|
||||
# Re-apply perms from basic-install over any volume mounts that may be present (or not)
|
||||
# Also similar to preflights for FTL https://github.com/pi-hole/pi-hole/blob/master/advanced/Templates/pihole-FTL.service
|
||||
chown pihole:root /etc/lighttpd
|
||||
chown pihole:pihole "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" "/var/log/pihole" "${regexFile}"
|
||||
chmod 644 "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf"
|
||||
@@ -32,15 +69,13 @@ prepare_configs() {
|
||||
# Stash and pop the user password to avoid setting the password to the hashed setupVar variable
|
||||
WEBPASSWORD="${USERWEBPASSWORD}"
|
||||
# Clean up old before re-writing the required setupVars
|
||||
sed -i.update.bak '/PIHOLE_INTERFACE/d;/IPV4_ADDRESS/d;/IPV6_ADDRESS/d;/PIHOLE_DNS_1/d;/PIHOLE_DNS_2/d;/QUERY_LOGGING/d;/INSTALL_WEB_SERVER/d;/INSTALL_WEB_INTERFACE/d;/LIGHTTPD_ENABLED/d;' "${setupVars}"
|
||||
sed -i.update.bak '/PIHOLE_INTERFACE/d;/IPV4_ADDRESS/d;/IPV6_ADDRESS/d;/QUERY_LOGGING/d;/INSTALL_WEB_SERVER/d;/INSTALL_WEB_INTERFACE/d;/LIGHTTPD_ENABLED/d;' "${setupVars}"
|
||||
fi
|
||||
# echo the information to the user
|
||||
{
|
||||
echo "PIHOLE_INTERFACE=${PIHOLE_INTERFACE}"
|
||||
echo "IPV4_ADDRESS=${IPV4_ADDRESS}"
|
||||
echo "IPV6_ADDRESS=${IPV6_ADDRESS}"
|
||||
echo "PIHOLE_DNS_1=${PIHOLE_DNS_1}"
|
||||
echo "PIHOLE_DNS_2=${PIHOLE_DNS_2}"
|
||||
echo "QUERY_LOGGING=${QUERY_LOGGING}"
|
||||
echo "INSTALL_WEB_SERVER=${INSTALL_WEB_SERVER}"
|
||||
echo "INSTALL_WEB_INTERFACE=${INSTALL_WEB_INTERFACE}"
|
||||
@@ -49,12 +84,7 @@ prepare_configs() {
|
||||
}
|
||||
|
||||
validate_env() {
|
||||
if [ -z "$ServerIP" ] ; then
|
||||
echo "ERROR: To function correctly you must pass an environment variables of 'ServerIP' into the docker container with the IP of your docker host from which you are passing web (80) and dns (53) ports from"
|
||||
exit 1
|
||||
fi;
|
||||
|
||||
# Required ServerIP is a valid IP
|
||||
# Optional ServerIP is a valid IP
|
||||
# nc won't throw any text based errors when it times out connecting to a valid IP, otherwise it complains about the DNS name being garbage
|
||||
# if nc doesn't behave as we expect on a valid IP the routing table should be able to look it up and return a 0 retcode
|
||||
if [[ "$(nc -4 -w1 -z "$ServerIP" 53 2>&1)" != "" ]] || ! ip route get "$ServerIP" > /dev/null ; then
|
||||
@@ -86,14 +116,17 @@ setup_dnsmasq_dns() {
|
||||
dnsType='custom'
|
||||
fi;
|
||||
|
||||
# TODO With the addition of this to /start.sh this needs a refactor
|
||||
if [ ! -f /.piholeFirstBoot ] ; then
|
||||
local setupDNS1="$(grep 'PIHOLE_DNS_1' ${setupVars})"
|
||||
local setupDNS2="$(grep 'PIHOLE_DNS_2' ${setupVars})"
|
||||
setupDNS1="${setupDNS1/PIHOLE_DNS_1=/}"
|
||||
setupDNS2="${setupDNS2/PIHOLE_DNS_2=/}"
|
||||
if [[ -n "$DNS1" && -n "$setupDNS1" ]] || \
|
||||
[[ -n "$DNS2" && -n "$setupDNS2" ]] ; then
|
||||
echo "Docker DNS variables not used"
|
||||
fi
|
||||
echo "Existing DNS servers used"
|
||||
echo "Existing DNS servers used (${setupDNS1:-unset} & ${setupDNS2:-unset})"
|
||||
return
|
||||
fi
|
||||
|
||||
@@ -102,8 +135,9 @@ setup_dnsmasq_dns() {
|
||||
change_setting "PIHOLE_DNS_1" "${DNS1}"
|
||||
fi
|
||||
if [[ -n "$DNS2" && -z "$setupDNS2" ]] ; then
|
||||
if [ "$DNS2" = "no" ] ; then
|
||||
if [[ "$DNS2" == "no" ]] ; then
|
||||
delete_setting "PIHOLE_DNS_2"
|
||||
unset PIHOLE_DNS_2
|
||||
else
|
||||
change_setting "PIHOLE_DNS_2" "${DNS2}"
|
||||
fi
|
||||
@@ -120,6 +154,14 @@ setup_dnsmasq_interface() {
|
||||
[ -n "$interface" ] && change_setting "PIHOLE_INTERFACE" "${interface}"
|
||||
}
|
||||
|
||||
setup_dnsmasq_listening_behaviour() {
    # Persist the DNSMASQ_LISTENING setting via the pi-hole change_setting
    # helper; an empty/missing argument leaves the existing value untouched.
    local behaviour="${1}"

    [ -z "$behaviour" ] && return
    change_setting "DNSMASQ_LISTENING" "${behaviour}"
}
|
||||
|
||||
setup_dnsmasq_config_if_missing() {
|
||||
# When fresh empty directory volumes are used we miss this file
|
||||
if [ ! -f /etc/dnsmasq.d/01-pihole.conf ] ; then
|
||||
@@ -131,13 +173,30 @@ setup_dnsmasq() {
|
||||
local dns1="$1"
|
||||
local dns2="$2"
|
||||
local interface="$3"
|
||||
local dnsmasq_listening_behaviour="$4"
|
||||
# Coordinates
|
||||
setup_dnsmasq_config_if_missing
|
||||
setup_dnsmasq_dns "$dns1" "$dns2"
|
||||
setup_dnsmasq_interface "$interface"
|
||||
setup_dnsmasq_listening_behaviour "$dnsmasq_listening_behaviour"
|
||||
setup_dnsmasq_user "${DNSMASQ_USER}"
|
||||
ProcessDNSSettings
|
||||
}
|
||||
|
||||
setup_dnsmasq_user() {
    # Force the dnsmasq/FTL "user=" directive across all /etc/dnsmasq.*
    # config files to the requested user, adding the directive to
    # /etc/dnsmasq.conf if no file sets it yet.
    local DNSMASQ_USER="${1}"

    # Run DNSMASQ as root user to avoid SHM permission issues
    if grep -r -q '^\s*user=' /etc/dnsmasq.* ; then
        # Change user that had been set previously to root
        # Rewrite every file that already contains a user= line; sed 'c\'
        # replaces the whole matching line with the new directive.
        for f in $(grep -r -l '^\s*user=' /etc/dnsmasq.*); do
            sed -i "/^\s*user=/ c\user=${DNSMASQ_USER}" "${f}"
        done
    else
        # No existing directive anywhere: append one to the main config.
        echo -e "\nuser=${DNSMASQ_USER}" >> /etc/dnsmasq.conf
    fi
}
|
||||
|
||||
setup_dnsmasq_hostnames() {
|
||||
# largely borrowed from automated install/basic-install.sh
|
||||
local IPV4_ADDRESS="${1}"
|
||||
@@ -223,31 +282,42 @@ setup_web_port() {
|
||||
echo "Custom WEB_PORT set to $web_port"
|
||||
echo "INFO: Without proper router DNAT forwarding to $ServerIP:$web_port, you may not get any blocked websites on ads"
|
||||
|
||||
# Update lighttpd's port
|
||||
sed -i '/server.port\s*=\s*80\s*$/ s/80/'$WEB_PORT'/g' /etc/lighttpd/lighttpd.conf
|
||||
# Update any default port 80 references in the HTML
|
||||
grep -Prl '://127\.0\.0\.1/' /var/www/html/ | xargs -r sed -i "s|/127\.0\.0\.1/|/127.0.0.1:${WEB_PORT}/|g"
|
||||
grep -Prl '://pi\.hole/' /var/www/html/ | xargs -r sed -i "s|/pi\.hole/|/pi\.hole:${WEB_PORT}/|g"
|
||||
# Update lighttpd's port
|
||||
sed -i '/server.port\s*=\s*80\s*$/ s/80/'$WEB_PORT'/g' /etc/lighttpd/lighttpd.conf
|
||||
|
||||
}
|
||||
|
||||
setup_web_password() {
|
||||
generate_password() {
    # Assign WEBPASSWORD an 8-character random value, but only when the
    # variable is completely unset; ${WEBPASSWORD+x} expands non-empty for
    # any set value, so a deliberately empty password is left alone.
    if [ -n "${WEBPASSWORD+x}" ] ; then
        return
    fi
    WEBPASSWORD=$(tr -dc _A-Z-a-z-0-9 < /dev/urandom | head -c 8)
    echo "Assigning random password: $WEBPASSWORD"
}
|
||||
|
||||
setup_web_password() {
|
||||
setup_var_exists "WEBPASSWORD" && return
|
||||
|
||||
PASS="$1"
|
||||
# Turn bash debug on while setting up password (to print it)
|
||||
set -x
|
||||
if [[ "$WEBPASSWORD" == "" ]] ; then
|
||||
if [[ "$PASS" == "" ]] ; then
|
||||
echo "" | pihole -a -p
|
||||
else
|
||||
pihole -a -p "$WEBPASSWORD" "$WEBPASSWORD"
|
||||
echo "Setting password: ${PASS}"
|
||||
set -x
|
||||
pihole -a -p "$PASS" "$PASS"
|
||||
fi
|
||||
# Turn bash debug back off after print password setup
|
||||
# (subshell to null hides printing output)
|
||||
{ set +x; } 2>/dev/null
|
||||
|
||||
# To avoid printing this if conditional in bash debug, turn off debug above..
|
||||
# then re-enable debug if necessary (more code but cleaner printed output)
|
||||
if [ "${PH_VERBOSE:-0}" -gt 0 ] ; then
|
||||
# Turn bash debug back off after print password setup
|
||||
# (subshell to null hides printing output)
|
||||
{ set +x; } 2>/dev/null
|
||||
set -x
|
||||
fi
|
||||
}
|
||||
|
||||
@@ -263,7 +333,7 @@ setup_ipv4_ipv6() {
|
||||
test_configs() {
|
||||
set -e
|
||||
echo -n '::: Testing pihole-FTL DNS: '
|
||||
pihole-FTL test || exit 1
|
||||
sudo -u ${DNSMASQ_USER:-root} pihole-FTL test || exit 1
|
||||
echo -n '::: Testing lighttpd config: '
|
||||
lighttpd -t -f /etc/lighttpd/lighttpd.conf || exit 1
|
||||
set +e
|
||||
@@ -274,11 +344,11 @@ test_configs() {
|
||||
setup_blocklists() {
|
||||
local blocklists="$1"
|
||||
# Exit/return early without setting up adlists with defaults for any of the following conditions:
|
||||
# 1. NO_SETUP env is set
|
||||
# 1. skip_setup_blocklists env is set
|
||||
exit_string="(exiting ${FUNCNAME[0]} early)"
|
||||
|
||||
if [ -n "${NO_SETUP}" ]; then
|
||||
echo "::: NO_SETUP requested ($exit_string)"
|
||||
if [ -n "${skip_setup_blocklists}" ]; then
|
||||
echo "::: skip_setup_blocklists requested ($exit_string)"
|
||||
return
|
||||
fi
|
||||
|
||||
@@ -289,29 +359,24 @@ setup_blocklists() {
|
||||
return
|
||||
fi
|
||||
|
||||
# 3. If we're running tests, use a small list of fake tests to speed everything up
|
||||
if [ -n "$PYTEST" ]; then
|
||||
echo ":::::: Tests are being ran - stub out ad list fetching and add a fake ad block ${exit_string}"
|
||||
sed -i 's/^gravity_spinup$/#gravity_spinup # DISABLED FOR PYTEST/g' "$(which gravity.sh)"
|
||||
echo '123.123.123.123 testblock.pi-hole.local' > "/var/www/html/fake.list"
|
||||
echo 'file:///var/www/html/fake.list' > "${adlistFile}"
|
||||
echo 'http://localhost/fake.list' >> "${adlistFile}"
|
||||
return
|
||||
fi
|
||||
|
||||
echo "::: ${FUNCNAME[0]} now setting default blocklists up: "
|
||||
echo "::: TIP: Use a docker volume for ${adlistFile} if you want to customize for first boot"
|
||||
> "${adlistFile}"
|
||||
# Just copied outa the choices for now
|
||||
# https://github.com/pi-hole/pi-hole/blob/FTLDNS/automated%20install/basic-install.sh#L1014
|
||||
echo "https://raw.githubusercontent.com/StevenBlack/hosts/master/hosts" >> "${adlistFile}"
|
||||
echo "https://mirror1.malwaredomains.com/files/justdomains" >> "${adlistFile}"
|
||||
echo "http://sysctl.org/cameleon/hosts" >> "${adlistFile}"
|
||||
echo "https://zeustracker.abuse.ch/blocklist.php?download=domainblocklist" >> "${adlistFile}"
|
||||
echo "https://s3.amazonaws.com/lists.disconnect.me/simple_tracking.txt" >> "${adlistFile}"
|
||||
echo "https://s3.amazonaws.com/lists.disconnect.me/simple_ad.txt" >> "${adlistFile}"
|
||||
echo "https://hosts-file.net/ad_servers.txt" >> "${adlistFile}"
|
||||
installDefaultBlocklists
|
||||
|
||||
echo "::: Blocklists (${adlistFile}) now set to:"
|
||||
cat "${adlistFile}"
|
||||
}
|
||||
|
||||
setup_var_exists() {
    # Return success (0) when KEY already appears in ${setupVars}; when a
    # second argument is supplied, additionally require the key to carry a
    # non-empty value. Prints a notice on a hit so callers can skip
    # re-initialising a pre-existing setting.
    local KEY="$1"
    # Initialize explicitly: the original only declared REQUIRED_VALUE inside
    # the 'if', so with no second argument a same-named variable from the
    # caller's environment would silently leak into the grep pattern.
    local REQUIRED_VALUE=""
    if [ -n "$2" ]; then
        REQUIRED_VALUE="[^\n]+"
    fi
    if grep -Pq "^${KEY}=${REQUIRED_VALUE}" "$setupVars"; then
        echo "::: Pre existing ${KEY} found"
        true
    else
        false
    fi
}
|
||||
|
||||
|
||||
51
circle-deploy.sh
Executable file
51
circle-deploy.sh
Executable file
@@ -0,0 +1,51 @@
|
||||
#!/usr/bin/env bash
|
||||
set -ex
|
||||
# Circle CI Job for merging/deploying all architectures (post-test passing)
|
||||
. circle-vars.sh
|
||||
|
||||
annotate() {
    # Annotate one architecture image inside a docker manifest list, pulling
    # the --arch/--variant flags from the global annotate_map. An optional
    # non-empty $dry prefix (e.g. 'echo') turns this into a dry run.
    local manifest_list=$1
    local arch_image=$2
    local arch_key=$3
    local flags="${annotate_map[$arch_key]}"

    $dry docker manifest annotate ${manifest_list} ${arch_image} --os linux ${flags}
}
|
||||
|
||||
# Keep in sync with circle-ci job names
|
||||
declare -A annotate_map=(
|
||||
["amd64"]="--arch amd64"
|
||||
["armel"]="--arch arm --variant v6"
|
||||
["armhf"]="--arch arm --variant v7"
|
||||
["arm64"]="--arch arm64 --variant v8"
|
||||
)
|
||||
|
||||
# push image when not running a PR
|
||||
mkdir -p ~/.docker
|
||||
export DOCKER_CLI_EXPERIMENTAL='enabled'
|
||||
echo "{}" | jq '.experimental="enabled"' | tee ~/.docker/config.json
|
||||
docker info
|
||||
if [[ "$CIRCLE_PR_NUMBER" == "" ]]; then
|
||||
images=()
|
||||
echo $DOCKERHUB_PASS | docker login --username=$DOCKERHUB_USER --password-stdin
|
||||
ls -lat ./ci-workspace/
|
||||
cd ci-workspace
|
||||
|
||||
for arch in *; do
|
||||
arch_image=$(cat $arch)
|
||||
docker pull $arch_image
|
||||
images+=($arch_image)
|
||||
done
|
||||
|
||||
for docker_tag in $MULTIARCH_IMAGE $LATEST_IMAGE; do
|
||||
docker manifest create $docker_tag ${images[*]}
|
||||
for arch in *; do
|
||||
arch_image=$(cat $arch)
|
||||
docker pull $arch_image
|
||||
annotate "$docker_tag" "$arch_image" "$arch"
|
||||
done
|
||||
|
||||
docker manifest inspect "$docker_tag"
|
||||
docker manifest push "$docker_tag"
|
||||
done;
|
||||
fi
|
||||
30
circle-test.sh
Executable file
30
circle-test.sh
Executable file
@@ -0,0 +1,30 @@
|
||||
#!/usr/bin/env bash
|
||||
set -ex
|
||||
|
||||
# Circle CI Job for single architecture
|
||||
|
||||
# setup qemu/variables
|
||||
docker run --rm --privileged multiarch/qemu-user-static:register --reset > /dev/null
|
||||
. circle-vars.sh
|
||||
|
||||
if [[ "$1" == "enter" ]]; then
|
||||
enter="-it --entrypoint=sh"
|
||||
fi
|
||||
|
||||
# generate and build dockerfile
|
||||
docker build -t image_pipenv -f Dockerfile_build .
|
||||
env > /tmp/env
|
||||
docker run --rm \
|
||||
-v /var/run/docker.sock:/var/run/docker.sock \
|
||||
-v "$(pwd):/$(pwd)" \
|
||||
-w "$(pwd)" \
|
||||
-e PIPENV_CACHE_DIR="$(pwd)/.pipenv" \
|
||||
--env-file /tmp/env \
|
||||
$enter image_pipenv
|
||||
# docker run --rm -v /var/run/docker.sock:/var/run/docker.sock -v "$(pwd):/$(pwd)" -w "$(pwd)" --env-file /tmp/env image_pipenv /ws/Dockerfile.sh
|
||||
|
||||
docker images
|
||||
echo $DOCKERHUB_PASS | docker login --username=$DOCKERHUB_USER --password-stdin
|
||||
docker push $ARCH_IMAGE
|
||||
mkdir -p ci-workspace
|
||||
echo "$ARCH_IMAGE" | tee ./ci-workspace/$ARCH
|
||||
49
circle-vars.sh
Executable file
49
circle-vars.sh
Executable file
@@ -0,0 +1,49 @@
|
||||
set -a  # export every variable assigned below for downstream CI scripts

# Architecture defaults to the CircleCI job name (jobs are named per-arch).
CIRCLE_JOB="${CIRCLE_JOB:-}"
ARCH="${ARCH:-$CIRCLE_JOB}"
if [[ -z "$ARCH" ]] ; then
    echo "Defaulting arch to amd64"
    ARCH="amd64"
fi
BASE_IMAGE="${BASE_IMAGE:-${CIRCLE_PROJECT_REPONAME}}"
if [[ -z "$BASE_IMAGE" ]] ; then
    echo "Defaulting image name to pihole"
    BASE_IMAGE="pihole"
fi

# The docker image will match the github repo path by default but is overrideable with CircleCI environment
# BASE_IMAGE Overridable by Circle environment, including namespace (e.g. BASE_IMAGE=bobsmith/test-img:latest)
CIRCLE_PROJECT_USERNAME="${CIRCLE_PROJECT_USERNAME:-unset}"
HUB_NAMESPACE="${HUB_NAMESPACE:-$CIRCLE_PROJECT_USERNAME}"
[[ $CIRCLE_PROJECT_USERNAME == "pi-hole" ]] && HUB_NAMESPACE="pihole" # Custom mapping for namespace
[[ $BASE_IMAGE != *"/"* ]] && BASE_IMAGE="${HUB_NAMESPACE}/${BASE_IMAGE}" # If missing namespace, add one

# Secondary docker tag info (origin github branch/tag) will get prepended also
ARCH_IMAGE="$BASE_IMAGE"
[[ $ARCH_IMAGE != *":"* ]] && ARCH_IMAGE="${BASE_IMAGE}:$ARCH" # If tag missing, add circle job name as a tag (architecture here)

DOCKER_TAG="${CIRCLE_TAG:-$CIRCLE_BRANCH}"
if [[ -n "$DOCKER_TAG" ]]; then
    # remove latest tag if used (as part of a user provided image variable)
    ARCH_IMAGE="${ARCH_IMAGE/:latest/:}"
    # Prepend the github tag(version) or branch. image:arch = image:v1.0-arch
    ARCH_IMAGE="${ARCH_IMAGE/:/:${DOCKER_TAG}-}"
    # latest- sometimes has a trailing slash, remove it
    ARCH_IMAGE="${ARCH_IMAGE/%-/}"
fi

# To get latest released, cut a release on https://github.com/pi-hole/docker-pi-hole/releases (manually gated for quality control)
latest_tag=''
if ! latest_tag=$(curl -sI https://github.com/pi-hole/docker-pi-hole/releases/latest | grep --color=never -i Location | awk -F / '{print $NF}' | tr -d '[:cntrl:]'); then
    # BUGFIX: the original used 'print', which is not a bash builtin/command,
    # so the failure path itself failed with "command not found".
    echo "Failed to retrieve latest docker-pi-hole release metadata"
else
    if [[ "$DOCKER_TAG" == "$latest_tag" ]] ; then
        #LATEST_IMAGE="$BASE_IMAGE:latest"
        LATEST_IMAGE="$BASE_IMAGE:testing_latest_deleteme"
    fi
fi

MULTIARCH_IMAGE="$BASE_IMAGE:$DOCKER_TAG"

set +a
|
||||
@@ -1,57 +0,0 @@
|
||||
#!/bin/bash -ex
|
||||
# Script for manually pushing the docker arm images for diginc only
|
||||
# (no one else has docker repo permissions)
|
||||
if [ ! -f ~/.docker/config.json ] ; then
|
||||
echo "Error: You should setup your docker push authorization first"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
parse_git_branch() {
    # Print the currently checked-out branch name (the line 'git branch'
    # marks with a leading '* '); prints nothing outside a git work tree.
    git branch 2> /dev/null | sed -n 's/^\* \(.*\)/\1/p'
}
|
||||
|
||||
namespace='pihole'
|
||||
localimg='pihole'
|
||||
remoteimg="$namespace/$localimg"
|
||||
branch="$(parse_git_branch)"
|
||||
version="${version:-unset}"
|
||||
dry="${dry}"
|
||||
latest="${latest:-false}" # true as shell env var to deploy latest
|
||||
|
||||
if [[ -n "$dry" ]]; then dry='echo '; fi
|
||||
|
||||
if [[ "$version" == 'unset' ]]; then
|
||||
if [[ "$branch" == "master" ]]; then
|
||||
echo "Version number var is unset and master branch needs a version...pass in \$version variable!"
|
||||
exit 1
|
||||
elif [[ "$branch" = "release/"* ]]; then
|
||||
version="$(echo $branch | grep -Po 'v[\d\.-]*')"
|
||||
echo "Version number is being taken from this release branch $version"
|
||||
else
|
||||
version="$branch"
|
||||
remoteimg="${namespace}/${localimg}-dev"
|
||||
echo "Using the branch ($branch) for deployed image version since not passed in"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "# DEPLOYING:"
|
||||
echo "version: $version"
|
||||
echo "branch: $branch"
|
||||
[[ -n "$dry" ]] && echo "DRY RUN: $dry"
|
||||
echo "Example tagging: docker tag $localimg:$tag $remoteimg:${version}_amd64"
|
||||
|
||||
$dry ./Dockerfile.py --arch=amd64 --arch=armhf --arch=aarch64
|
||||
|
||||
# ARMv6/armel doesn't have a FTL binary for v4.0 pi-hole
|
||||
# for tag in debian_armhf debian_aarch64 debian_armel; do
|
||||
for tag in amd64 armhf aarch64; do
|
||||
# Version specific tags for ongoing history
|
||||
$dry docker tag $localimg:v4.0_$tag $remoteimg:${version}_${tag}
|
||||
$dry docker push pihole/pihole:${version}_${tag}
|
||||
# Floating latest tags (Conditionalize these to master?)
|
||||
if [[ "$tag" == 'amd64' ]] && [[ "$branch" == 'master' || "$latest" == 'true' ]] ; then
|
||||
# Latest tag should become a manifest for multiple architectures, not just amd64!
|
||||
$dry docker tag pihole:v4.0_amd64 pihole/pihole:latest
|
||||
$dry docker push pihole/pihole:latest
|
||||
fi;
|
||||
done
|
||||
63
docker-compose-jwilder-proxy.yml
Normal file
63
docker-compose-jwilder-proxy.yml
Normal file
@@ -0,0 +1,63 @@
|
||||
version: "3"
|
||||
|
||||
# https://github.com/pi-hole/docker-pi-hole/blob/master/README.md
|
||||
|
||||
services:
|
||||
jwilder-proxy:
|
||||
image: jwilder/nginx-proxy
|
||||
ports:
|
||||
- '80:80'
|
||||
environment:
|
||||
DEFAULT_HOST: pihole.yourDomain.lan
|
||||
volumes:
|
||||
- '/var/run/docker.sock:/tmp/docker.sock'
|
||||
restart: always
|
||||
|
||||
pihole:
|
||||
image: pihole/pihole:latest
|
||||
dns:
|
||||
- 127.0.0.1
|
||||
- 1.1.1.1
|
||||
ports:
|
||||
- '53:53/tcp'
|
||||
- '53:53/udp'
|
||||
- "67:67/udp"
|
||||
- '8053:80/tcp'
|
||||
- "443:443/tcp"
|
||||
volumes:
|
||||
- './etc-pihole/:/etc/pihole/'
|
||||
- './etc-dnsmasq.d/:/etc/dnsmasq.d/'
|
||||
# run `touch ./var-log/pihole.log` first unless you like errors
|
||||
# - './var-log/pihole.log:/var/log/pihole.log'
|
||||
# Recommended but not required (DHCP needs NET_ADMIN)
|
||||
# https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
environment:
|
||||
ServerIP: 192.168.41.55
|
||||
PROXY_LOCATION: pihole
|
||||
VIRTUAL_HOST: pihole.yourDomain.lan
|
||||
VIRTUAL_PORT: 80
|
||||
extra_hosts:
|
||||
# Resolve to nothing domains (terminate connection)
|
||||
- 'nw2master.bioware.com nwn2.master.gamespy.com:0.0.0.0'
|
||||
# LAN hostnames for other docker containers using jwilder
|
||||
- 'yourDomain.lan:192.168.41.55'
|
||||
- 'pihole pihole.yourDomain.lan:192.168.41.55'
|
||||
- 'ghost ghost.yourDomain.lan:192.168.41.55'
|
||||
- 'wordpress wordpress.yourDomain.lan:192.168.41.55'
|
||||
restart: always
|
||||
|
||||
# Another container you might want to have running through the proxy
|
||||
# Note it also has ENV Vars like pihole and a host under pihole's extra_hosts
|
||||
# ghost:
|
||||
# image: fractalf/ghost
|
||||
# ports:
|
||||
# - '2368:2368/tcp'
|
||||
# volumes:
|
||||
# - '/etc/ghost/:/ghost-override'
|
||||
# environment:
|
||||
# PROXY_LOCATION: ghost
|
||||
# VIRTUAL_HOST: ghost.yourDomain.lan
|
||||
# VIRTUAL_PORT: 2368
|
||||
# restart: always
|
||||
@@ -1,25 +1,25 @@
|
||||
Please note the following about this [traefik](https://traefik.io/) example for pihole.
|
||||
Please note the following about this [traefik](https://traefik.io/) example for Docker Pi-hole
|
||||
|
||||
- Still requires standard pi-hole setup steps, make sure you've gone through the [README](https://github.com/pihole/docker-pi-hole/blob/master/README.md) and understand how to setup pihole without traefik first
|
||||
- Still requires standard Pi-hole setup steps, make sure you've gone through the [README](https://github.com/pihole/docker-pi-hole/blob/master/README.md) and understand how to setup Pi-hole without traefik first
|
||||
- Update these things before using:
|
||||
- set instances of `homedomain.lan` below to your home domain (typically set in your router)
|
||||
- set your pihole ENV WEBPASSWORD if you don't want a random admin pass
|
||||
- set your Pi-hole ENV WEBPASSWORD if you don't want a random admin pass
|
||||
- This works for me, Your mileage may vary!
|
||||
- For support, do your best to figure out traefik issues on your own:
|
||||
- by looking at logs and traefik web interface on port 8080
|
||||
- also by searching the web and searching their forums/docker issues for similar question/problems
|
||||
- Port 8053 is mapped directly to pihole to serve as a back door without going through traefik
|
||||
- Port 8053 is mapped directly to Pi-hole to serve as a back door without going through traefik
|
||||
- There is some delay after starting your container before traefik forwards the HTTP traffic correctly, give it a minute
|
||||
|
||||
```
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
#
|
||||
#
|
||||
traefik:
|
||||
container_name: traefik
|
||||
domainname: homedomain.lan
|
||||
|
||||
|
||||
image: traefik
|
||||
restart: unless-stopped
|
||||
# Note I opt to whitelist certain apps for exposure to traefik instead of auto discovery
|
||||
@@ -42,18 +42,21 @@ services:
|
||||
pihole:
|
||||
container_name: pihole
|
||||
domainname: homedomain.lan
|
||||
|
||||
|
||||
image: pihole/pihole:latest
|
||||
dns:
|
||||
- 127.0.0.1
|
||||
- 1.1.1.1
|
||||
ports:
|
||||
- '0.0.0.0:53:53/tcp'
|
||||
- '0.0.0.0:53:53/udp'
|
||||
- '0.0.0.0:67:67/udp'
|
||||
- '0.0.0.0:8053:80/tcp'
|
||||
volumes:
|
||||
# run `touch ./pihole.log` first unless you like errors
|
||||
# - ./pihole.log:/var/log/pihole.log
|
||||
- ./etc-pihole/:/etc/pihole/
|
||||
- ./etc-dnsmasqd/:/etc/dnsmasq.d/
|
||||
# run `touch ./pihole.log` first unless you like errors
|
||||
# - ./pihole.log:/var/log/pihole.log
|
||||
environment:
|
||||
ServerIP: 192.168.1.50
|
||||
PROXY_LOCATION: pihole
|
||||
@@ -103,4 +106,3 @@ traefik | time="2018-03-07T18:57:42Z" level=info msg="Server configuration re
|
||||
```
|
||||
|
||||
Also your port 8080 should list the Route/Rule for pihole and backend-pihole container.
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
doco-example.yml
|
||||
32
docker-compose.yml
Normal file
32
docker-compose.yml
Normal file
@@ -0,0 +1,32 @@
|
||||
version: "3"
|
||||
|
||||
# https://github.com/pi-hole/docker-pi-hole/blob/master/README.md
|
||||
|
||||
services:
|
||||
pihole:
|
||||
container_name: pihole
|
||||
image: pihole/pihole:latest
|
||||
# For DHCP it is recommended to remove these ports and instead add: network_mode: "host"
|
||||
ports:
|
||||
- "53:53/tcp"
|
||||
- "53:53/udp"
|
||||
- "67:67/udp"
|
||||
- "80:80/tcp"
|
||||
- "443:443/tcp"
|
||||
environment:
|
||||
TZ: 'America/Chicago'
|
||||
# WEBPASSWORD: 'set a secure password here or it will be random'
|
||||
# Volumes store your data between container upgrades
|
||||
volumes:
|
||||
- './etc-pihole/:/etc/pihole/'
|
||||
- './etc-dnsmasq.d/:/etc/dnsmasq.d/'
|
||||
# run `touch ./var-log/pihole.log` first unless you like errors
|
||||
# - './var-log/pihole.log:/var/log/pihole.log'
|
||||
dns:
|
||||
- 127.0.0.1
|
||||
- 1.1.1.1
|
||||
# Recommended but not required (DHCP needs NET_ADMIN)
|
||||
# https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
restart: unless-stopped
|
||||
@@ -1,30 +1,32 @@
|
||||
#!/bin/bash
|
||||
# Lookups may not work for VPN / tun0
|
||||
IP_LOOKUP="$(ip route get 8.8.8.8 | awk '{ print $NF; exit }')"
|
||||
IPv6_LOOKUP="$(ip -6 route get 2001:4860:4860::8888 | awk '{for(i=1;i<=NF;i++) if ($i=="src") print $(i+1)}')"
|
||||
|
||||
# Just hard code these to your docker server's LAN IP if lookups aren't working
|
||||
IP="${IP:-$IP_LOOKUP}" # use $IP, if set, otherwise IP_LOOKUP
|
||||
IPv6="${IPv6:-$IPv6_LOOKUP}" # use $IPv6, if set, otherwise IP_LOOKUP
|
||||
# https://github.com/pi-hole/docker-pi-hole/blob/master/README.md
|
||||
|
||||
# Default of directory you run this from, update to where ever.
|
||||
DOCKER_CONFIGS="$(pwd)"
|
||||
|
||||
echo "### Make sure your IPs are correct, hard code ServerIP ENV VARs if necessary\nIP: ${IP}\nIPv6: ${IPv6}"
|
||||
|
||||
# Default ports + daemonized docker container
|
||||
docker run -d \
|
||||
--name pihole \
|
||||
-p 53:53/tcp -p 53:53/udp \
|
||||
-p 67:67/udp \
|
||||
-p 80:80 \
|
||||
-p 443:443 \
|
||||
-v "${DOCKER_CONFIGS}/pihole/:/etc/pihole/" \
|
||||
-v "${DOCKER_CONFIGS}/dnsmasq.d/:/etc/dnsmasq.d/" \
|
||||
-e ServerIP="${IP}" \
|
||||
-e ServerIPv6="${IPv6}" \
|
||||
-e TZ="America/Chicago" \
|
||||
-v "$(pwd)/etc-pihole/:/etc/pihole/" \
|
||||
-v "$(pwd)/etc-dnsmasq.d/:/etc/dnsmasq.d/" \
|
||||
--dns=127.0.0.1 --dns=1.1.1.1 \
|
||||
--restart=unless-stopped \
|
||||
pihole/pihole:latest
|
||||
|
||||
echo -n "Your password for https://${IP}/admin/ is "
|
||||
docker logs pihole 2> /dev/null | grep 'password:'
|
||||
printf 'Starting up pihole container '
|
||||
for i in $(seq 1 20); do
|
||||
if [ "$(docker inspect -f "{{.State.Health.Status}}" pihole)" == "healthy" ] ; then
|
||||
printf ' OK'
|
||||
echo -e "\n$(docker logs pihole 2> /dev/null | grep 'password:') for your pi-hole: https://${IP}/admin/"
|
||||
exit 0
|
||||
else
|
||||
sleep 3
|
||||
printf '.'
|
||||
fi
|
||||
|
||||
if [ $i -eq 20 ] ; then
|
||||
echo -e "\nTimed out waiting for Pi-hole start, consult check your container logs for more info (\`docker logs pihole\`)"
|
||||
exit 1
|
||||
fi
|
||||
done;
|
||||
|
||||
@@ -1,30 +0,0 @@
|
||||
version: "3"
|
||||
services:
|
||||
pihole:
|
||||
image: pihole/pihole:latest
|
||||
ports:
|
||||
- "53:53/tcp"
|
||||
- "53:53/udp"
|
||||
- "67:67/udp"
|
||||
- "80:80/tcp"
|
||||
- "443:443/tcp"
|
||||
environment:
|
||||
# enter your docker host IP here
|
||||
ServerIP:
|
||||
# IPv6 Address if your network supports it
|
||||
#ServerIPv6: 192.168.1.55
|
||||
# jwilder/proxy envs, see readme for more info
|
||||
PROXY_LOCATION: pihole
|
||||
VIRTUAL_HOST: pihole.yourdomain.local
|
||||
VIRTUAL_PORT: 80
|
||||
# Add your own custom hostnames you need for your domain
|
||||
extra_hosts:
|
||||
# Point any of the jwilder virtual_host addresses
|
||||
# to your docker host ip address
|
||||
- 'pihole.yourdomain.local:192.168.1.55'
|
||||
volumes:
|
||||
# - '/etc/pihole/:/etc/pihole/'
|
||||
# WARNING: if this log doesn't exist as a file on the host already
|
||||
# docker will try to create a directory in its place, making for lots of errors
|
||||
# - '/var/log/pihole.log:/var/log/pihole.log'
|
||||
restart: always
|
||||
1
doco-example.yml
Symbolic link
1
doco-example.yml
Symbolic link
@@ -0,0 +1 @@
|
||||
docker-compose.yml
|
||||
87
install.sh
87
install.sh
@@ -1,25 +1,29 @@
|
||||
#!/bin/bash -ex
|
||||
|
||||
mkdir -p /etc/pihole/
|
||||
mkdir -p /var/run/pihole
|
||||
# Production tags with valid web footers
|
||||
export CORE_TAG='v4.0'
|
||||
export WEB_TAG='v4.0'
|
||||
# Only use for pre-production / testing
|
||||
export USE_CUSTOM_BRANCHES=false
|
||||
export CORE_VERSION="$(cat /etc/docker-pi-hole-version)"
|
||||
export WEB_VERSION="v4.3.3"
|
||||
|
||||
if [[ $USE_CUSTOM_BRANCHES == true ]] ; then
|
||||
CORE_TAG='development'
|
||||
# Only use for pre-production / testing
|
||||
export CHECKOUT_BRANCHES=false
|
||||
# Search for release/* branch naming convention for custom checkouts
|
||||
if [[ "$CORE_VERSION" == *"release/"* ]] ; then
|
||||
CHECKOUT_BRANCHES=true
|
||||
fi
|
||||
|
||||
# Make pihole scripts fail searching for `systemctl`,
|
||||
# which fails pretty miserably in docker compared to `service`
|
||||
# For more info see docker/docker issue #7459
|
||||
which systemctl && mv "$(which systemctl)" /bin/no_systemctl
|
||||
apt-get update
|
||||
apt-get install --no-install-recommends -y curl procps ca-certificates
|
||||
curl -L -s $S6OVERLAY_RELEASE | tar xvzf - -C /
|
||||
mv /init /s6-init
|
||||
|
||||
# debconf-apt-progress seems to hang so get rid of it too
|
||||
which debconf-apt-progress && mv "$(which debconf-apt-progress)" /bin/no_debconf-apt-progress
|
||||
which debconf-apt-progress
|
||||
mv "$(which debconf-apt-progress)" /bin/no_debconf-apt-progress
|
||||
|
||||
# Get the install functions
|
||||
curl https://raw.githubusercontent.com/pi-hole/pi-hole/${CORE_TAG}/automated%20install/basic-install.sh > "$PIHOLE_INSTALL"
|
||||
curl https://raw.githubusercontent.com/pi-hole/pi-hole/${CORE_VERSION}/automated%20install/basic-install.sh > "$PIHOLE_INSTALL"
|
||||
PH_TEST=true . "${PIHOLE_INSTALL}"
|
||||
|
||||
# Preseed variables to assist with using --unattended install
|
||||
@@ -40,47 +44,70 @@ export USER=pihole
|
||||
distro_check
|
||||
|
||||
# fix permission denied to resolvconf post-inst /etc/resolv.conf moby/moby issue #1297
|
||||
apt-get -y install debconf-utils && echo resolvconf resolvconf/linkify-resolvconf boolean false | debconf-set-selections
|
||||
apt-get -y install debconf-utils
|
||||
echo resolvconf resolvconf/linkify-resolvconf boolean false | debconf-set-selections
|
||||
|
||||
# Tried this - unattended causes starting services during a build, should probably PR a flag to shut that off and switch to that
|
||||
#bash -ex "./${PIHOLE_INSTALL}" --unattended
|
||||
install_dependent_packages INSTALLER_DEPS[@]
|
||||
install_dependent_packages PIHOLE_DEPS[@]
|
||||
install_dependent_packages PIHOLE_WEB_DEPS[@]
|
||||
ln -s /bin/true /usr/local/bin/service
|
||||
bash -ex "./${PIHOLE_INSTALL}" --unattended
|
||||
rm /usr/local/bin/service
|
||||
# Old way of setting up
|
||||
#install_dependent_packages INSTALLER_DEPS[@]
|
||||
#install_dependent_packages PIHOLE_DEPS[@]
|
||||
#install_dependent_packages PIHOLE_WEB_DEPS[@]
|
||||
# IPv6 support for nc openbsd better than traditional
|
||||
apt-get install -y --force-yes netcat-openbsd
|
||||
|
||||
piholeGitUrl="${piholeGitUrl}"
|
||||
webInterfaceGitUrl="${webInterfaceGitUrl}"
|
||||
webInterfaceDir="${webInterfaceDir}"
|
||||
git clone "${piholeGitUrl}" "${PI_HOLE_LOCAL_REPO}"
|
||||
git clone "${webInterfaceGitUrl}" "${webInterfaceDir}"
|
||||
#git clone --branch "${CORE_VERSION}" --depth 1 "${piholeGitUrl}" "${PI_HOLE_LOCAL_REPO}"
|
||||
#git clone --branch "${WEB_VERSION}" --depth 1 "${webInterfaceGitUrl}" "${webInterfaceDir}"
|
||||
|
||||
tmpLog="/tmp/pihole-install.log"
|
||||
installLogLoc="${installLogLoc}"
|
||||
FTLdetect 2>&1 | tee "${tmpLog}"
|
||||
installPihole 2>&1 | tee "${tmpLog}"
|
||||
mv "${tmpLog}" /
|
||||
|
||||
if [[ $USE_CUSTOM_BRANCHES == true ]] ; then
|
||||
fetch_release_metadata() {
|
||||
local directory="$1"
|
||||
local version="$2"
|
||||
pushd "$directory"
|
||||
git fetch -t
|
||||
git remote set-branches origin '*'
|
||||
git fetch --depth 10
|
||||
git checkout master
|
||||
git reset --hard "$version"
|
||||
popd
|
||||
}
|
||||
|
||||
if [[ $CHECKOUT_BRANCHES == true ]] ; then
|
||||
ln -s /bin/true /usr/local/bin/service
|
||||
echo "$CORE_TAG" | tee /etc/pihole/ftlbranch
|
||||
echo y | bash -x pihole checkout core $CORE_TAG
|
||||
echo y | bash -x pihole checkout web $CORE_TAG
|
||||
ln -s /bin/true /usr/local/bin/update-rc.d
|
||||
echo y | bash -x pihole checkout core ${CORE_VERSION}
|
||||
echo y | bash -x pihole checkout web ${WEB_VERSION}
|
||||
echo y | bash -x pihole checkout ftl tweak/overhaul_overTime
|
||||
# If the v is forgotten: ${CORE_VERSION/v/}
|
||||
unlink /usr/local/bin/service
|
||||
unlink /usr/local/bin/update-rc.d
|
||||
else
|
||||
# Reset to our tags so version numbers get detected correctly
|
||||
pushd "${PI_HOLE_LOCAL_REPO}"; git reset --hard "${CORE_TAG}"; popd;
|
||||
pushd "${webInterfaceDir}"; git reset --hard "${WEB_TAG}"; popd;
|
||||
fetch_release_metadata "${PI_HOLE_LOCAL_REPO}" "${CORE_VERSION}"
|
||||
fetch_release_metadata "${webInterfaceDir}" "${WEB_VERSION}"
|
||||
fi
|
||||
# FTL Armel fix not in prod yet
|
||||
# Remove once https://github.com/pi-hole/pi-hole/commit/3fbb0ac8dde14b8edc1982ae3a2a021f3cf68477 is in master
|
||||
if [[ "$ARCH" == 'armel' ]]; then
|
||||
curl -o /usr/bin/pihole-FTL https://ftl.pi-hole.net/development/pihole-FTL-armel-native
|
||||
fi
|
||||
|
||||
sed -i 's/readonly //g' /opt/pihole/webpage.sh
|
||||
sed -i '/^WEBPASSWORD/d' /etc/pihole/setupVars.conf
|
||||
|
||||
# Replace the call to `updatePiholeFunc` in arg parse with new `unsupportedFunc`
|
||||
sed -i $'s/helpFunc() {/unsupportedFunc() {\\\n echo "Function not supported in Docker images"\\\n exit 0\\\n}\\\n\\\nhelpFunc() {/g' /usr/local/bin/pihole
|
||||
# Replace references to `updatePiholeFunc` with new `unsupportedFunc`
|
||||
sed -i $'s/updatePiholeFunc;;/unsupportedFunc;;/g' /usr/local/bin/pihole
|
||||
sed -i $'s/)\s*updatePiholeFunc/) unsupportedFunc/g' /usr/local/bin/pihole
|
||||
|
||||
touch /.piholeFirstBoot
|
||||
|
||||
# Fix dnsmasq in docker
|
||||
grep -q '^user=root' || echo -e '\nuser=root' >> /etc/dnsmasq.conf
|
||||
echo 'Docker install successful'
|
||||
|
||||
@@ -1,51 +0,0 @@
|
||||
applist:
|
||||
image: jwilder/nginx-proxy
|
||||
ports:
|
||||
- '80:80'
|
||||
environment:
|
||||
DEFAULT_HOST: pihole.yourDomain.lan
|
||||
volumes:
|
||||
- '/var/run/docker.sock:/tmp/docker.sock'
|
||||
restart: always
|
||||
|
||||
pihole:
|
||||
image: pihole/pihole:latest
|
||||
ports:
|
||||
- '53:53/tcp'
|
||||
- '53:53/udp'
|
||||
- "67:67/udp"
|
||||
- '8053:80/tcp'
|
||||
- "443:443/tcp"
|
||||
volumes:
|
||||
- '/var/log/pihole.log:/var/log/pihole.log'
|
||||
#net: host
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
environment:
|
||||
ServerIP: 192.168.41.55
|
||||
PROXY_LOCATION: pihole
|
||||
VIRTUAL_HOST: pihole.yourDomain.lan
|
||||
VIRTUAL_PORT: 80
|
||||
extra_hosts:
|
||||
# Resolve to nothing domains (terminate connection)
|
||||
- 'nw2master.bioware.com nwn2.master.gamespy.com:0.0.0.0'
|
||||
# LAN hostnames for other docker containers using jwilder
|
||||
- 'yourDomain.lan:192.168.41.55'
|
||||
- 'pihole pihole.yourDomain.lan:192.168.41.55'
|
||||
- 'ghost ghost.yourDomain.lan:192.168.41.55'
|
||||
- 'wordpress wordpress.yourDomain.lan:192.168.41.55'
|
||||
restart: always
|
||||
|
||||
# Another container you might want to have running through the proxy
|
||||
# Note it also have ENV Vars like pihole and a host under pihole's extra_hosts
|
||||
#ghost:
|
||||
# image: fractalf/ghost
|
||||
# ports:
|
||||
# - '2368:2368/tcp'
|
||||
# volumes:
|
||||
# - '/etc/ghost/:/ghost-override'
|
||||
# environment:
|
||||
# PROXY_LOCATION: ghost
|
||||
# VIRTUAL_HOST: ghost.yourDomain.lan
|
||||
# VIRTUAL_PORT: 2368
|
||||
# restart: always
|
||||
@@ -1,7 +1,53 @@
|
||||
docker-compose
|
||||
jinja2
|
||||
pytest
|
||||
pytest-cov
|
||||
pytest-xdist
|
||||
testinfra==1.5.1
|
||||
tox
|
||||
apipkg==1.5
|
||||
atomicwrites==1.3.0
|
||||
attrs==19.3.0
|
||||
backports.shutil-get-terminal-size==1.0.0
|
||||
backports.ssl-match-hostname==3.7.0.1
|
||||
bcrypt==3.1.7
|
||||
cached-property==1.5.1
|
||||
certifi==2019.11.28
|
||||
cffi==1.13.2
|
||||
chardet==3.0.4
|
||||
configparser==4.0.2
|
||||
contextlib2==0.6.0.post1
|
||||
coverage==5.0.1
|
||||
cryptography==2.8
|
||||
docker==4.1.0
|
||||
dockerpty==0.4.1
|
||||
docopt==0.6.2
|
||||
enum34==1.1.6
|
||||
execnet==1.7.1
|
||||
filelock==3.0.12
|
||||
funcsigs==1.0.2
|
||||
idna==2.8
|
||||
importlib-metadata==1.3.0
|
||||
ipaddress==1.0.23
|
||||
Jinja2==2.10.3
|
||||
jsonschema==3.2.0
|
||||
MarkupSafe==1.1.1
|
||||
more-itertools==5.0.0
|
||||
packaging==19.2
|
||||
pathlib2==2.3.5
|
||||
pluggy==0.13.1
|
||||
py==1.8.1
|
||||
pycparser==2.19
|
||||
pyparsing==2.4.6
|
||||
pyrsistent==0.15.6
|
||||
pytest==4.6.8
|
||||
pytest-cov==2.8.1
|
||||
pytest-forked==1.1.3
|
||||
pytest-xdist==1.31.0
|
||||
PyYAML==5.2
|
||||
requests==2.22.0
|
||||
scandir==1.10.0
|
||||
six==1.13.0
|
||||
subprocess32==3.5.4
|
||||
testinfra==3.3.0
|
||||
texttable==1.6.2
|
||||
toml==0.10.0
|
||||
tox==3.14.3
|
||||
urllib3==1.25.7
|
||||
virtualenv==16.7.9
|
||||
wcwidth==0.1.7
|
||||
websocket-client==0.57.0
|
||||
zipp==0.6.0
|
||||
|
||||
@@ -10,6 +10,10 @@ fi
|
||||
# used to start dnsmasq here for gravity to use...now that conflicts port 53
|
||||
|
||||
$bashCmd /start.sh
|
||||
# Gotta go fast, no time for gravity
|
||||
if [ -n "$PYTEST" ]; then
|
||||
sed -i 's/^gravity_spinup$/#gravity_spinup # DISABLED FOR PYTEST/g' "$(which gravity.sh)"
|
||||
fi
|
||||
gravity.sh
|
||||
|
||||
# Kill dnsmasq because s6 won't like it if it's running when s6 services start
|
||||
|
||||
@@ -1,5 +1,9 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
s6-echo "Starting pihole-FTL ($FTL_CMD)"
|
||||
s6-setuidgid root pihole-FTL $FTL_CMD
|
||||
s6-echo "Starting pihole-FTL ($FTL_CMD) as ${DNSMASQ_USER}"
|
||||
s6-setuidgid ${DNSMASQ_USER} pihole-FTL $FTL_CMD >/dev/null 2>&1
|
||||
|
||||
# Notes on above:
|
||||
# - DNSMASQ_USER default of root is in Dockerfile & can be overwritten by runtime container env
|
||||
# - /var/log/pihole*.log has FTL's output that no-daemon would normally print in FG too
|
||||
# prevent duplicating it in docker logs by sending to dev null
|
||||
|
||||
46
start.sh
46
start.sh
@@ -10,29 +10,53 @@ export HOSTNAME
|
||||
export WEBLOGDIR
|
||||
export DNS1
|
||||
export DNS2
|
||||
export DNSSEC
|
||||
export DNS_BOGUS_PRIV
|
||||
export DNS_FQDN_REQUIRED
|
||||
export INTERFACE
|
||||
export DNSMASQ_LISTENING_BEHAVIOUR="$DNSMASQ_LISTENING"
|
||||
export IPv6
|
||||
export WEB_PORT
|
||||
export PLAINWEBPASSWORD="$WEBPASSWORD"
|
||||
export CONDITIONAL_FORWARDING
|
||||
export CONDITIONAL_FORWARDING_IP
|
||||
export CONDITIONAL_FORWARDING_DOMAIN
|
||||
export CONDITIONAL_FORWARDING_REVERSE
|
||||
|
||||
export adlistFile='/etc/pihole/adlists.list'
|
||||
|
||||
# The below functions are all contained in bash_functions.sh
|
||||
. /bash_functions.sh
|
||||
|
||||
# Some of the bash_functions use variables these core pi-hole/web scripts
|
||||
. /opt/pihole/webpage.sh
|
||||
# PH_TEST prevents the install from actually running (someone should rename that)
|
||||
PH_TEST=true . $PIHOLE_INSTALL
|
||||
|
||||
echo " ::: Starting docker specific setup for docker pihole/pihole"
|
||||
echo " ::: Starting docker specific checks & setup for docker pihole/pihole"
|
||||
|
||||
docker_checks
|
||||
|
||||
# TODO:
|
||||
#if [ ! -f /.piholeFirstBoot ] ; then
|
||||
# echo " ::: Not first container startup so not running docker's setup, re-create container to run setup again"
|
||||
#else
|
||||
# regular_setup_functions
|
||||
#fi
|
||||
|
||||
fix_capabilities
|
||||
generate_password
|
||||
validate_env || exit 1
|
||||
prepare_configs
|
||||
change_setting "IPV4_ADDRESS" "$ServerIP"
|
||||
change_setting "IPV6_ADDRESS" "$ServerIPv6"
|
||||
change_setting "DNS_BOGUS_PRIV" "$DNS_BOGUS_PRIV"
|
||||
change_setting "DNS_FQDN_REQUIRED" "$DNS_FQDN_REQUIRED"
|
||||
change_setting "DNSSEC" "$DNSSEC"
|
||||
change_setting "CONDITIONAL_FORWARDING" "$CONDITIONAL_FORWARDING"
|
||||
change_setting "CONDITIONAL_FORWARDING_IP" "$CONDITIONAL_FORWARDING_IP"
|
||||
change_setting "CONDITIONAL_FORWARDING_DOMAIN" "$CONDITIONAL_FORWARDING_DOMAIN"
|
||||
change_setting "CONDITIONAL_FORWARDING_REVERSE" "$CONDITIONAL_FORWARDING_REVERSE"
|
||||
setup_web_port "$WEB_PORT"
|
||||
setup_web_password "$PLAINWEBPASSWORD"
|
||||
setup_dnsmasq "$DNS1" "$DNS2" "$INTERFACE"
|
||||
setup_web_password "$WEBPASSWORD"
|
||||
setup_dnsmasq "$DNS1" "$DNS2" "$INTERFACE" "$DNSMASQ_LISTENING_BEHAVIOUR"
|
||||
setup_php_env
|
||||
setup_dnsmasq_hostnames "$ServerIP" "$ServerIPv6" "$HOSTNAME"
|
||||
setup_ipv4_ipv6
|
||||
@@ -43,13 +67,3 @@ test_configs
|
||||
[ -f /.piholeFirstBoot ] && rm /.piholeFirstBoot
|
||||
|
||||
echo " ::: Docker start setup complete"
|
||||
|
||||
echo """
|
||||
:: ::: ::: ::: ::: ::: ::: ::: ::: :::
|
||||
:: Image moved / deprecation notice
|
||||
:: OLD IMAGE : diginc/pi-hole
|
||||
:: NEW IMAGE : pihole/pihole
|
||||
:: In order to get the latest updates
|
||||
:: please update your image references
|
||||
:: ::: ::: ::: ::: ::: ::: ::: ::: :::
|
||||
"""
|
||||
|
||||
144
test/conftest.py
144
test/conftest.py
@@ -1,16 +1,49 @@
|
||||
|
||||
import functools
|
||||
import os
|
||||
import pytest
|
||||
import testinfra
|
||||
import types
|
||||
|
||||
check_output = testinfra.get_backend(
|
||||
"local://"
|
||||
).get_module("Command").check_output
|
||||
local_host = testinfra.get_host('local://')
|
||||
check_output = local_host.check_output
|
||||
|
||||
def DockerGeneric(request, args, image, cmd, entrypoint=''):
|
||||
assert 'docker' in check_output('id'), "Are you in the docker group?"
|
||||
if 'pihole' in image:
|
||||
args += " --dns 127.0.0.1 -v /dev/null:/etc/pihole/adlists.default -e PYTEST=\"True\""
|
||||
docker_run = "docker run -d {args} {entry} {image} {cmd}".format(args=args, entry=entrypoint, image=image, cmd=cmd)
|
||||
print docker_run
|
||||
__version__ = None
|
||||
dotdot = os.path.abspath(os.path.join(os.path.abspath(__file__), os.pardir, os.pardir))
|
||||
with open('{}/VERSION'.format(dotdot), 'r') as v:
|
||||
raw_version = v.read().strip()
|
||||
__version__ = raw_version.replace('release/', 'release-')
|
||||
|
||||
@pytest.fixture()
|
||||
def args_dns():
|
||||
return '--dns 127.0.0.1 --dns 1.1.1.1'
|
||||
|
||||
@pytest.fixture()
|
||||
def args_volumes():
|
||||
return '-v /dev/null:/etc/pihole/adlists.list'
|
||||
|
||||
@pytest.fixture()
|
||||
def args_env():
|
||||
return '-e ServerIP="127.0.0.1"'
|
||||
|
||||
@pytest.fixture()
|
||||
def args(args_dns, args_volumes, args_env):
|
||||
return "{} {} {}".format(args_dns, args_volumes, args_env)
|
||||
|
||||
@pytest.fixture()
|
||||
def test_args():
|
||||
''' test override fixture to provide arguments seperate from our core args '''
|
||||
return ''
|
||||
|
||||
def DockerGeneric(request, _test_args, _args, _image, _cmd, _entrypoint):
|
||||
#assert 'docker' in check_output('id'), "Are you in the docker group?"
|
||||
# Always appended PYTEST arg to tell pihole we're testing
|
||||
if 'pihole' in _image and 'PYTEST=1' not in _args:
|
||||
_args = '{} -e PYTEST=1'.format(_args)
|
||||
docker_run = 'docker run -d -t {args} {test_args} {entry} {image} {cmd}'\
|
||||
.format(args=_args, test_args=_test_args, entry=_entrypoint, image=_image, cmd=_cmd)
|
||||
# Print a human runable version of the container run command for faster debugging
|
||||
print(docker_run.replace('-d -t', '--rm -it').replace('tail -f /dev/null', 'bash'))
|
||||
docker_id = check_output(docker_run)
|
||||
|
||||
def teardown():
|
||||
@@ -18,36 +51,22 @@ def DockerGeneric(request, args, image, cmd, entrypoint=''):
|
||||
check_output("docker rm -f {}".format(docker_id))
|
||||
request.addfinalizer(teardown)
|
||||
|
||||
docker_container = testinfra.get_backend("docker://" + docker_id)
|
||||
docker_container = testinfra.backend.get_backend("docker://" + docker_id, sudo=False)
|
||||
docker_container.id = docker_id
|
||||
|
||||
def run_bash(self, command, *args, **kwargs):
|
||||
cmd = self.get_command(command, *args)
|
||||
if self.user is not None:
|
||||
out = self.run_local(
|
||||
"docker exec -u %s %s /bin/bash -c %s",
|
||||
self.user, self.name, cmd)
|
||||
else:
|
||||
out = self.run_local(
|
||||
"docker exec %s /bin/bash -c %s", self.name, cmd)
|
||||
out.command = self.encode(cmd)
|
||||
return out
|
||||
|
||||
funcType = type(docker_container.run)
|
||||
docker_container.run = funcType(run_bash, docker_container, testinfra.backend.docker.DockerBackend)
|
||||
return docker_container
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def Docker(request, args, image, cmd, entrypoint):
|
||||
def Docker(request, test_args, args, image, cmd, entrypoint):
|
||||
''' One-off Docker container run '''
|
||||
return DockerGeneric(request, args, image, cmd, entrypoint)
|
||||
return DockerGeneric(request, test_args, args, image, cmd, entrypoint)
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def DockerPersist(request, persist_args, persist_image, persist_cmd, Dig):
|
||||
def DockerPersist(request, persist_test_args, persist_args, persist_image, persist_cmd, persist_entrypoint, Dig):
|
||||
''' Persistent Docker container for multiple tests, instead of stopping container after one test '''
|
||||
''' Uses DUP'd module scoped fixtures because smaller scoped fixtures won't mix with module scope '''
|
||||
persistent_container = DockerGeneric(request, persist_args, persist_image, persist_cmd)
|
||||
persistent_container = DockerGeneric(request, persist_test_args, persist_args, persist_image, persist_cmd, persist_entrypoint)
|
||||
''' attach a dig conatiner for lookups '''
|
||||
persistent_container.dig = Dig(persistent_container.id)
|
||||
return persistent_container
|
||||
@@ -56,69 +75,84 @@ def DockerPersist(request, persist_args, persist_image, persist_cmd, Dig):
|
||||
def entrypoint():
|
||||
return ''
|
||||
|
||||
@pytest.fixture()
|
||||
def args(request):
|
||||
return '-e ServerIP="127.0.0.1" -e ServerIPv6="::1"'
|
||||
|
||||
@pytest.fixture(params=['amd64', 'armel', 'armhf', 'aarch64'])
|
||||
@pytest.fixture(params=['amd64', 'armhf', 'arm64', 'armel'])
|
||||
def arch(request):
|
||||
return request.param
|
||||
|
||||
@pytest.fixture()
|
||||
def version(request):
|
||||
''' TODO: include from external .py that can be shared with Dockerfile.py / Tests / deploy scripts '''
|
||||
return 'v4.0'
|
||||
def version():
|
||||
return __version__
|
||||
|
||||
@pytest.fixture()
|
||||
def tag(request, version, arch):
|
||||
def tag(version, arch):
|
||||
return '{}_{}'.format(version, arch)
|
||||
|
||||
@pytest.fixture
|
||||
def webserver(request, tag):
|
||||
def webserver(tag):
|
||||
''' TODO: this is obvious without alpine+nginx as the alternative, remove fixture, hard code lighttpd in tests? '''
|
||||
return 'lighttpd'
|
||||
|
||||
@pytest.fixture()
|
||||
def image(request, tag):
|
||||
def image(tag):
|
||||
image = 'pihole'
|
||||
return '{}:{}'.format(image, tag)
|
||||
|
||||
@pytest.fixture()
|
||||
def cmd(request):
|
||||
def cmd():
|
||||
return 'tail -f /dev/null'
|
||||
|
||||
@pytest.fixture(scope='module', params=['amd64'])
|
||||
def persist_arch(request):
|
||||
@pytest.fixture(scope='module')
|
||||
def persist_arch():
|
||||
'''amd64 only, dnsmasq/pihole-FTL(?untested?) will not start under qemu-user-static :('''
|
||||
return request.param
|
||||
return 'amd64'
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def persist_version(request):
|
||||
''' TODO: include from external .py that can be shared with Dockerfile.py / Tests / deploy scripts '''
|
||||
return 'v4.0'
|
||||
def persist_version():
|
||||
return __version__
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def persist_args(request):
|
||||
return '-e ServerIP="127.0.0.1" -e ServerIPv6="::1"'
|
||||
def persist_args_dns():
|
||||
return '--dns 127.0.0.1 --dns 1.1.1.1'
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def persist_tag(request, persist_version, persist_arch):
|
||||
def persist_args_volumes():
|
||||
return '-v /dev/null:/etc/pihole/adlists.list'
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def persist_args_env():
|
||||
return '-e ServerIP="127.0.0.1"'
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def persist_args(persist_args_dns, persist_args_volumes, persist_args_env):
|
||||
return "{} {} {}".format(args_dns, args_volumes, args_env)
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def persist_test_args():
|
||||
''' test override fixture to provide arguments seperate from our core args '''
|
||||
return ''
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def persist_tag(persist_version, persist_arch):
|
||||
return '{}_{}'.format(persist_version, persist_arch)
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def persist_webserver(request, persist_tag):
|
||||
def persist_webserver(persist_tag):
|
||||
''' TODO: this is obvious without alpine+nginx as the alternative, remove fixture, hard code lighttpd in tests? '''
|
||||
return 'lighttpd'
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def persist_image(request, persist_tag):
|
||||
def persist_image(persist_tag):
|
||||
image = 'pihole'
|
||||
return '{}:{}'.format(image, persist_tag)
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def persist_cmd(request):
|
||||
def persist_cmd():
|
||||
return 'tail -f /dev/null'
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def persist_entrypoint():
|
||||
return ''
|
||||
|
||||
@pytest.fixture
|
||||
def Slow():
|
||||
"""
|
||||
@@ -130,7 +164,7 @@ def Slow():
|
||||
while True:
|
||||
try:
|
||||
assert check()
|
||||
except AssertionError, e:
|
||||
except AssertionError as e:
|
||||
if time.time() < timeout_at:
|
||||
time.sleep(1)
|
||||
else:
|
||||
@@ -140,14 +174,14 @@ def Slow():
|
||||
return slow
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def Dig(request):
|
||||
def Dig():
|
||||
''' separate container to link to pi-hole and perform lookups '''
|
||||
''' a docker pull is faster than running an install of dnsutils '''
|
||||
def dig(docker_id):
|
||||
args = '--link {}:test_pihole'.format(docker_id)
|
||||
image = 'azukiapp/dig'
|
||||
cmd = 'tail -f /dev/null'
|
||||
dig_container = DockerGeneric(request, args, image, cmd)
|
||||
dig_container = DockerGeneric(request, '', args, image, cmd, '')
|
||||
return dig_container
|
||||
return dig
|
||||
|
||||
|
||||
@@ -1,26 +1,33 @@
|
||||
|
||||
import os
|
||||
import pytest
|
||||
import re
|
||||
|
||||
DEFAULTARGS = '-e ServerIP="127.0.0.1" '
|
||||
|
||||
@pytest.mark.parametrize('args,expected_ipv6,expected_stdout', [
|
||||
(DEFAULTARGS, True, 'IPv4 and IPv6'),
|
||||
(DEFAULTARGS + '-e "IPv6=True"', True, 'IPv4 and IPv6'),
|
||||
(DEFAULTARGS + '-e "IPv6=False"', False, 'IPv4'),
|
||||
(DEFAULTARGS + '-e "IPv6=foobar"', False, 'IPv4'),
|
||||
@pytest.mark.parametrize('test_args,expected_ipv6,expected_stdout', [
|
||||
('', True, 'IPv4 and IPv6'),
|
||||
('-e "IPv6=True"', True, 'IPv4 and IPv6'),
|
||||
('-e "IPv6=False"', False, 'IPv4'),
|
||||
('-e "IPv6=foobar"', False, 'IPv4'),
|
||||
])
|
||||
def test_IPv6_not_True_removes_ipv6(Docker, args, expected_ipv6, expected_stdout):
|
||||
def test_IPv6_not_True_removes_ipv6(Docker, Slow, test_args, expected_ipv6, expected_stdout):
|
||||
''' When a user overrides IPv6=True they only get IPv4 listening webservers '''
|
||||
IPV6_LINE = 'use-ipv6.pl'
|
||||
WEB_CONFIG = '/etc/lighttpd/lighttpd.conf'
|
||||
|
||||
function = Docker.run('. /bash_functions.sh ; setup_ipv4_ipv6')
|
||||
assert "Using {}".format(expected_stdout) in function.stdout
|
||||
config = Docker.run('cat {}'.format(WEB_CONFIG)).stdout
|
||||
assert (IPV6_LINE in config) == expected_ipv6
|
||||
if expected_stdout == 'IPv4':
|
||||
assert 'IPv6' not in function.stdout
|
||||
# On overlay2(?) docker sometimes writes to disk are slow enough to break some tests...
|
||||
expected_ipv6_check = lambda: (\
|
||||
IPV6_LINE in Docker.run('grep \'use-ipv6.pl\' {}'.format(WEB_CONFIG)).stdout
|
||||
) == expected_ipv6
|
||||
Slow(expected_ipv6_check)
|
||||
|
||||
@pytest.mark.parametrize('args', [DEFAULTARGS + '-e "WEB_PORT=999"'])
|
||||
def test_overrides_default_WEB_PORT(Docker, args):
|
||||
|
||||
@pytest.mark.parametrize('test_args', ['-e "WEB_PORT=999"'])
|
||||
def test_overrides_default_WEB_PORT(Docker, Slow, test_args):
|
||||
''' When a --net=host user sets WEB_PORT to avoid synology's 80 default IPv4 and or IPv6 ports are updated'''
|
||||
CONFIG_LINE = 'server.port\s*=\s*999'
|
||||
WEB_CONFIG = '/etc/lighttpd/lighttpd.conf'
|
||||
@@ -28,98 +35,106 @@ def test_overrides_default_WEB_PORT(Docker, args):
|
||||
function = Docker.run('. /bash_functions.sh ; eval `grep setup_web_port /start.sh`')
|
||||
assert "Custom WEB_PORT set to 999" in function.stdout
|
||||
assert "INFO: Without proper router DNAT forwarding to 127.0.0.1:999, you may not get any blocked websites on ads" in function.stdout
|
||||
config = Docker.run('cat {}'.format(WEB_CONFIG)).stdout
|
||||
assert re.search(CONFIG_LINE, config) != None
|
||||
Slow(lambda: re.search(CONFIG_LINE, Docker.run('cat {}'.format(WEB_CONFIG)).stdout) != None)
|
||||
Slow(lambda: re.search('://127.0.0.1:999/', Docker.run('cat /var/www/html/pihole/index.php').stdout) != None)
|
||||
# grep fails to find any of the old address w/o port
|
||||
assert Docker.run('grep -rq "://127.0.0.1/" /var/www/html/').rc == 1
|
||||
assert Docker.run('grep -rq "://pi.hole/" /var/www/html/').rc == 1
|
||||
# Find at least one instance of our changes
|
||||
# upstream repos determines how many and I don't want to keep updating this test
|
||||
assert int(Docker.run('grep -rl "://127.0.0.1:999/" /var/www/html/ | wc -l').stdout) >= 1
|
||||
assert int(Docker.run('grep -rl "://pi.hole:999/" /var/www/html/ | wc -l').stdout) >= 1
|
||||
#assert Docker.run('grep -r "://127.0.0.1/" /var/www/html/').stdout == ''
|
||||
#assert Docker.run('grep -r "://pi.hole/" /var/www/html/').stdout == ''
|
||||
## Find at least one instance of our changes
|
||||
## upstream repos determines how many and I don't want to keep updating this test
|
||||
#assert int(Docker.run('grep -rl "://127.0.0.1:999/" /var/www/html/ | wc -l').stdout) >= 1
|
||||
#assert int(Docker.run('grep -rl "://pi.hole:999/" /var/www/html/ | wc -l').stdout) >= 1
|
||||
|
||||
@pytest.mark.parametrize('args,expected_error', [
|
||||
(DEFAULTARGS + '-e WEB_PORT="LXXX"', 'WARNING: Custom WEB_PORT not used - LXXX is not an integer'),
|
||||
(DEFAULTARGS + '-e WEB_PORT="1,000"', 'WARNING: Custom WEB_PORT not used - 1,000 is not an integer'),
|
||||
(DEFAULTARGS + '-e WEB_PORT="99999"', 'WARNING: Custom WEB_PORT not used - 99999 is not within valid port range of 1-65535'),
|
||||
|
||||
@pytest.mark.parametrize('test_args,expected_error', [
|
||||
('-e WEB_PORT="LXXX"', 'WARNING: Custom WEB_PORT not used - LXXX is not an integer'),
|
||||
('-e WEB_PORT="1,000"', 'WARNING: Custom WEB_PORT not used - 1,000 is not an integer'),
|
||||
('-e WEB_PORT="99999"', 'WARNING: Custom WEB_PORT not used - 99999 is not within valid port range of 1-65535'),
|
||||
])
|
||||
def test_bad_input_to_WEB_PORT(Docker, args, expected_error):
|
||||
def test_bad_input_to_WEB_PORT(Docker, test_args, expected_error):
|
||||
function = Docker.run('. /bash_functions.sh ; eval `grep setup_web_port /start.sh`')
|
||||
assert expected_error in function.stdout
|
||||
|
||||
|
||||
# DNS Environment Variable behavior in combinations of modified pihole LTE settings
|
||||
@pytest.mark.parametrize('args, expected_stdout, dns1, dns2', [
|
||||
('-e ServerIP="1.2.3.4"', 'default DNS', '8.8.8.8', '8.8.4.4' ),
|
||||
('-e ServerIP="1.2.3.4" -e DNS1="1.2.3.4"', 'custom DNS', '1.2.3.4', '8.8.4.4' ),
|
||||
('-e ServerIP="1.2.3.4" -e DNS2="1.2.3.4"', 'custom DNS', '8.8.8.8', '1.2.3.4' ),
|
||||
('-e ServerIP="1.2.3.4" -e DNS1="1.2.3.4" -e DNS2="2.2.3.4"', 'custom DNS', '1.2.3.4', '2.2.3.4' ),
|
||||
('-e ServerIP="1.2.3.4" -e DNS1="1.2.3.4" -e DNS2="no"', 'custom DNS', '1.2.3.4', None ),
|
||||
('-e ServerIP="1.2.3.4" -e DNS2="no"', 'custom DNS', '8.8.8.8', None ),
|
||||
@pytest.mark.parametrize('args_env, expected_stdout, dns1, dns2', [
|
||||
('', 'default DNS', '8.8.8.8', '8.8.4.4' ),
|
||||
('-e DNS1="1.2.3.4"', 'custom DNS', '1.2.3.4', '8.8.4.4' ),
|
||||
('-e DNS2="1.2.3.4"', 'custom DNS', '8.8.8.8', '1.2.3.4' ),
|
||||
('-e DNS1="1.2.3.4" -e DNS2="2.2.3.4"', 'custom DNS', '1.2.3.4', '2.2.3.4' ),
|
||||
('-e DNS1="1.2.3.4" -e DNS2="no"', 'custom DNS', '1.2.3.4', None ),
|
||||
('-e DNS2="no"', 'custom DNS', '8.8.8.8', None ),
|
||||
])
|
||||
def test_override_default_servers_with_DNS_EnvVars(Docker, args, expected_stdout, dns1, dns2):
|
||||
def test_override_default_servers_with_DNS_EnvVars(Docker, Slow, args_env, expected_stdout, dns1, dns2):
|
||||
''' on first boot when DNS vars are NOT set explain default google DNS settings are used
|
||||
or when DNS vars are set override the pihole DNS settings '''
|
||||
assert Docker.run('test -f /.piholeFirstBoot').rc == 0
|
||||
function = Docker.run('. /bash_functions.sh ; eval `grep setup_dnsmasq /start.sh`')
|
||||
function = Docker.run('. /bash_functions.sh ; eval `grep "^setup_dnsmasq " /start.sh`')
|
||||
assert expected_stdout in function.stdout
|
||||
|
||||
docker_dns_servers = Docker.run('grep "^server=" /etc/dnsmasq.d/01-pihole.conf').stdout
|
||||
expected_servers = 'server={}\n'.format(dns1) if dns2 == None else 'server={}\nserver={}\n'.format(dns1, dns2)
|
||||
assert expected_servers == docker_dns_servers
|
||||
Slow(lambda: expected_servers == Docker.run('grep "^server=[^/]" /etc/dnsmasq.d/01-pihole.conf').stdout)
|
||||
|
||||
@pytest.mark.parametrize('args, dns1, dns2, expected_stdout', [
|
||||
('-e ServerIP="1.2.3.4"', '9.9.9.1', '9.9.9.2',
|
||||
|
||||
@pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
|
||||
reason="Can't get setupVar setup to work on travis")
|
||||
@pytest.mark.parametrize('args_env, dns1, dns2, expected_stdout', [
|
||||
|
||||
('', '9.9.9.1', '9.9.9.2',
|
||||
'Existing DNS servers used'),
|
||||
('-e ServerIP="1.2.3.4" -e DNS1="1.2.3.4"', '9.9.9.1', '9.9.9.2',
|
||||
'Docker DNS variables not used\nExisting DNS servers used'),
|
||||
('-e ServerIP="1.2.3.4" -e DNS2="1.2.3.4"', '8.8.8.8', '1.2.3.4',
|
||||
'Docker DNS variables not used\nExisting DNS servers used'),
|
||||
('-e ServerIP="1.2.3.4" -e DNS1="1.2.3.4" -e DNS2="2.2.3.4"', '1.2.3.4', '2.2.3.4',
|
||||
'Docker DNS variables not used\nExisting DNS servers used'),
|
||||
('-e DNS1="1.2.3.4"', '9.9.9.1', '9.9.9.2',
|
||||
'Docker DNS variables not used\nExisting DNS servers used (9.9.9.1 & 9.9.9.2)'),
|
||||
('-e DNS2="1.2.3.4"', '8.8.8.8', None,
|
||||
'Docker DNS variables not used\nExisting DNS servers used (8.8.8.8 & unset)'),
|
||||
('-e DNS1="1.2.3.4" -e DNS2="2.2.3.4"', '1.2.3.4', '2.2.3.4',
|
||||
'Docker DNS variables not used\nExisting DNS servers used (1.2.3.4 & 2.2.3.4'),
|
||||
])
|
||||
def test_DNS_Envs_are_secondary_to_setupvars(Docker, args, expected_stdout, dns1, dns2):
|
||||
def test_DNS_Envs_are_secondary_to_setupvars(Docker, Slow, args_env, expected_stdout, dns1, dns2):
|
||||
''' on second boot when DNS vars are set just use pihole DNS settings
|
||||
or when DNS vars and FORCE_DNS var are set override the pihole DNS settings '''
|
||||
# Given we are not booting for the first time
|
||||
assert Docker.run('rm /.piholeFirstBoot').rc == 0
|
||||
|
||||
# and a user already has custom pihole dns variables in setup vars
|
||||
dns_count = 1
|
||||
setupVars = '/etc/pihole/setupVars.conf'
|
||||
Docker.run('sed -i "/^PIHOLE_DNS_1/ c\PIHOLE_DNS_1={}" {}'.format(dns1, setupVars))
|
||||
Docker.run('sed -i "/^PIHOLE_DNS_2/ c\PIHOLE_DNS_2={}" {}'.format(dns2, setupVars))
|
||||
Docker.run('sed -i "/^PIHOLE_DNS/ d" {}'.format(setupVars))
|
||||
Docker.run('echo "PIHOLE_DNS_1={}" | tee -a {}'.format(dns1, setupVars))
|
||||
if dns2:
|
||||
Docker.run('echo "PIHOLE_DNS_2={}" | tee -a {}'.format(dns2, setupVars))
|
||||
Docker.run('sync {}'.format(setupVars))
|
||||
Slow(lambda: 'PIHOLE_DNS' in Docker.run('cat {}'.format(setupVars)).stdout)
|
||||
|
||||
# When we run setup dnsmasq during startup of the container
|
||||
function = Docker.run('. /bash_functions.sh ; eval `grep setup_dnsmasq /start.sh`')
|
||||
function = Docker.run('. /bash_functions.sh ; eval `grep "^setup_dnsmasq " /start.sh`')
|
||||
assert expected_stdout in function.stdout
|
||||
|
||||
expected_servers = 'server={}\nserver={}\n'.format(dns1, dns2)
|
||||
servers = Docker.run('grep "^server=" /etc/dnsmasq.d/01-pihole.conf').stdout
|
||||
searchDns1 = servers.split('\n')[0]
|
||||
searchDns2 = servers.split('\n')[1]
|
||||
|
||||
# Then the servers are still what the user had customized if forced dnsmasq is not set
|
||||
assert 'server={}'.format(dns1) == searchDns1
|
||||
assert 'server={}'.format(dns2) == searchDns2
|
||||
expected_servers = ['server={}'.format(dns1)]
|
||||
if dns2:
|
||||
expected_servers.append('server={}'.format(dns2))
|
||||
Slow(lambda: Docker.run('grep "^server=[^/]" /etc/dnsmasq.d/01-pihole.conf').stdout.strip().split('\n') == \
|
||||
expected_servers)
|
||||
|
||||
@pytest.mark.parametrize('args, expected_stdout, expected_config_line', [
|
||||
('-e ServerIP="1.2.3.4"', 'binding to default interface: eth0', 'interface=eth0' ),
|
||||
('-e ServerIP="1.2.3.4" -e INTERFACE="eth0"', 'binding to default interface: eth0', 'interface=eth0' ),
|
||||
('-e ServerIP="1.2.3.4" -e INTERFACE="br0"', 'binding to custom interface: br0', 'interface=br0'),
|
||||
|
||||
@pytest.mark.parametrize('args_env, expected_stdout, expected_config_line', [
|
||||
('', 'binding to default interface: eth0', 'interface=eth0' ),
|
||||
('-e INTERFACE="eth0"', 'binding to default interface: eth0', 'interface=eth0' ),
|
||||
('-e INTERFACE="br0"', 'binding to custom interface: br0', 'interface=br0'),
|
||||
])
|
||||
def test_DNS_interface_override_defaults(Docker, args, expected_stdout, expected_config_line):
|
||||
def test_DNS_interface_override_defaults(Docker, Slow, args_env, expected_stdout, expected_config_line):
|
||||
''' When INTERFACE environment var is passed in, overwrite dnsmasq interface '''
|
||||
function = Docker.run('. /bash_functions.sh ; eval `grep setup_dnsmasq /start.sh`')
|
||||
function = Docker.run('. /bash_functions.sh ; eval `grep "^setup_dnsmasq " /start.sh`')
|
||||
assert expected_stdout in function.stdout
|
||||
Slow(lambda: expected_config_line + '\n' == Docker.run('grep "^interface" /etc/dnsmasq.d/01-pihole.conf').stdout)
|
||||
|
||||
docker_dns_interface = Docker.run('grep "^interface" /etc/dnsmasq.d/01-pihole.conf').stdout
|
||||
assert expected_config_line + '\n' == docker_dns_interface
|
||||
|
||||
expected_debian_lines = [
|
||||
'"VIRTUAL_HOST" => "127.0.0.1"',
|
||||
'"ServerIP" => "127.0.0.1"',
|
||||
'"PHP_ERROR_LOG" => "/var/log/lighttpd/error.log"'
|
||||
]
|
||||
|
||||
|
||||
@pytest.mark.parametrize('expected_lines,repeat_function', [
|
||||
(expected_debian_lines, 1),
|
||||
(expected_debian_lines, 2)
|
||||
@@ -136,18 +151,21 @@ def test_debian_setup_php_env(Docker, expected_lines, repeat_function):
|
||||
if found_lines > 1:
|
||||
assert False, "Found line {} times (more than once): {}".format(expected_line)
|
||||
|
||||
@pytest.mark.parametrize('args,secure,setupVarsHash', [
|
||||
|
||||
def test_webPassword_random_generation(Docker):
|
||||
''' When a user sets webPassword env the admin password gets set to that '''
|
||||
function = Docker.run('. /bash_functions.sh ; eval `grep generate_password /start.sh`')
|
||||
assert 'assigning random password' in function.stdout.lower()
|
||||
|
||||
|
||||
@pytest.mark.parametrize('entrypoint,cmd', [('--entrypoint=tail','-f /dev/null')])
|
||||
@pytest.mark.parametrize('args_env,secure,setupVarsHash', [
|
||||
('-e ServerIP=1.2.3.4 -e WEBPASSWORD=login', True, 'WEBPASSWORD=6060d59351e8c2f48140f01b2c3f3b61652f396c53a5300ae239ebfbe7d5ff08'),
|
||||
('-e ServerIP=1.2.3.4 -e WEBPASSWORD=""', False, ''),
|
||||
('-e ServerIP=1.2.3.4', True, 'WEBPASSWORD='),
|
||||
])
|
||||
def test_webPassword_env_assigns_password_to_file(Docker, args, secure, setupVarsHash):
|
||||
''' When a user sets webPassword env the admin password gets set to that '''
|
||||
def test_webPassword_env_assigns_password_to_file_or_removes_if_empty(Docker, args_env, secure, setupVarsHash):
|
||||
''' When a user sets webPassword env the admin password gets set or removed if empty '''
|
||||
function = Docker.run('. /bash_functions.sh ; eval `grep setup_web_password /start.sh`')
|
||||
if secure and 'WEBPASSWORD' not in args:
|
||||
assert 'assigning random password' in function.stdout.lower()
|
||||
else:
|
||||
assert 'assigning random password' not in function.stdout.lower()
|
||||
|
||||
if secure:
|
||||
assert 'new password set' in function.stdout.lower()
|
||||
@@ -155,3 +173,31 @@ def test_webPassword_env_assigns_password_to_file(Docker, args, secure, setupVar
|
||||
else:
|
||||
assert 'password removed' in function.stdout.lower()
|
||||
assert Docker.run('grep -q \'^WEBPASSWORD=$\' /etc/pihole/setupVars.conf').rc == 0
|
||||
|
||||
|
||||
@pytest.mark.parametrize('entrypoint,cmd', [('--entrypoint=tail','-f /dev/null')])
|
||||
@pytest.mark.parametrize('test_args', ['-e WEBPASSWORD=login', '-e WEBPASSWORD=""'])
|
||||
def test_webPassword_pre_existing_trumps_all_envs(Docker, args_env, test_args):
|
||||
'''When a user setup webPassword in the volume prior to first container boot,
|
||||
during prior container boot, the prior volume password is left intact / setup skipped'''
|
||||
Docker.run('. /opt/pihole/webpage.sh ; add_setting WEBPASSWORD volumepass')
|
||||
function = Docker.run('. /bash_functions.sh ; eval `grep setup_web_password /start.sh`')
|
||||
|
||||
assert '::: Pre existing WEBPASSWORD found' in function.stdout
|
||||
assert Docker.run('grep -q \'{}\' {}'.format('WEBPASSWORD=volumepass', '/etc/pihole/setupVars.conf')).rc == 0
|
||||
|
||||
|
||||
@pytest.mark.parametrize('args_dns, expected_stdout', [
|
||||
# No DNS passed will vary by the host this is ran on, bad idea for a test
|
||||
#('', 'WARNING Misconfigured DNS in /etc/resolv.conf: Primary DNS should be 127.0.0.1'),
|
||||
('--dns 1.1.1.1', 'WARNING Misconfigured DNS in /etc/resolv.conf: Two DNS servers are recommended, 127.0.0.1 and any backup server\n'
|
||||
'WARNING Misconfigured DNS in /etc/resolv.conf: Primary DNS should be 127.0.0.1 (found 1.1.1.1)'),
|
||||
('--dns 127.0.0.1', 'WARNING Misconfigured DNS in /etc/resolv.conf: Two DNS servers are recommended, 127.0.0.1 and any backup server'),
|
||||
('--dns 1.1.1.1 --dns 127.0.0.1', 'WARNING Misconfigured DNS in /etc/resolv.conf: Primary DNS should be 127.0.0.1 (found 1.1.1.1)'),
|
||||
('--dns 127.0.0.1 --dns 1.1.1.1', 'OK: Checks passed for /etc/resolv.conf DNS servers'),
|
||||
])
|
||||
def test_docker_checks_for_resolvconf_misconfiguration(Docker, args_dns, expected_stdout):
|
||||
''' The container checks for misconfigured resolv.conf '''
|
||||
function = Docker.run('. /bash_functions.sh ; eval `grep docker_checks /start.sh`')
|
||||
print(function.stdout)
|
||||
assert expected_stdout in function.stdout
|
||||
|
||||
@@ -1,14 +1,18 @@
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
|
||||
@pytest.fixture(scope='module')
|
||||
def start_cmd():
|
||||
''' broken by default, required override '''
|
||||
return None
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def RunningPiHole(DockerPersist, Slow, persist_webserver, persist_tag, start_cmd):
|
||||
''' Override the RunningPiHole to run and check for success of a
|
||||
pihole-FTL start based `pihole` script command '''
|
||||
pihole-FTL start based `pihole` script command
|
||||
|
||||
Individual tests all must override start_cmd'''
|
||||
#print DockerPersist.run('ps -ef').stdout
|
||||
assert DockerPersist.dig.run('ping -c 1 test_pihole').rc == 0
|
||||
Slow(lambda: DockerPersist.run('pgrep pihole-FTL').rc == 0)
|
||||
@@ -26,16 +30,12 @@ def RunningPiHole(DockerPersist, Slow, persist_webserver, persist_tag, start_cmd
|
||||
DockerPersist.cmd = cmd
|
||||
return DockerPersist
|
||||
|
||||
@pytest.mark.parametrize('start_cmd', ['start_cmd'])
|
||||
def test_pihole_start_cmd(RunningPiHole, start_cmd, persist_tag):
|
||||
''' the start_cmd tests are all built into the RunningPiHole fixture in this file '''
|
||||
assert RunningPiHole.cmd.stdout == START_DNS_STDOUT[persist_tag]
|
||||
|
||||
@pytest.mark.parametrize('start_cmd,hostname,expected_ip, expected_messages', [
|
||||
('enable', 'pi.hole', '127.0.0.1', ['Enabling blocking','Pi-hole Enabled']),
|
||||
('enable', 'pi.hole', '127.0.0.1', ['Blocking already enabled,','nothing to do']),
|
||||
('disable', 'pi.hole', '127.0.0.1', ['Disabling blocking','Pi-hole Disabled']),
|
||||
])
|
||||
def test_pihole_start_cmd(RunningPiHole, Dig, persist_tag, start_cmd, hostname, expected_ip, expected_messages):
|
||||
def test_pihole_enable_disable_command(RunningPiHole, Dig, persist_tag, start_cmd, hostname, expected_ip, expected_messages):
|
||||
''' the start_cmd tests are all built into the RunningPiHole fixture in this file '''
|
||||
dig_cmd = "dig +time=1 +noall +answer {} @test_pihole".format(hostname)
|
||||
lookup = RunningPiHole.dig.run(dig_cmd)
|
||||
@@ -45,3 +45,10 @@ def test_pihole_start_cmd(RunningPiHole, Dig, persist_tag, start_cmd, hostname,
|
||||
|
||||
for part_of_output in expected_messages:
|
||||
assert part_of_output in RunningPiHole.cmd.stdout
|
||||
|
||||
@pytest.mark.parametrize('start_cmd,expected_message', [
|
||||
('-up', 'Function not supported in Docker images')
|
||||
])
|
||||
def test_pihole_update_command(RunningPiHole, start_cmd, expected_message):
|
||||
assert RunningPiHole.cmd.stdout.strip() == expected_message
|
||||
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
import pytest
|
||||
import testinfra
|
||||
|
||||
run_local = testinfra.get_backend(
|
||||
"local://"
|
||||
).get_module("Command").run
|
||||
|
||||
def test_scripts_pass_shellcheck():
|
||||
''' Make sure shellcheck does not find anything wrong with our shell scripts '''
|
||||
shellcheck = "find . -name '*.sh' -a ! ( -name 'list.sh' ) | while read file; do shellcheck -e SC1008 $file; done;"
|
||||
results = run_local(shellcheck)
|
||||
print results.stdout
|
||||
assert '' == results.stdout
|
||||
@@ -1,3 +1,4 @@
|
||||
|
||||
import pytest
|
||||
import time
|
||||
''' conftest.py provides the defaults through fixtures '''
|
||||
@@ -5,9 +6,8 @@ import time
|
||||
docker containers (esp. musl based OSs) stripped down nature '''
|
||||
|
||||
# If the test runs /start.sh, do not let s6 run it too! Kill entrypoint to avoid race condition/duplicated execution
|
||||
@pytest.mark.parametrize('entrypoint,cmd', [('--entrypoint=tail','-f /dev/null')])
|
||||
@pytest.mark.parametrize('args', [ '' ])
|
||||
def test_ServerIP_missing_triggers_start_error(Docker):
|
||||
@pytest.mark.parametrize('persist_entrypoint,persist_cmd,persist_args_env', [('--entrypoint=tail','-f /dev/null','')])
|
||||
def test_ServerIP_missing_is_not_required_anymore(RunningPiHole):
|
||||
''' When args to docker are empty start.sh exits saying ServerIP is required '''
|
||||
start = Docker.run('/start.sh')
|
||||
error_msg = "ERROR: To function correctly you must pass an environment variables of 'ServerIP' into the docker container"
|
||||
@@ -45,42 +45,16 @@ def test_indecies_are_present(RunningPiHole):
|
||||
|
||||
def validate_curl(http_rc, expected_http_code, page_contents):
|
||||
if int(http_rc.rc) != 0 or int(http_rc.stdout) != expected_http_code:
|
||||
print 'CURL return code: {}'.format(http_rc.rc)
|
||||
print 'CURL stdout: {}'.format(http_rc.stdout)
|
||||
print 'CURL stderr:{}'.format(http_rc.stderr)
|
||||
print 'CURL file:\n{}\n'.format(page_contents.encode('ascii'))
|
||||
print('CURL return code: {}'.format(http_rc.rc))
|
||||
print('CURL stdout: {}'.format(http_rc.stdout))
|
||||
print('CURL stderr:{}'.format(http_rc.stderr))
|
||||
print('CURL file:\n{}\n'.format(page_contents.encode('utf-8')))
|
||||
|
||||
@pytest.mark.parametrize('addr', [ 'testblock.pi-hole.local' ])
|
||||
@pytest.mark.parametrize('url', [ '/', '/index.html', '/any.html' ] )
|
||||
def test_html_index_requests_load_as_expected(RunningPiHole, Slow, addr, url):
|
||||
command = 'curl -s -o /tmp/curled_file -w "%{{http_code}}" http://{}{}'.format(addr, url)
|
||||
http_rc = RunningPiHole.run(command)
|
||||
page_contents = RunningPiHole.run('cat /tmp/curled_file ').stdout
|
||||
expected_http_code = 200
|
||||
|
||||
validate_curl(http_rc, expected_http_code, page_contents)
|
||||
assert http_rc.rc == 0
|
||||
assert int(http_rc.stdout) == expected_http_code
|
||||
assert 'testblock.pi-hole.local' in page_contents
|
||||
|
||||
@pytest.mark.parametrize('addr', [ 'testblock.pi-hole.local' ])
|
||||
@pytest.mark.parametrize('url', [ '/index.js', '/any.js'] )
|
||||
def test_javascript_requests_load_as_expected(RunningPiHole, addr, url):
|
||||
command = 'curl -s -o /tmp/curled_file -w "%{{http_code}}" http://{}{}'.format(addr, url)
|
||||
http_rc = RunningPiHole.run(command)
|
||||
page_contents = RunningPiHole.run('cat /tmp/curled_file ').stdout
|
||||
expected_http_code = 200
|
||||
|
||||
validate_curl(http_rc, expected_http_code, page_contents)
|
||||
assert http_rc.rc == 0
|
||||
assert int(http_rc.stdout) == expected_http_code
|
||||
assert 'var x = "Pi-hole: A black hole for Internet advertisements."' in page_contents
|
||||
|
||||
# IPv6 checks aren't passing CORS, removed :(
|
||||
@pytest.mark.parametrize('addr', [ 'localhost' ] )
|
||||
@pytest.mark.parametrize('url', [ '/admin/', '/admin/index.php' ] )
|
||||
def test_admin_requests_load_as_expected(RunningPiHole, addr, url):
|
||||
command = 'curl -s -o /tmp/curled_file -w "%{{http_code}}" http://{}{}'.format(addr, url)
|
||||
def test_admin_requests_load_as_expected(RunningPiHole, version, addr, url):
|
||||
command = 'curl -L -s -o /tmp/curled_file -w "%{{http_code}}" http://{}{}'.format(addr, url)
|
||||
http_rc = RunningPiHole.run(command)
|
||||
page_contents = RunningPiHole.run('cat /tmp/curled_file ').stdout
|
||||
expected_http_code = 200
|
||||
@@ -88,6 +62,8 @@ def test_admin_requests_load_as_expected(RunningPiHole, addr, url):
|
||||
validate_curl(http_rc, expected_http_code, page_contents)
|
||||
assert http_rc.rc == 0
|
||||
assert int(http_rc.stdout) == expected_http_code
|
||||
for html_text in ['dns_queries_today', 'Content-Security-Policy', 'scripts/pi-hole/js/footer.js']:
|
||||
for html_text in ['dns_queries_today', 'Content-Security-Policy',
|
||||
'scripts/pi-hole/js/footer.js']:
|
||||
# version removed, not showing up in footer of test env (fix me)
|
||||
assert html_text in page_contents
|
||||
|
||||
|
||||
11
tox.ini
11
tox.ini
@@ -1,5 +1,5 @@
|
||||
[tox]
|
||||
envlist = py27
|
||||
envlist = py38
|
||||
|
||||
[testenv]
|
||||
whitelist_externals = docker
|
||||
@@ -7,7 +7,8 @@ deps = -rrequirements.txt
|
||||
# 2 parallel max b/c race condition with docker fixture (I think?)
|
||||
commands = docker run --rm --privileged multiarch/qemu-user-static:register --reset
|
||||
./Dockerfile.py -v --arch amd64
|
||||
pytest -vv -n 2 -k amd64 ./test/
|
||||
./Dockerfile.py -v --arch armhf --arch aarch64
|
||||
pytest -vv -n 2 -k armhf ./test/
|
||||
pytest -vv -n 2 -k aarch64 ./test/
|
||||
pytest -vv -n auto -k amd64 ./test/
|
||||
./Dockerfile.py -v --arch armhf --arch arm64 --arch armel
|
||||
pytest -vv -n auto -k arm64 ./test/
|
||||
pytest -vv -n auto -k armhf ./test/
|
||||
pytest -vv -n auto -k armel ./test/
|
||||
|
||||
Reference in New Issue
Block a user