Compare commits
575 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 1b35e7fdd4 (first listed) | |
| | … 573 further commits, SHA1 only … | |
| | 02e3ead7eb (last listed) | |
@@ -1 +1,5 @@
**/*.sw*
.tox
.git
**/__pycache__
.pipenv
.github/ISSUE_TEMPLATE.md (vendored), 52 lines removed
@@ -1,52 +0,0 @@
<!-- Provide a general summary of the issue in the Title above -->
<!-- Note: these are comments that don't show up in the actual issue, no need to delete them as you fill out the template -->

This is a... <!-- To choose ONE, put an [x] in the box that applies -->

- [ ] Request for a new or modified feature
- [ ] Issue trying to run the docker image
- [ ] Issue trying to build / test / develop the docker image

## Description
<!-- Provide a more detailed introduction to the issue or feature -->

## Expected Behavior
<!-- Tell us what should happen -->

## Actual Behavior
<!-- Tell us what happens instead -->

## Possible Fix
<!-- Not obligatory, but suggest a fix or reason for the bug -->

## Steps to Reproduce and debugging done
<!-- Reproduce this bug. Include code to reproduce, if relevant -->
e.g. your docker run command, pages to visit, CLI commands you ran
1.
2.
3.
4.

## Debug steps I have tried
<!-- Please attempt these debug steps to see if it helps you resolve or understand your own issue -->

- [ ] I have tried destroying my container instance, pulling the newest image version, and re-creating a new container
- [ ] I have tried running the nearly stock `docker run` example in the readme (removing any customizations I added)
- [ ] I have tried running without my volume data mounts to eliminate volumes as the cause
- [ ] I have searched this repository for existing issues and pull requests that look similar <!-- Add links below! -->

<!-- Note: If volumes are your issue, I strongly recommend just starting with fresh volume data -->

<!-- Add any other debugging steps you've taken that maybe relevant information -->

## Context and extra information
<!-- How has this bug affected you? What were you trying to accomplish? -->
<!-- Got any other relevant links to similar issues? -->

## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Docker Host Operating System and OS Version:
* Docker Version:
* Hardware architecture: <!-- ARMv7, x86 -->
.github/ISSUE_TEMPLATE/bug_report.md (vendored, new file), 57 lines
@@ -0,0 +1,57 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''

---

<!-- Provide a general summary of the issue in the Title above -->
<!-- Note: these are comments that don't show up in the actual issue, no need to delete them as you fill out the template -->

<!-- IMPORTANT Complete the entire template please, the info gathered here is usually needed to debug issues anyway so it saves time in the long run. Incomplete/stock template issues may be closed -->

<!-- pick ONE: Bug,
Feature Request,
Run Issue (running Pi-hole container failing),
Build Issue (Building image failing)
Enter in line below: -->
This is a: **FILL ME IN**

## Details
<!-- Provide a more detailed introduction to the issue or feature, try not to duplicate info from lower sections by reviewing the entire template first -->

## Related Issues
- [ ] I have searched this repository/Pi-hole forums for existing issues and pull requests that look similar
<!-- Add links below! -->

<!------- FEATURE REQUESTS CAN STOP FILLING IN TEMPLATE HERE -------->
<!------- ISSUES SHOULD FILL OUT REMAINDER OF TEMPLATE -------->

## How to reproduce the issue

1. Environment data
   * Operating System: **ENTER HERE** <!-- Debian, Ubuntu, Rasbian, etc -->
   * Hardware: <!-- PC, RasPi B/2B/3B/4B, Mac, Synology, QNAP, etc -->
   * Kernel Architecture: <!-- x86/amd64, ArmV7, ArmV8 32bit, ArmV8 64bit, etc -->
   * Docker Install Info and version:
     - Software source: <!-- official docker-ce, OS provided package, Hypriot -->
     - Supplimentary Software: <!-- synology, portainer, etc -->
   * Hardware architecture: <!-- ARMv7, x86 -->

2. docker-compose.yml contents, docker run shell command, or paste a screenshot of any UI based configuration of containers here
3. any additional info to help reproduce

## These common fixes didn't work for my issue
<!-- IMPORTANT! Help me help you! Ordered with most common fixes first. -->
- [ ] I have tried removing/destroying my container, and re-creating a new container
- [ ] I have tried fresh volume data by backing up and moving/removing the old volume data
- [ ] I have tried running the stock `docker run` example(s) in the readme (removing any customizations I added)
- [ ] I have tried a newer or older version of Docker Pi-hole (depending what version the issue started in for me)
- [ ] I have tried running without my volume data mounts to eliminate volumes as the cause

If the above debugging / fixes revealed any new information note it here.
Add any other debugging steps you've taken or theories on root cause that may help.
.github/ISSUE_TEMPLATE/config.yml (vendored, new file), 12 lines
@@ -0,0 +1,12 @@
blank_issues_enabled: false
contact_links:
  - name: Questions and Configurations
    url: https://discourse.pi-hole.net
    about: Ask a question or get help with configurations.
  - name: Feature Requests
    url: https://discourse.pi-hole.net/c/feature-requests/8
    about: See existing Feature Requests and suggest new ones.
  - name: Documentation
    url: https://docs.pi-hole.net
    about: Documentation and guides.
.github/PULL_REQUEST_TEMPLATE.md (vendored), 2 changed lines
@@ -1,3 +1,5 @@
`{Please select 'base: dev' as target branch above! (you can delete this line)}`

<!--- Provide a general summary of your changes in the Title above -->

## Description
.github/dco.yml (vendored, new file), 2 lines
@@ -0,0 +1,2 @@
require:
  members: false
.github/dependabot.yml (vendored, new file), 7 lines
@@ -0,0 +1,7 @@
version: 2
updates:
  # Maintain dependencies for GitHub Actions
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "weekly"
.github/release.yml (vendored, new file), 7 lines
@@ -0,0 +1,7 @@
changelog:
  exclude:
    labels:
      - internal
    authors:
      - dependabot
      - github-actions
.github/workflows/stale.yml (vendored, new file), 25 lines
@@ -0,0 +1,25 @@
name: Mark stale issues

on:
  schedule:
    - cron: '0 * * * *'
  workflow_dispatch:

jobs:
  stale:

    runs-on: ubuntu-latest
    permissions:
      issues: write

    steps:
      - uses: actions/stale@v4
        with:
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          days-before-stale: 30
          days-before-close: 5
          stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Please comment or update this issue or it will be closed in 5 days.'
          stale-issue-label: 'stale'
          exempt-issue-labels: 'pinned, Fixed in next release, bug, never-stale, documentation, investigating'
          exempt-all-issue-assignees: true
          operations-per-run: 300
.github/workflows/sync-back-to-dev.yml (vendored, new file), 28 lines
@@ -0,0 +1,28 @@
name: Sync Back to Development

on:
  push:
    branches:
      - master

jobs:
  sync-branches:
    runs-on: ubuntu-latest
    name: Syncing branches
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Opening pull request
        id: pull
        uses: tretuna/sync-branches@1.4.0
        with:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          FROM_BRANCH: 'master'
          TO_BRANCH: 'dev'
          CONTENT_COMPARISON: true
      - name: Label the pull request to ignore for release note generation
        uses: actions-ecosystem/action-add-labels@v1
        with:
          labels: internal
          repo: ${{ github.repository }}
          number: ${{ steps.pull.outputs.PULL_REQUEST_NUMBER }}
.github/workflows/test-and-build.yaml (vendored, new file), 85 lines
@@ -0,0 +1,85 @@
name: Test & Build
on:
  schedule:
    - cron: '0 2 * * *'
  push:
    branches:
      - dev
  pull_request:
  release:
    types: [published]

jobs:
  test:
    runs-on: ubuntu-latest
    env:
      ARCH: amd64
      DEBIAN_VERSION: buster
    steps:
      - name: Checkout Repo
        uses: actions/checkout@v2
      - name: Run Tests
        run: |
          echo "Building ${ARCH}-${DEBIAN_VERSION}"
          ./gh-actions-test.sh

  build-and-publish:
    if: github.event_name != 'pull_request'
    needs: test
    runs-on: ubuntu-latest
    steps:
      -
        name: Checkout
        if: github.event_name != 'schedule'
        uses: actions/checkout@v2
      -
        name: Checkout dev branch if we are building nightly
        if: github.event_name == 'schedule'
        uses: actions/checkout@v2
        with:
          ref: dev
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      -
        name: Docker meta
        id: meta
        uses: docker/metadata-action@v3
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          images: |
            ${{ secrets.DOCKERHUB_NAMESPACE }}/pihole
            ghcr.io/${{ github.repository_owner }}/pihole
          flavor: |
            latest=${{ startsWith(github.ref, 'refs/tags/') }}
          tags: |
            type=schedule
            type=ref,event=branch,enable=${{ github.event_name != 'schedule' }}
            type=ref,event=tag
      -
        name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USER }}
          password: ${{ secrets.DOCKERHUB_PASS }}
      -
        name: Login to GitHub Container Registry
        uses: docker/login-action@v1
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      -
        name: Build and push
        uses: docker/build-push-action@v2
        with:
          context: .
          platforms: linux/amd64, linux/arm64, linux/386, linux/arm/v7, linux/arm/v6
          build-args: |
            PIHOLE_DOCKER_TAG=${{ steps.meta.outputs.version }}
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
.gitignore (vendored), 7 changed lines
@@ -3,9 +3,16 @@
.cache
__pycache__
.tox
.pipenv
.eggs
UNKNOWN.egg-info
.env
ci-workspace
.gh-workspace
docker-compose.yml
etc-dnsmasq.d/
etc-pihole/
var-log/

# WIP/test stuff
doco.yml
.travis.yml, 24 lines removed
@@ -1,24 +0,0 @@
sudo: required
services:
  - docker
language: python
env:
  global:
    - QEMU_VER=v2.9.1
  matrix:
    - ARCH=amd64
    - ARCH=armhf
    - ARCH=aarch64
python:
  - "2.7"
install:
  - pip install -r requirements.txt
script:
  # prepare qemu
  - docker run --rm --privileged multiarch/qemu-user-static:register --reset
  # generate and build dockerfile
  - ./Dockerfile.py --arch=${ARCH} -v
  - docker images
  # run docker build & tests
  # 2 parallel max b/c race condition with docker fixture (I think?)
  - py.test -vv -n 2 -k "${ARCH}" ./test/
@@ -2,4 +2,4 @@

Notes about releases will be documented on [docker-pi-hole's github releases page](https://github.com/pi-hole/docker-pi-hole/releases). Breaking changes will be copied to the top of the docker repo's README file to assist with common upgrade issues.

See the [Pi-hole releases](https://github.com/pi-hole/pi-hole/releases) for details on updates unreleated to docker image releases
See the [Pi-hole releases](https://github.com/pi-hole/pi-hole/releases) for details on updates unrelated to docker image releases
@@ -6,5 +6,5 @@ Please review the following before opening a pull request (PR) to help your PR g
* To ensure proper testing and quality control, target any code change pull requests against `dev` branch.

* Make sure the tests pass
  * Take a look at [TESTING.md](TESTING.md) to see how to run tests locally so you do not have to push all your code to a PR and have travis-ci run it.
  * Take a look at [TESTING.md](TESTING.md) to see how to run tests locally so you do not have to push all your code to a PR and have GitHub Actions run it.
  * Your tests will probably run faster locally and you get a faster feedback loop.
Dockerfile (new file), 47 lines
@@ -0,0 +1,47 @@
ARG PIHOLE_BASE
FROM "${PIHOLE_BASE:-ghcr.io/pi-hole/docker-pi-hole-base:buster-slim}"

ARG PIHOLE_DOCKER_TAG
ENV PIHOLE_DOCKER_TAG "${PIHOLE_DOCKER_TAG}"

ENV S6_OVERLAY_VERSION v2.1.0.2

COPY install.sh /usr/local/bin/install.sh
ENV PIHOLE_INSTALL /etc/.pihole/automated\ install/basic-install.sh

ENTRYPOINT [ "/s6-init" ]

COPY s6/debian-root /
COPY s6/service /usr/local/bin/service

RUN bash -ex install.sh 2>&1 && \
    rm -rf /var/cache/apt/archives /var/lib/apt/lists/*

# php config start passes special ENVs into
ARG PHP_ENV_CONFIG
ENV PHP_ENV_CONFIG /etc/lighttpd/conf-enabled/15-fastcgi-php.conf
ARG PHP_ERROR_LOG
ENV PHP_ERROR_LOG /var/log/lighttpd/error.log
COPY ./start.sh /
COPY ./bash_functions.sh /

# IPv6 disable flag for networks/devices that do not support it
ENV IPv6 True

EXPOSE 53 53/udp
EXPOSE 67/udp
EXPOSE 80

ENV S6_LOGGING 0
ENV S6_KEEP_ENV 1
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS 2

ENV ServerIP 0.0.0.0
ENV FTL_CMD no-daemon
ENV DNSMASQ_USER pihole

ENV PATH /opt/pihole:${PATH}

HEALTHCHECK CMD dig +short +norecurse +retry=0 @127.0.0.1 pi.hole || exit 1

SHELL ["/bin/bash", "-c"]
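The ENV defaults declared in this Dockerfile (ServerIP, FTL_CMD, DNSMASQ_USER, IPv6) are ordinary environment variables, so they can be overridden when a container is started. A minimal sketch using the docker Python SDK pinned later in the Pipfile (docker==4.1.0); the image tag, port mapping, and override values are illustrative assumptions, not taken from this diff:

```python
# Minimal sketch: start the image built from the Dockerfile above and override
# a couple of the ENV defaults it declares. Tag, ports, and values are
# illustrative assumptions, not part of the diff.
import docker

client = docker.from_env()
container = client.containers.run(
    "pihole/pihole:latest",            # assumed local tag
    detach=True,
    environment={
        "ServerIP": "192.168.1.2",     # overrides ENV ServerIP 0.0.0.0
        "IPv6": "False",               # overrides ENV IPv6 True
        "DNSMASQ_USER": "pihole",      # same as the Dockerfile default
    },
    ports={"53/tcp": 53, "53/udp": 53, "80/tcp": 8080},
)
print(container.short_id, container.status)
```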
Dockerfile.py, 181 changed lines
@@ -1,139 +1,82 @@
#!/usr/bin/env python
#!/usr/bin/env python3
""" Dockerfile.py - generates and build dockerfiles

Usage:
    Dockerfile.py [--arch=<arch> ...] [--skip=<arch> ...] [-v] [-t] [--no-build | --no-generate] [--no-cache]
    Dockerfile.py [--hub_tag=<tag>] [--arch=<arch> ...] [--debian=<version> ...] [-v] [-t] [--no-build] [--no-cache] [--fail-fast]

Options:
    --no-build           Skip building the docker images
    --no-cache           Build without using any cache data
    --no-generate        Skip generating Dockerfiles from template
    --arch=<arch>        What Architecture(s) to build [default: amd64 armel armhf aarch64]
    --skip=<arch>        What Architectures(s) to skip [default: None]
    -v                   Print docker's command output [default: False]
    -t                   Print docker's build time [default: False]
    --no-build           Skip building the docker images
    --no-cache           Build without using any cache data
    --fail-fast          Exit on first build error
    --hub_tag=<tag>      What the Docker Hub Image should be tagged as [default: None]
    --arch=<arch>        What Architecture(s) to build [default: amd64 armel armhf arm64]
    --debian=<version>   What debian version(s) to build [default: stretch buster bullseye]
    -v                   Print docker's command output [default: False]
    -t                   Print docker's build time [default: False]

Examples:
"""

from docopt import docopt
from jinja2 import Environment, FileSystemLoader
from docopt import docopt
import os
import testinfra
import sys
import subprocess
from dotenv import dotenv_values

THIS_DIR = os.path.dirname(os.path.abspath(__file__))

base_vars = {
    'name': 'pihole/pihole',
    'maintainer' : 'adam@diginc.us',
    's6_version' : 'v1.21.7.0',
}

os_base_vars = {
    'php_env_config': '/etc/lighttpd/conf-enabled/15-fastcgi-php.conf',
    'php_error_log': '/var/log/lighttpd/error.log'
}

__version__ = None
dot = os.path.abspath('.')
with open('{}/VERSION'.format(dot), 'r') as v:
    __version__ = v.read().strip()

images = {
    __version__: [
        {
            'base': 'pihole/debian-base:latest',
            'arch': 'amd64'
        },
        {
            'base': 'multiarch/debian-debootstrap:armel-stretch-slim',
            'arch': 'armel'
        },
        {
            'base': 'multiarch/debian-debootstrap:armhf-stretch-slim',
            'arch': 'armhf'
        },
        {
            'base': 'multiarch/debian-debootstrap:arm64-stretch-slim',
            'arch': 'aarch64'
        }
    ]
}

def generate_dockerfiles(args):
    if args['--no-generate']:
        print " ::: Skipping Dockerfile generation"
        return

    for version, archs in images.iteritems():
        for image in archs:
            if image['arch'] not in args['--arch'] or image['arch'] in args['--skip']:
                return
            s6arch = image['arch']
            if image['arch'] == 'armel':
                s6arch = 'arm'
            merged_data = dict(
                { 'version': version }.items() +
                base_vars.items() +
                os_base_vars.items() +
                image.items() +
                { 's6arch': s6arch }.items()
            )
            j2_env = Environment(loader=FileSystemLoader(THIS_DIR),
                                 trim_blocks=True)
            template = j2_env.get_template('Dockerfile.template')

            dockerfile = 'Dockerfile_{}'.format(image['arch'])
            with open(dockerfile, 'w') as f:
                f.write(template.render(pihole=merged_data))


def build_dockerfiles(args):
def build_dockerfiles(args) -> bool:
    all_success = True
    if args['-v']:
        print(args)
    if args['--no-build']:
        print " ::: Skipping Dockerfile building"
        return
        print(" ::: Skipping Dockerfile building")
        return all_success

    for arch in args['--arch']:
        # TODO: include from external .py that can be shared with Dockerfile.py / Tests / deploy scripts '''
        if arch == 'armel':
            print "Skipping armel, incompatible upstream binaries/broken"
            continue
        build('pihole', arch, args)
        for debian_version in args['--debian']:
            all_success = build('pihole', arch, debian_version, args['--hub_tag'], args['-t'], args['--no-cache'], args['-v']) and all_success
            if not all_success and args['--fail-fast']:
                return False
    return all_success


def build(docker_repo, arch, args):
    run_local = testinfra.get_backend(
        "local://"
    ).get_module("Command").run
def run_and_stream_command_output(command, environment_vars, verbose) -> bool:
    print("Running", command)
    build_result = subprocess.Popen(command.split(), env=environment_vars, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT, bufsize=1, universal_newlines=True)
    if verbose:
        while build_result.poll() is None:
            for line in build_result.stdout:
                print(line, end='')
    build_result.wait()
    if build_result.returncode != 0:
        print(" ::: Error running".format(command))
        print(build_result.stderr)
    return build_result.returncode == 0

    dockerfile = 'Dockerfile_{}'.format(arch)
    repo_tag = '{}:{}_{}'.format(docker_repo, __version__, arch)
    cached_image = '{}/{}'.format('pihole', repo_tag)
    time=''
    if args['-t']:
        time='time '
    no_cache = ''
    if args['--no-cache']:
        no_cache = '--no-cache'
    build_command = '{time}docker build {no_cache} --pull --cache-from="{cache},{create_tag}" -f {dockerfile} -t {create_tag} .'\
        .format(time=time, no_cache=no_cache, cache=cached_image, dockerfile=dockerfile, create_tag=repo_tag)
    print " ::: Building {} into {}".format(dockerfile, repo_tag)
    if args['-v']:
        print build_command, '\n'
    build_result = run_local(build_command)
    if args['-v']:
        print build_result.stdout
        print build_result.stderr
    if build_result.rc != 0:
        print " ::: Building {} encountered an error".format(dockerfile)
        print build_result.stderr
    assert build_result.rc == 0

def build(docker_repo: str, arch: str, debian_version: str, hub_tag: str, show_time: bool, no_cache: bool, verbose: bool) -> bool:
    # remove the `pihole/pihole:` from hub_tag for use elsewhere
    tag_name = hub_tag.split(":",1)[1]
    create_tag = f'{docker_repo}:{tag_name}'
    print(f' ::: Building {create_tag}')
    time_arg = 'time' if show_time else ''
    cache_arg = '--no-cache' if no_cache else ''
    build_env = os.environ.copy()
    build_env['PIHOLE_DOCKER_TAG'] = os.environ.get('GIT_TAG', None)
    build_env['DEBIAN_VERSION'] = debian_version
    build_command = f'{time_arg} docker-compose -f build.yml build {cache_arg} --pull {arch}'
    print(f' ::: Building {arch} into {create_tag}')
    success = run_and_stream_command_output(build_command, build_env, verbose)
    if verbose:
        print(build_command, '\n')
    if success and hub_tag:
        hub_tag_command = f'{time_arg} docker tag {create_tag} {hub_tag}'
        print(f' ::: Tagging {create_tag} into {hub_tag}')
        success = run_and_stream_command_output(hub_tag_command, build_env, verbose)
    return success


if __name__ == '__main__':
    args = docopt(__doc__, version='Dockerfile 1.0')
    # print args

    generate_dockerfiles(args)
    build_dockerfiles(args)
    args = docopt(__doc__, version='Dockerfile 1.1')
    success = build_dockerfiles(args)
    exit_code = 0 if success else 1
    sys.exit(exit_code)
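The old build() shelled out through testinfra's local Command module; the rewrite replaces that with the run_and_stream_command_output() helper, which merges stderr into stdout and echoes the build output line by line. A standalone sketch of that pattern follows; the echoed command is only a placeholder, not something the repo runs:

```python
# Standalone sketch of the streaming-subprocess pattern used by
# run_and_stream_command_output above. The command is a placeholder.
import os
import subprocess

def stream(command: str, env: dict) -> bool:
    proc = subprocess.Popen(
        command.split(),
        env=env,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,      # merge stderr into the streamed output
        universal_newlines=True,
    )
    for line in proc.stdout:           # echo output as it arrives
        print(line, end='')
    return proc.wait() == 0            # True when the command exited cleanly

if __name__ == '__main__':
    ok = stream('echo hello from the build sketch', os.environ.copy())
    print('success:', ok)
```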
Dockerfile.sh (new executable file), 13 lines
@@ -0,0 +1,13 @@
#!/usr/bin/env bash

# @param ${ARCH} The architecture to build. Example: amd64
# @param ${DEBIAN_VERSION} The debian version to build. Example: buster
# @param ${ARCH_IMAGE} What the Docker Hub Image should be tagged as [default: None]

set -eux
./Dockerfile.py -v --no-cache --arch="${ARCH}" --debian="${DEBIAN_VERSION}" --hub_tag="${ARCH_IMAGE}"
docker images

# TODO: Add junitxml output and have something consume it
# 2 parallel max b/c race condition with docker fixture (I think?)
py.test -vv -n 2 -k "${ARCH}" ./test/
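The `-k "${ARCH}"` filter in the last line selects tests whose ids contain the architecture string, so only the arch that was just built gets exercised. A minimal sketch of a test parametrized that way; the tag format mirrors the repo_tag string from the old Dockerfile.py above, and the test body is illustrative only:

```python
# Minimal sketch: a parametrized test whose ids include the architecture,
# so `py.test -k amd64` selects only the amd64 case. The assertion is
# illustrative, not taken from the repo's test suite.
import pytest

ARCHES = ["amd64", "armhf", "aarch64"]

@pytest.mark.parametrize("arch", ARCHES)
def test_tag_includes_arch(arch):
    repo_tag = "pihole:v4.2.2_{}".format(arch)   # mirrors repo_tag in Dockerfile.py
    assert arch in repo_tag
```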
@@ -1,48 +0,0 @@
FROM {{ pihole.base }}

ENV S6OVERLAY_RELEASE https://github.com/just-containers/s6-overlay/releases/download/{{ pihole.s6_version }}/s6-overlay-{{ pihole.s6arch }}.tar.gz
COPY install.sh /usr/local/bin/install.sh
COPY VERSION /etc/docker-pi-hole-version
ENV PIHOLE_INSTALL /root/ph_install.sh

RUN bash -ex install.sh 2>&1 && \
    rm -rf /var/cache/apt/archives /var/lib/apt/lists/*

ENTRYPOINT [ "/s6-init" ]

ADD s6/debian-root /
COPY s6/service /usr/local/bin/service

# php config start passes special ENVs into
ENV PHP_ENV_CONFIG '{{ pihole.php_env_config }}'
ENV PHP_ERROR_LOG '{{ pihole.php_error_log }}'
COPY ./start.sh /
COPY ./bash_functions.sh /

# IPv6 disable flag for networks/devices that do not support it
ENV IPv6 True

EXPOSE 53 53/udp
EXPOSE 67/udp
EXPOSE 80
EXPOSE 443

ENV S6_LOGGING 0
ENV S6_KEEP_ENV 1
ENV S6_BEHAVIOUR_IF_STAGE2_FAILS 2

ENV ServerIP 0.0.0.0
ENV FTL_CMD no-daemon
ENV DNSMASQ_USER root

ENV VERSION {{ pihole.version }}
ENV ARCH {{ pihole.arch }}
ENV PATH /opt/pihole:${PATH}

LABEL image="{{ pihole.name }}:{{ pihole.version }}_{{ pihole.arch }}"
LABEL maintainer="{{ pihole.maintainer }}"
LABEL url="https://www.github.com/pi-hole/docker-pi-hole"

HEALTHCHECK CMD dig @127.0.0.1 pi.hole || exit 1

SHELL ["/bin/bash", "-c"]
The four generated per-architecture Dockerfiles (written by the old Dockerfile.py as Dockerfile_aarch64, Dockerfile_amd64, Dockerfile_armel, and Dockerfile_armhf; 48 lines each, all removed as @@ -1,48 +0,0 @@) are the template above rendered with VERSION v4.2.2, maintainer adam@diginc.us, and the per-architecture values below; every other line matches the template:

| Arch | Base image | s6-overlay release (v1.21.7.0) | Image label |
|---|---|---|---|
| aarch64 | multiarch/debian-debootstrap:arm64-stretch-slim | s6-overlay-aarch64.tar.gz | pihole/pihole:v4.2.2_aarch64 |
| amd64 | pihole/debian-base:latest | s6-overlay-amd64.tar.gz | pihole/pihole:v4.2.2_amd64 |
| armel | multiarch/debian-debootstrap:armel-stretch-slim | s6-overlay-arm.tar.gz | pihole/pihole:v4.2.2_armel |
| armhf | multiarch/debian-debootstrap:armhf-stretch-slim | s6-overlay-armhf.tar.gz | pihole/pihole:v4.2.2_armhf |
Dockerfile_build (new file), 25 lines
@@ -0,0 +1,25 @@
FROM python:3.8-buster

# Only works for docker CLIENT (bind mounted socket)
COPY --from=docker:18.09.3 /usr/local/bin/docker /usr/local/bin/

ARG packages
RUN apt-get update && \
    apt-get install -y python3-dev curl gcc make \
    libffi-dev libssl-dev ${packages} \
    && pip3 install -U pip pipenv

RUN curl -L https://github.com/docker/compose/releases/download/1.25.5/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose && \
    chmod +x /usr/local/bin/docker-compose

COPY ./Dockerfile.sh /usr/local/bin/
COPY Pipfile* /root/
WORKDIR /root

RUN pipenv install --system \
    && sed -i 's|/bin/sh|/bin/bash|g' /usr/local/lib/python3.8/site-packages/testinfra/backend/docker.py

RUN echo "set -ex && Dockerfile.sh && \$@" > /usr/local/bin/entrypoint.sh
RUN chmod +x /usr/local/bin/entrypoint.sh
ENTRYPOINT entrypoint.sh
CMD Dockerfile.sh
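The sed line above rewrites testinfra's docker backend to use bash because the test suite runs its checks inside the freshly built containers. A minimal sketch of a testinfra check in that style, assuming a container named pihole_test is already running (the name and the checks are illustrative, not taken from the repo's test suite):

```python
# Minimal sketch of a testinfra check against a running container.
# The container name "pihole_test" and the checks are illustrative assumptions.
import testinfra

def test_container_basics():
    host = testinfra.get_host("docker://pihole_test")
    assert host.file("/s6-init").exists                   # ENTRYPOINT from the Dockerfile
    dig = host.run("dig +short @127.0.0.1 pi.hole")       # same probe as the HEALTHCHECK
    assert dig.rc == 0
```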
Pipfile (new file), 64 lines
@@ -0,0 +1,64 @@
[[source]]
name = "pypi"
url = "https://pypi.org/simple"
verify_ssl = true

[dev-packages]

[packages]
apipkg = "==1.5"
atomicwrites = "==1.3.0"
attrs = "==19.3.0"
bcrypt = "==3.1.7"
cached-property = "==1.5.1"
certifi = "==2019.11.28"
cffi = "==1.13.2"
chardet = "==3.0.4"
configparser = "==4.0.2"
contextlib2 = "==0.6.0.post1"
coverage = "==5.0.1"
cryptography = "==3.3.2"
docker = "==4.1.0"
dockerpty = "==0.4.1"
docopt = "==0.6.2"
enum34 = "==1.1.6"
execnet = "==1.7.1"
filelock = "==3.0.12"
funcsigs = "==1.0.2"
idna = "==2.8"
importlib-metadata = "==1.3.0"
ipaddress = "==1.0.23"
jsonschema = "==3.2.0"
more-itertools = "==5.0.0"
pathlib2 = "==2.3.5"
pluggy = "==0.13.1"
py = "==1.10.0"
pycparser = "==2.19"
pyparsing = "==2.4.6"
pyrsistent = "==0.15.6"
pytest = "==4.6.8"
pytest-cov = "==2.8.1"
pytest-forked = "==1.1.3"
pytest-xdist = "==1.31.0"
requests = "==2.22.0"
scandir = "==1.10.0"
six = "==1.13.0"
subprocess32 = "==3.5.4"
testinfra = "==3.3.0"
texttable = "==1.6.2"
toml = "==0.10.0"
tox = "==3.14.3"
urllib3 = "==1.25.8"
virtualenv = "==16.7.9"
wcwidth = "==0.1.7"
zipp = "==0.6.0"
"backports.shutil_get_terminal_size" = "==1.0.0"
"backports.ssl_match_hostname" = "==3.7.0.1"
Jinja2 = "==2.11.3"
MarkupSafe = "==1.1.1"
PyYAML = "==5.4"
websocket_client = "==0.57.0"
python-dotenv = "==0.17.1"

[requires]
python_version = "3.8"
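python-dotenv is pinned here, dotenv_values is imported by the new Dockerfile.py, and .gitignore now ignores a local .env file, which suggests per-developer build settings can live in such a file. A minimal sketch of reading one; the keys and defaults are illustrative assumptions:

```python
# Minimal sketch: read local build settings from a .env file with python-dotenv.
# The keys and defaults are illustrative assumptions.
from dotenv import dotenv_values

config = dotenv_values(".env")                     # dict of KEY -> value (strings)
arch = config.get("ARCH", "amd64")
debian_version = config.get("DEBIAN_VERSION", "buster")
print("building {}-{}".format(arch, debian_version))
```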
Pipfile.lock (generated, new file), 620 lines
@@ -0,0 +1,620 @@
{
    "_meta": {
        "hash": {
            "sha256": "2c7f1fb7f001bf70bba7309859b06dc323040f21518b32ee8993aa823c27df15"
        },
        "pipfile-spec": 6,
        "requires": {
            "python_version": "3.8"
        },
        "sources": [
            {
                "name": "pypi",
                "url": "https://pypi.org/simple",
                "verify_ssl": true
            }
        ]
    },
    "default": {
        … one block per package pinned in the Pipfile above, each with its "hashes" (sha256 list), "index": "pypi", and "version" …
    }
}
"index": "pypi",
|
||||
"version": "==1.3.0"
|
||||
},
|
||||
"ipaddress": {
|
||||
"hashes": [
|
||||
"sha256:6e0f4a39e66cb5bb9a137b00276a2eff74f93b71dcbdad6f10ff7df9d3557fcc",
|
||||
"sha256:b7f8e0369580bb4a24d5ba1d7cc29660a4a6987763faf1d8a8046830e020e7e2"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.0.23"
|
||||
},
|
||||
"jinja2": {
|
||||
"hashes": [
|
||||
"sha256:03e47ad063331dd6a3f04a43eddca8a966a26ba0c5b7207a9a9e4e08f1b29419",
|
||||
"sha256:a6d58433de0ae800347cab1fa3043cebbabe8baa9d29e668f1c768cb87a333c6"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.11.3"
|
||||
},
|
||||
"jsonschema": {
|
||||
"hashes": [
|
||||
"sha256:4e5b3cf8216f577bee9ce139cbe72eca3ea4f292ec60928ff24758ce626cd163",
|
||||
"sha256:c8a85b28d377cc7737e46e2d9f2b4f44ee3c0e1deac6bf46ddefc7187d30797a"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.2.0"
|
||||
},
|
||||
"markupsafe": {
|
||||
"hashes": [
|
||||
"sha256:00bc623926325b26bb9605ae9eae8a215691f33cae5df11ca5424f06f2d1f473",
|
||||
"sha256:09027a7803a62ca78792ad89403b1b7a73a01c8cb65909cd876f7fcebd79b161",
|
||||
"sha256:09c4b7f37d6c648cb13f9230d847adf22f8171b1ccc4d5682398e77f40309235",
|
||||
"sha256:1027c282dad077d0bae18be6794e6b6b8c91d58ed8a8d89a89d59693b9131db5",
|
||||
"sha256:13d3144e1e340870b25e7b10b98d779608c02016d5184cfb9927a9f10c689f42",
|
||||
"sha256:195d7d2c4fbb0ee8139a6cf67194f3973a6b3042d742ebe0a9ed36d8b6f0c07f",
|
||||
"sha256:22c178a091fc6630d0d045bdb5992d2dfe14e3259760e713c490da5323866c39",
|
||||
"sha256:24982cc2533820871eba85ba648cd53d8623687ff11cbb805be4ff7b4c971aff",
|
||||
"sha256:29872e92839765e546828bb7754a68c418d927cd064fd4708fab9fe9c8bb116b",
|
||||
"sha256:2beec1e0de6924ea551859edb9e7679da6e4870d32cb766240ce17e0a0ba2014",
|
||||
"sha256:3b8a6499709d29c2e2399569d96719a1b21dcd94410a586a18526b143ec8470f",
|
||||
"sha256:43a55c2930bbc139570ac2452adf3d70cdbb3cfe5912c71cdce1c2c6bbd9c5d1",
|
||||
"sha256:46c99d2de99945ec5cb54f23c8cd5689f6d7177305ebff350a58ce5f8de1669e",
|
||||
"sha256:500d4957e52ddc3351cabf489e79c91c17f6e0899158447047588650b5e69183",
|
||||
"sha256:535f6fc4d397c1563d08b88e485c3496cf5784e927af890fb3c3aac7f933ec66",
|
||||
"sha256:596510de112c685489095da617b5bcbbac7dd6384aeebeda4df6025d0256a81b",
|
||||
"sha256:62fe6c95e3ec8a7fad637b7f3d372c15ec1caa01ab47926cfdf7a75b40e0eac1",
|
||||
"sha256:6788b695d50a51edb699cb55e35487e430fa21f1ed838122d722e0ff0ac5ba15",
|
||||
"sha256:6dd73240d2af64df90aa7c4e7481e23825ea70af4b4922f8ede5b9e35f78a3b1",
|
||||
"sha256:6f1e273a344928347c1290119b493a1f0303c52f5a5eae5f16d74f48c15d4a85",
|
||||
"sha256:6fffc775d90dcc9aed1b89219549b329a9250d918fd0b8fa8d93d154918422e1",
|
||||
"sha256:717ba8fe3ae9cc0006d7c451f0bb265ee07739daf76355d06366154ee68d221e",
|
||||
"sha256:79855e1c5b8da654cf486b830bd42c06e8780cea587384cf6545b7d9ac013a0b",
|
||||
"sha256:7c1699dfe0cf8ff607dbdcc1e9b9af1755371f92a68f706051cc8c37d447c905",
|
||||
"sha256:7fed13866cf14bba33e7176717346713881f56d9d2bcebab207f7a036f41b850",
|
||||
"sha256:84dee80c15f1b560d55bcfe6d47b27d070b4681c699c572af2e3c7cc90a3b8e0",
|
||||
"sha256:88e5fcfb52ee7b911e8bb6d6aa2fd21fbecc674eadd44118a9cc3863f938e735",
|
||||
"sha256:8defac2f2ccd6805ebf65f5eeb132adcf2ab57aa11fdf4c0dd5169a004710e7d",
|
||||
"sha256:98bae9582248d6cf62321dcb52aaf5d9adf0bad3b40582925ef7c7f0ed85fceb",
|
||||
"sha256:98c7086708b163d425c67c7a91bad6e466bb99d797aa64f965e9d25c12111a5e",
|
||||
"sha256:9add70b36c5666a2ed02b43b335fe19002ee5235efd4b8a89bfcf9005bebac0d",
|
||||
"sha256:9bf40443012702a1d2070043cb6291650a0841ece432556f784f004937f0f32c",
|
||||
"sha256:a6a744282b7718a2a62d2ed9d993cad6f5f585605ad352c11de459f4108df0a1",
|
||||
"sha256:acf08ac40292838b3cbbb06cfe9b2cb9ec78fce8baca31ddb87aaac2e2dc3bc2",
|
||||
"sha256:ade5e387d2ad0d7ebf59146cc00c8044acbd863725f887353a10df825fc8ae21",
|
||||
"sha256:b00c1de48212e4cc9603895652c5c410df699856a2853135b3967591e4beebc2",
|
||||
"sha256:b1282f8c00509d99fef04d8ba936b156d419be841854fe901d8ae224c59f0be5",
|
||||
"sha256:b1dba4527182c95a0db8b6060cc98ac49b9e2f5e64320e2b56e47cb2831978c7",
|
||||
"sha256:b2051432115498d3562c084a49bba65d97cf251f5a331c64a12ee7e04dacc51b",
|
||||
"sha256:b7d644ddb4dbd407d31ffb699f1d140bc35478da613b441c582aeb7c43838dd8",
|
||||
"sha256:ba59edeaa2fc6114428f1637ffff42da1e311e29382d81b339c1817d37ec93c6",
|
||||
"sha256:bf5aa3cbcfdf57fa2ee9cd1822c862ef23037f5c832ad09cfea57fa846dec193",
|
||||
"sha256:c8716a48d94b06bb3b2524c2b77e055fb313aeb4ea620c8dd03a105574ba704f",
|
||||
"sha256:caabedc8323f1e93231b52fc32bdcde6db817623d33e100708d9a68e1f53b26b",
|
||||
"sha256:cd5df75523866410809ca100dc9681e301e3c27567cf498077e8551b6d20e42f",
|
||||
"sha256:cdb132fc825c38e1aeec2c8aa9338310d29d337bebbd7baa06889d09a60a1fa2",
|
||||
"sha256:d53bc011414228441014aa71dbec320c66468c1030aae3a6e29778a3382d96e5",
|
||||
"sha256:d73a845f227b0bfe8a7455ee623525ee656a9e2e749e4742706d80a6065d5e2c",
|
||||
"sha256:d9be0ba6c527163cbed5e0857c451fcd092ce83947944d6c14bc95441203f032",
|
||||
"sha256:e249096428b3ae81b08327a63a485ad0878de3fb939049038579ac0ef61e17e7",
|
||||
"sha256:e8313f01ba26fbbe36c7be1966a7b7424942f670f38e666995b88d012765b9be",
|
||||
"sha256:feb7b34d6325451ef96bc0e36e1a6c0c1c64bc1fbec4b854f4529e51887b1621"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.1.1"
|
||||
},
|
||||
"more-itertools": {
|
||||
"hashes": [
|
||||
"sha256:38a936c0a6d98a38bcc2d03fdaaedaba9f412879461dd2ceff8d37564d6522e4",
|
||||
"sha256:c0a5785b1109a6bd7fac76d6837fd1feca158e54e521ccd2ae8bfe393cc9d4fc",
|
||||
"sha256:fe7a7cae1ccb57d33952113ff4fa1bc5f879963600ed74918f1236e212ee50b9"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==5.0.0"
|
||||
},
|
||||
"packaging": {
|
||||
"hashes": [
|
||||
"sha256:5b327ac1320dc863dca72f4514ecc086f31186744b84a230374cc1fd776feae5",
|
||||
"sha256:67714da7f7bc052e064859c05c595155bd1ee9f69f76557e21f051443c20947a"
|
||||
],
|
||||
"markers": "python_version >= '2.7' and python_version not in '3.0, 3.1, 3.2, 3.3'",
|
||||
"version": "==20.9"
|
||||
},
|
||||
"pathlib2": {
|
||||
"hashes": [
|
||||
"sha256:0ec8205a157c80d7acc301c0b18fbd5d44fe655968f5d947b6ecef5290fc35db",
|
||||
"sha256:6cd9a47b597b37cc57de1c05e56fb1a1c9cc9fab04fe78c29acd090418529868"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.3.5"
|
||||
},
|
||||
"pluggy": {
|
||||
"hashes": [
|
||||
"sha256:15b2acde666561e1298d71b523007ed7364de07029219b604cf808bfa1c765b0",
|
||||
"sha256:966c145cd83c96502c3c3868f50408687b38434af77734af1e9ca461a4081d2d"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.13.1"
|
||||
},
|
||||
"py": {
|
||||
"hashes": [
|
||||
"sha256:21b81bda15b66ef5e1a777a21c4dcd9c20ad3efd0b3f817e7a809035269e1bd3",
|
||||
"sha256:3b80836aa6d1feeaa108e046da6423ab8f6ceda6468545ae8d02d9d58d18818a"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.10.0"
|
||||
},
|
||||
"pycparser": {
|
||||
"hashes": [
|
||||
"sha256:a988718abfad80b6b157acce7bf130a30876d27603738ac39f140993246b25b3"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.19"
|
||||
},
|
||||
"pyparsing": {
|
||||
"hashes": [
|
||||
"sha256:4c830582a84fb022400b85429791bc551f1f4871c33f23e44f353119e92f969f",
|
||||
"sha256:c342dccb5250c08d45fd6f8b4a559613ca603b57498511740e65cd11a2e7dcec"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.4.6"
|
||||
},
|
||||
"pyrsistent": {
|
||||
"hashes": [
|
||||
"sha256:f3b280d030afb652f79d67c5586157c5c1355c9a58dfc7940566e28d28f3df1b"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.15.6"
|
||||
},
|
||||
"pytest": {
|
||||
"hashes": [
|
||||
"sha256:6192875be8af57b694b7c4904e909680102befcb99e610ef3d9f786952f795aa",
|
||||
"sha256:f8447ebf8fd3d362868a5d3f43a9df786dfdfe9608843bd9002a2d47a104808f"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==4.6.8"
|
||||
},
|
||||
"pytest-cov": {
|
||||
"hashes": [
|
||||
"sha256:cc6742d8bac45070217169f5f72ceee1e0e55b0221f54bcf24845972d3a47f2b",
|
||||
"sha256:cdbdef4f870408ebdbfeb44e63e07eb18bb4619fae852f6e760645fa36172626"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.8.1"
|
||||
},
|
||||
"pytest-forked": {
|
||||
"hashes": [
|
||||
"sha256:1805699ed9c9e60cb7a8179b8d4fa2b8898098e82d229b0825d8095f0f261100",
|
||||
"sha256:1ae25dba8ee2e56fb47311c9638f9e58552691da87e82d25b0ce0e4bf52b7d87"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.1.3"
|
||||
},
|
||||
"pytest-xdist": {
|
||||
"hashes": [
|
||||
"sha256:0f46020d3d9619e6d17a65b5b989c1ebbb58fc7b1da8fb126d70f4bac4dfeed1",
|
||||
"sha256:7dc0d027d258cd0defc618fb97055fbd1002735ca7a6d17037018cf870e24011"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.31.0"
|
||||
},
|
||||
"python-dotenv": {
|
||||
"hashes": [
|
||||
"sha256:00aa34e92d992e9f8383730816359647f358f4a3be1ba45e5a5cefd27ee91544",
|
||||
"sha256:b1ae5e9643d5ed987fc57cc2583021e38db531946518130777734f9589b3141f"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.17.1"
|
||||
},
|
||||
"pyyaml": {
|
||||
"hashes": [
|
||||
"sha256:02c78d77281d8f8d07a255e57abdbf43b02257f59f50cc6b636937d68efa5dd0",
|
||||
"sha256:0dc9f2eb2e3c97640928dec63fd8dc1dd91e6b6ed236bd5ac00332b99b5c2ff9",
|
||||
"sha256:124fd7c7bc1e95b1eafc60825f2daf67c73ce7b33f1194731240d24b0d1bf628",
|
||||
"sha256:26fcb33776857f4072601502d93e1a619f166c9c00befb52826e7b774efaa9db",
|
||||
"sha256:31ba07c54ef4a897758563e3a0fcc60077698df10180abe4b8165d9895c00ebf",
|
||||
"sha256:3c49e39ac034fd64fd576d63bb4db53cda89b362768a67f07749d55f128ac18a",
|
||||
"sha256:52bf0930903818e600ae6c2901f748bc4869c0c406056f679ab9614e5d21a166",
|
||||
"sha256:5a3f345acff76cad4aa9cb171ee76c590f37394186325d53d1aa25318b0d4a09",
|
||||
"sha256:5e7ac4e0e79a53451dc2814f6876c2fa6f71452de1498bbe29c0b54b69a986f4",
|
||||
"sha256:7242790ab6c20316b8e7bb545be48d7ed36e26bbe279fd56f2c4a12510e60b4b",
|
||||
"sha256:737bd70e454a284d456aa1fa71a0b429dd527bcbf52c5c33f7c8eee81ac16b89",
|
||||
"sha256:8635d53223b1f561b081ff4adecb828fd484b8efffe542edcfdff471997f7c39",
|
||||
"sha256:8b818b6c5a920cbe4203b5a6b14256f0e5244338244560da89b7b0f1313ea4b6",
|
||||
"sha256:8bf38641b4713d77da19e91f8b5296b832e4db87338d6aeffe422d42f1ca896d",
|
||||
"sha256:a36a48a51e5471513a5aea920cdad84cbd56d70a5057cca3499a637496ea379c",
|
||||
"sha256:b2243dd033fd02c01212ad5c601dafb44fbb293065f430b0d3dbf03f3254d615",
|
||||
"sha256:cc547d3ead3754712223abb7b403f0a184e4c3eae18c9bb7fd15adef1597cc4b",
|
||||
"sha256:cc552b6434b90d9dbed6a4f13339625dc466fd82597119897e9489c953acbc22",
|
||||
"sha256:f3790156c606299ff499ec44db422f66f05a7363b39eb9d5b064f17bd7d7c47b",
|
||||
"sha256:f7a21e3d99aa3095ef0553e7ceba36fb693998fbb1226f1392ce33681047465f",
|
||||
"sha256:fdc6b2cb4b19e431994f25a9160695cc59a4e861710cc6fc97161c5e845fc579"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==5.4"
|
||||
},
|
||||
"requests": {
|
||||
"hashes": [
|
||||
"sha256:11e007a8a2aa0323f5a921e9e6a2d7e4e67d9877e85773fba9ba6419025cbeb4",
|
||||
"sha256:9cf5292fcd0f598c671cfc1e0d7d1a7f13bb8085e9a590f48c010551dc6c4b31"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==2.22.0"
|
||||
},
|
||||
"scandir": {
|
||||
"hashes": [
|
||||
"sha256:2586c94e907d99617887daed6c1d102b5ca28f1085f90446554abf1faf73123e",
|
||||
"sha256:2ae41f43797ca0c11591c0c35f2f5875fa99f8797cb1a1fd440497ec0ae4b022",
|
||||
"sha256:2b8e3888b11abb2217a32af0766bc06b65cc4a928d8727828ee68af5a967fa6f",
|
||||
"sha256:2c712840c2e2ee8dfaf36034080108d30060d759c7b73a01a52251cc8989f11f",
|
||||
"sha256:4d4631f6062e658e9007ab3149a9b914f3548cb38bfb021c64f39a025ce578ae",
|
||||
"sha256:67f15b6f83e6507fdc6fca22fedf6ef8b334b399ca27c6b568cbfaa82a364173",
|
||||
"sha256:7d2d7a06a252764061a020407b997dd036f7bd6a175a5ba2b345f0a357f0b3f4",
|
||||
"sha256:8c5922863e44ffc00c5c693190648daa6d15e7c1207ed02d6f46a8dcc2869d32",
|
||||
"sha256:92c85ac42f41ffdc35b6da57ed991575bdbe69db895507af88b9f499b701c188",
|
||||
"sha256:b24086f2375c4a094a6b51e78b4cf7ca16c721dcee2eddd7aa6494b42d6d519d",
|
||||
"sha256:cb925555f43060a1745d0a321cca94bcea927c50114b623d73179189a4e100ac"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.10.0"
|
||||
},
|
||||
"six": {
|
||||
"hashes": [
|
||||
"sha256:1f1b7d42e254082a9db6279deae68afb421ceba6158efa6131de7b3003ee93fd",
|
||||
"sha256:30f610279e8b2578cab6db20741130331735c781b56053c59c4076da27f06b66"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.13.0"
|
||||
},
|
||||
"subprocess32": {
|
||||
"hashes": [
|
||||
"sha256:88e37c1aac5388df41cc8a8456bb49ebffd321a3ad4d70358e3518176de3a56b",
|
||||
"sha256:e45d985aef903c5b7444d34350b05da91a9e0ea015415ab45a21212786c649d0",
|
||||
"sha256:eb2937c80497978d181efa1b839ec2d9622cf9600a039a79d0e108d1f9aec79d"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.5.4"
|
||||
},
|
||||
"testinfra": {
|
||||
"hashes": [
|
||||
"sha256:780e6c2ab392ea93c26cee1777c968a144c2189a56b3e239a3a66e6d256925b5",
|
||||
"sha256:c3492b39c8d2c98d8419ce1a91d7fe348213f9b98b91198d2e7e88b3954b050b"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.3.0"
|
||||
},
|
||||
"texttable": {
|
||||
"hashes": [
|
||||
"sha256:7dc282a5b22564fe0fdc1c771382d5dd9a54742047c61558e071c8cd595add86",
|
||||
"sha256:eff3703781fbc7750125f50e10f001195174f13825a92a45e9403037d539b4f4"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.6.2"
|
||||
},
|
||||
"toml": {
|
||||
"hashes": [
|
||||
"sha256:229f81c57791a41d65e399fc06bf0848bab550a9dfd5ed66df18ce5f05e73d5c",
|
||||
"sha256:235682dd292d5899d361a811df37e04a8828a5b1da3115886b73cf81ebc9100e",
|
||||
"sha256:f1db651f9657708513243e61e6cc67d101a39bad662eaa9b5546f789338e07a3"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.10.0"
|
||||
},
|
||||
"tox": {
|
||||
"hashes": [
|
||||
"sha256:06ba73b149bf838d5cd25dc30c2dd2671ae5b2757cf98e5c41a35fe449f131b3",
|
||||
"sha256:806d0a9217584558cc93747a945a9d9bff10b141a5287f0c8429a08828a22192"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==3.14.3"
|
||||
},
|
||||
"urllib3": {
|
||||
"hashes": [
|
||||
"sha256:2f3db8b19923a873b3e5256dc9c2dedfa883e33d87c690d9c7913e1f40673cdc",
|
||||
"sha256:87716c2d2a7121198ebcb7ce7cccf6ce5e9ba539041cfbaeecfb641dc0bf6acc"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==1.25.8"
|
||||
},
|
||||
"virtualenv": {
|
||||
"hashes": [
|
||||
"sha256:0d62c70883c0342d59c11d0ddac0d954d0431321a41ab20851facf2b222598f3",
|
||||
"sha256:55059a7a676e4e19498f1aad09b8313a38fcc0cdbe4fdddc0e9b06946d21b4bb"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==16.7.9"
|
||||
},
|
||||
"wcwidth": {
|
||||
"hashes": [
|
||||
"sha256:3df37372226d6e63e1b1e1eda15c594bca98a22d33a23832a90998faa96bc65e",
|
||||
"sha256:f4ebe71925af7b40a864553f761ed559b43544f8f71746c2d756c7fe788ade7c"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.1.7"
|
||||
},
|
||||
"websocket-client": {
|
||||
"hashes": [
|
||||
"sha256:0fc45c961324d79c781bab301359d5a1b00b13ad1b10415a4780229ef71a5549",
|
||||
"sha256:d735b91d6d1692a6a181f2a8c9e0238e5f6373356f561bb9dc4c7af36f452010"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.57.0"
|
||||
},
|
||||
"zipp": {
|
||||
"hashes": [
|
||||
"sha256:3718b1cbcd963c7d4c5511a8240812904164b7f381b647143a89d3b98f9bcd8e",
|
||||
"sha256:f06903e9f1f43b12d371004b4ac7b06ab39a44adc747266928ae6debfa7b3335"
|
||||
],
|
||||
"index": "pypi",
|
||||
"version": "==0.6.0"
|
||||
}
|
||||
},
|
||||
"develop": {}
|
||||
}
|
||||
199 README.md
@@ -7,6 +7,7 @@
|
||||
|
||||
## Quick Start
|
||||
|
||||
1. Copy docker-compose.yml.example to docker-compose.yml and update as needed. See example below:
|
||||
[Docker-compose](https://docs.docker.com/compose/install/) example:
|
||||
|
||||
```yaml
|
||||
@@ -17,74 +18,58 @@ services:
|
||||
pihole:
|
||||
container_name: pihole
|
||||
image: pihole/pihole:latest
|
||||
# For DHCP it is recommended to remove these ports and instead add: network_mode: "host"
|
||||
ports:
|
||||
- "53:53/tcp"
|
||||
- "53:53/udp"
|
||||
- "67:67/udp"
|
||||
- "67:67/udp" # Only required if you are using Pi-hole as your DHCP server
|
||||
- "80:80/tcp"
|
||||
- "443:443/tcp"
|
||||
environment:
|
||||
TZ: 'America/Chicago'
|
||||
# WEBPASSWORD: 'set a secure password here or it will be random'
|
||||
# Volumes store your data between container upgrades
|
||||
volumes:
|
||||
- './etc-pihole/:/etc/pihole/'
|
||||
- './etc-dnsmasq.d/:/etc/dnsmasq.d/'
|
||||
dns:
|
||||
- 127.0.0.1
|
||||
- 1.1.1.1
|
||||
# Recommended but not required (DHCP needs NET_ADMIN)
|
||||
- './etc-pihole:/etc/pihole'
|
||||
- './etc-dnsmasq.d:/etc/dnsmasq.d'
|
||||
# https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
- NET_ADMIN # Recommended but not required (DHCP needs NET_ADMIN)
|
||||
restart: unless-stopped
|
||||
```
|
||||
2. Run `docker-compose up -d` to build and start pi-hole
|
||||
3. Use the Pi-hole web UI to change the DNS settings *Interface listening behavior* to "Listen on all interfaces, permit all origins", if using Docker's default `bridge` network setting
|
||||
|
||||
[Here is an equivilent docker run script](https://github.com/pi-hole/docker-pi-hole/blob/master/docker_run.sh).
|
||||
[Here is an equivalent docker run script](https://github.com/pi-hole/docker-pi-hole/blob/master/docker_run.sh).
|
||||
|
||||
## Upgrade Notices:
|
||||
## Upgrade Notes
|
||||
In `2022.01` and later, the default `DNSMASQ_USER` has been changed to `pihole`. However, this may cause issues on some systems such as Synology; see Issue [#963](https://github.com/pi-hole/docker-pi-hole/issues/963) for more information.
|
||||
|
||||
### Docker Pi-Hole v4.2.2
|
||||
|
||||
- ServerIP is no longer a required environment variable! Feel free to remove it unless you need it to customize lighttpd
|
||||
- `--cap-add NET_ADMIN` is no longer required unless you are using DHCP; it is left in the examples for consistency
|
||||
|
||||
### Docker Pi-Hole v4.1.1+
|
||||
|
||||
Starting with the v4.1.1 release, your Pi-hole container may encounter issues starting the DNS service unless it is run with the following settings:
|
||||
|
||||
- `--dns=127.0.0.1 --dns=1.1.1.1` The second server can be any DNS IP of your choosing, but the **first DNS server must be 127.0.0.1**
|
||||
- A WARNING stating "Misconfigured DNS in /etc/resolv.conf" may show in docker logs without this.
|
||||
- 4.1 required --cap-add NET_ADMIN until 4.2.1-1
|
||||
|
||||
These are the raw [docker run cli](https://docs.docker.com/engine/reference/commandline/cli/) versions of the commands. We provide no official support for docker GUIs but the community forums may be able to help if you do not see a place for these settings. Remember, always consult your manual too!
|
||||
If the container won't start due to issues setting capabilities, set `DNSMASQ_USER` to `root` in your environment.
|
||||
|
||||
## Overview
|
||||
|
||||
#### Renamed from `diginc/pi-hole` to `pihole/pihole`
|
||||
|
||||
A [Docker](https://www.docker.com/what-docker) project to make a lightweight x86 and ARM container with [Pi-hole](https://pi-hole.net) functionality.
|
||||
|
||||
1) Install docker for your [x86-64 system](https://www.docker.com/community-edition) or [ARMv7 system](https://www.raspberrypi.org/blog/docker-comes-to-raspberry-pi/) using those links. [Docker-compose](https://docs.docker.com/compose/install/) is also recommended.
|
||||
2) Use the above quick start example, customize if desired.
|
||||
3) Enjoy!
|
||||
|
||||
[](https://travis-ci.org/pi-hole/docker-pi-hole) [](https://store.docker.com/community/images/pihole/pihole) [](https://store.docker.com/community/images/pihole/pihole)
|
||||
[](https://github.com/pi-hole/docker-pi-hole/actions?query=workflow%3A%22Test+%26+Build%22) [](https://store.docker.com/community/images/pihole/pihole) [](https://store.docker.com/community/images/pihole/pihole)
|
||||
|
||||
## Running Pi-hole Docker
|
||||
|
||||
This container uses two popular ports, port 53 and port 80, so it **may conflict with existing applications' ports**. If you have no other services or docker containers using port 53/80 (if you do, keep reading below for a reverse proxy example), the minimum arguments required to run this container are in the script [docker_run.sh](https://github.com/pi-hole/docker-pi-hole/blob/master/docker_run.sh).
|
||||
|
||||
If you're using a Red Hat based distrubution with an SELinux Enforcing policy add `:z` to line with volumes like so:
|
||||
If you're using a Red Hat based distribution with an SELinux Enforcing policy add `:z` to line with volumes like so:
|
||||
|
||||
```
|
||||
-v "$(pwd)/etc-pihole/:/etc/pihole/:z" \
|
||||
-v "$(pwd)/etc-dnsmasq.d/:/etc/dnsmasq.d/:z" \
|
||||
-v "$(pwd)/etc-pihole:/etc/pihole:z" \
|
||||
-v "$(pwd)/etc-dnsmasq.d:/etc/dnsmasq.d:z" \
|
||||
```
|
||||
|
||||
Volumes are recommended for persisting data across container re-creations when updating images. The IP lookup variables may not work for everyone; please review their values and hard-code the IP and IPv6 addresses if necessary.
|
||||
|
||||
Port 443 is exposed to provide a sinkhole for ads that use SSL. If only port 80 is used, blocked HTTPS queries will fail to connect to port 443 and may cause long page-load times. Rejecting port 443 on your firewall serves the same purpose. Ubuntu firewall example: `sudo ufw reject https`
|
||||
You can customize where to store persistent data by setting the `PIHOLE_BASE` environment variable when invoking `docker_run.sh` (e.g. `PIHOLE_BASE=/opt/pihole-storage ./docker_run.sh`). If `PIHOLE_BASE` is not set, files are stored in your current directory when you invoke the script.
|
||||
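For example, a minimal sketch of pointing the script at a dedicated storage directory (the path below is only an illustration):

```bash
# Keep Pi-hole's persistent volumes under /opt/pihole-storage instead of the current directory
sudo mkdir -p /opt/pihole-storage
PIHOLE_BASE=/opt/pihole-storage ./docker_run.sh
```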
|
||||
**Automatic Ad List Updates** - since the 3.0+ release, `cron` is baked into the container and will grab the newest versions of your lists and flush your logs. **Set your TZ** environment variable to make sure the midnight log rotation syncs up with your timezone's midnight.
|
||||
|
||||
@@ -96,20 +81,77 @@ There are multiple different ways to run DHCP from within your Docker Pi-hole co
|
||||
|
||||
There are other environment variables if you want to customize various things inside the docker container:
|
||||
|
||||
| Docker Environment Var. | Description |
|
||||
| ----------------------- | ----------- |
|
||||
| `TZ: <Timezone>`<br/> **Recommended** *Default: UTC* | Set your [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) to make sure logs rotate at local midnight instead of at UTC midnight.
|
||||
| `WEBPASSWORD: <Admin password>`<br/> **Recommended** *Default: random* | http://pi.hole/admin password. Run `docker logs pihole \| grep random` to find your random pass.
|
||||
| `DNS1: <IP>`<br/> *Optional* *Default: 8.8.8.8* | Primary upstream DNS provider, default is google DNS
|
||||
| `DNS2: <IP>`<br/> *Optional* *Default: 8.8.4.4* | Secondary upstream DNS provider, default is google DNS, `no` if only one DNS should used
|
||||
| `ServerIP: <Host's IP>`<br/> **Recommended** | Set to your server's LAN IP, used by web block modes and lighttpd bind address
|
||||
| `ServerIPv6: <Host's IPv6>`<br/> *Required if using IPv6* | **If you have a v6 network** set to your server's LAN IPv6 to block IPv6 ads fully
|
||||
| `VIRTUAL_HOST: <Custom Hostname>`<br/> *Optional* *Default: $ServerIP* | What your web server 'virtual host' is, accessing admin through this Hostname/IP allows you to make changes to the whitelist / blacklists in addition to the default 'http://pi.hole/admin/' address
|
||||
| `IPv6: <True\|False>`<br/> *Optional* *Default: True* | For unraid compatibility, strips out all the IPv6 configuration from DNS/Web services when false.
|
||||
| `INTERFACE: <NIC>`<br/> *Advanced/Optional* | The default works fine with our basic example docker run commands. If you're trying to use DHCP with `--net host` mode then you may have to customize this or DNSMASQ_LISTENING.
|
||||
| `DNSMASQ_LISTENING: <local\|all\|NIC>`<br/> *Advanced/Optional* | `local` listens on all local subnets, `all` permits listening on internet origin subnets in addition to local.
|
||||
| `WEB_PORT: <PORT>`<br/> *Advanced/Optional* | **This will break the 'webpage blocked' functionality of Pi-hole** however it may help advanced setups like those running synology or `--net=host` docker argument. This guide explains how to restore webpage blocked functionality using a linux router DNAT rule: [Alternative Synology installation method](https://discourse.pi-hole.net/t/alternative-synology-installation-method/5454?u=diginc)
|
||||
| `DNSMASQ_USER: <pihole\|root>`<br/> *Experimental Default: root* | Allows running FTLDNS as non-root.
|
||||
### Recommended Variables
|
||||
|
||||
| Variable | Default | Value | Description |
|
||||
| -------- | ------- | ----- | ---------- |
|
||||
| `TZ` | UTC | `<Timezone>` | Set your [timezone](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones) to make sure logs rotate at local midnight instead of at UTC midnight.
|
||||
| `WEBPASSWORD` | random | `<Admin password>` | http://pi.hole/admin password. Run `docker logs pihole \| grep random` to find your random pass.
|
||||
| `FTLCONF_REPLY_ADDR4` | unset | `<Host's IP>` | Set to your server's LAN IP, used by web block modes and lighttpd bind address.
|
||||
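As a rough sketch, the recommended variables can be passed to `docker run` like this; the timezone, password, and IP are placeholders, and the port/volume arguments are omitted for brevity:

```bash
docker run -d \
  --name pihole \
  -e TZ="America/Chicago" \
  -e WEBPASSWORD="changeme" \
  -e FTLCONF_REPLY_ADDR4="192.168.1.10" \
  pihole/pihole:latest
```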
|
||||
### Optional Variables
|
||||
|
||||
| Variable | Default | Value | Description |
|
||||
| -------- | ------- | ----- | ---------- |
|
||||
| `ADMIN_EMAIL` | unset | email address | Set an administrative contact address for the Block Page |
|
||||
| `PIHOLE_DNS_` | `8.8.8.8;8.8.4.4` | IPs delimited by `;` | Upstream DNS server(s) for Pi-hole to forward queries to, separated by a semicolon <br/> (supports non-standard ports with `#[port number]`) e.g. `127.0.0.1#5053;8.8.8.8;8.8.4.4` Note: The existence of this environment variable assumes this as the _sole_ management of upstream DNS. Upstream DNS added via the web interface will be overwritten on container restart/recreation |
|
||||
| `DNSSEC` | `false` | `<"true"\|"false">` | Enable DNSSEC support |
|
||||
| `DNS_BOGUS_PRIV` | `true` |`<"true"\|"false">`| Never forward reverse lookups for private ranges |
|
||||
| `DNS_FQDN_REQUIRED` | `true` | `<"true"\|"false">`| Never forward non-FQDNs |
|
||||
| `REV_SERVER` | `false` | `<"true"\|"false">` | Enable DNS conditional forwarding for device name resolution |
|
||||
| `REV_SERVER_DOMAIN` | unset | Network Domain | If conditional forwarding is enabled, set the domain of the local network router |
|
||||
| `REV_SERVER_TARGET` | unset | Router's IP | If conditional forwarding is enabled, set the IP of the local network router |
|
||||
| `REV_SERVER_CIDR` | unset | Reverse DNS | If conditional forwarding is enabled, set the reverse DNS zone (e.g. `192.168.0.0/24`) |
|
||||
| `DHCP_ACTIVE` | `false` | `<"true"\|"false">` | Enable DHCP server. Static DHCP leases can be configured with a custom `/etc/dnsmasq.d/04-pihole-static-dhcp.conf`
|
||||
| `DHCP_START` | unset | `<Start IP>` | Start of the range of IP addresses to be handed out by the DHCP server (mandatory if the DHCP server is enabled).
| `DHCP_END` | unset | `<End IP>` | End of the range of IP addresses to be handed out by the DHCP server (mandatory if the DHCP server is enabled).
| `DHCP_ROUTER` | unset | `<Router's IP>` | Router (gateway) IP address sent by the DHCP server (mandatory if the DHCP server is enabled).
|
||||
| `DHCP_LEASETIME` | 24 | `<hours>` | DHCP lease time in hours.
|
||||
| `PIHOLE_DOMAIN` | `lan` | `<domain>` | Domain name sent by the DHCP server.
|
||||
| `DHCP_IPv6` | `false` | `<"true"\|"false">` | Enable DHCP server IPv6 support (SLAAC + RA).
|
||||
| `DHCP_rapid_commit` | `false` | `<"true"\|"false">` | Enable DHCPv4 rapid commit (fast address assignment).
|
||||
| `VIRTUAL_HOST` | `$ServerIP` | `<Custom Hostname>` | What your web server 'virtual host' is; accessing the admin console through this Hostname/IP allows you to make changes to the whitelist / blacklists in addition to the default 'http://pi.hole/admin/' address
|
||||
| `IPv6` | `true` | `<"true"\|"false">` | For unraid compatibility, strips out all the IPv6 configuration from DNS/Web services when false.
|
||||
| `TEMPERATUREUNIT` | `c` | `<c\|k\|f>` | Set preferred temperature unit to `c`: Celsius, `k`: Kelvin, or `f` Fahrenheit units.
|
||||
| `WEBUIBOXEDLAYOUT` | `boxed` | `<boxed\|traditional>` | Use boxed layout (helpful when working on large screens)
|
||||
| `QUERY_LOGGING` | `true` | `<"true"\|"false">` | Enable query logging or not.
|
||||
| `WEBTHEME` | `default-light` | `<"default-dark"\|"default-darker"\|"default-light"\|"default-auto"\|"lcars">`| User interface theme to use.
|
||||
| `WEBPASSWORD_FILE`| unset | `<Docker secret path>` |Set an Admin password using [Docker secrets](https://docs.docker.com/engine/swarm/secrets/). If `WEBPASSWORD` is set, `WEBPASSWORD_FILE` is ignored. If `WEBPASSWORD` is empty, and `WEBPASSWORD_FILE` is set to a valid readable file path, then `WEBPASSWORD` will be set to the contents of `WEBPASSWORD_FILE`.
|
||||
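For illustration, a sketch of combining custom upstream DNS with conditional forwarding using the variables above; the upstream IPs, domain, router IP, and CIDR are placeholders, and port/volume arguments are omitted:

```bash
docker run -d --name pihole \
  -e PIHOLE_DNS_="1.1.1.1;9.9.9.9" \
  -e REV_SERVER="true" \
  -e REV_SERVER_DOMAIN="lan" \
  -e REV_SERVER_TARGET="192.168.0.1" \
  -e REV_SERVER_CIDR="192.168.0.0/24" \
  pihole/pihole:latest
```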
|
||||
### Advanced Variables
|
||||
| Variable | Default | Value | Description |
|
||||
| -------- | ------- | ----- | ---------- |
|
||||
| `INTERFACE` | unset | `<NIC>` | The default works fine with our basic example docker run commands. If you're trying to use DHCP with `--net host` mode then you may have to customize this or DNSMASQ_LISTENING.
|
||||
| `DNSMASQ_LISTENING` | unset | `<local\|all\|single>` | `local` listens on all local subnets, `all` permits listening on internet origin subnets in addition to local, `single` listens only on the interface specified.
|
||||
| `WEB_PORT` | unset | `<PORT>` | **This will break the 'webpage blocked' functionality of Pi-hole** however it may help advanced setups like those running synology or `--net=host` docker argument. This guide explains how to restore webpage blocked functionality using a linux router DNAT rule: [Alternative Synology installation method](https://discourse.pi-hole.net/t/alternative-synology-installation-method/5454?u=diginc)
|
||||
| `SKIPGRAVITYONBOOT` | unset | `<unset\|1>` | Use this option to skip updating the Gravity Database when booting up the container. By default this environment variable is not set so the Gravity Database will be updated when the container starts up. Setting this environment variable to 1 (or anything) will cause the Gravity Database to not be updated when container starts up.
|
||||
| `CORS_HOSTS` | unset | `<FQDNs delimited by ,>` | List of domains/subdomains on which CORS is allowed. Wildcards are not supported. Eg: `CORS_HOSTS: domain.com,home.domain.com,www.domain.com`.
|
||||
| `CUSTOM_CACHE_SIZE` | `10000` | Number | Set the cache size for dnsmasq. Useful for increasing the default cache size or to set it to 0. Note that when `DNSSEC` is "true", then this setting is ignored.
|
||||
| `FTLCONF_[SETTING]` | unset | As per documentation | Customize pihole-FTL.conf with settings described in the [FTLDNS Configuration page](https://docs.pi-hole.net/ftldns/configfile/). For example, to customize REPLY_ADDR6, ensure you have the `FTLCONF_REPLY_ADDR6` environment variable set.
|
||||
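A short, hedged example of passing some of the advanced variables from the table above; all values are placeholders and other run arguments are omitted:

```bash
docker run -d --name pihole \
  -e SKIPGRAVITYONBOOT=1 \
  -e CUSTOM_CACHE_SIZE=20000 \
  -e CORS_HOSTS="domain.com,www.domain.com" \
  -e FTLCONF_REPLY_ADDR6="fd00::10" \
  pihole/pihole:latest
```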
|
||||
### Experimental Variables
|
||||
| Variable | Default | Value | Description |
|
||||
| -------- | ------- | ----- | ---------- |
|
||||
| `DNSMASQ_USER` | unset | `<pihole\|root>` | Allows changing the user that FTLDNS runs as. Default: `pihole`
|
||||
| `PIHOLE_UID` | debian system value | Number | Overrides the image's default pihole user id to match a host user id |
| `PIHOLE_GID` | debian system value | Number | Overrides the image's default pihole group id to match a host group id |
| `WEB_UID` | debian system value | Number | Overrides the image's default www-data user id to match a host user id |
| `WEB_GID` | debian system value | Number | Overrides the image's default www-data group id to match a host group id |
| `WEBLOGS_STDOUT` | 0 | `0\|1` | 0 logs to the defined files, 1 redirects access and error logs to stdout |
|
||||
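For example, to line the container's user and group ids up with the invoking host user, a sketch might look like this (other arguments omitted):

```bash
docker run -d --name pihole \
  -e PIHOLE_UID=$(id -u) \
  -e PIHOLE_GID=$(id -g) \
  pihole/pihole:latest
```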
|
||||
## Deprecated environment variables:
|
||||
While these may still work, they are likely to be removed in a future version. Where applicable, alternative variable names are indicated. Please review the table above for usage of the alternative variables.
|
||||
|
||||
| Docker Environment Var. | Description | Replaced By |
|
||||
| ----------------------- | ----------- | ----------- |
|
||||
| `CONDITIONAL_FORWARDING` | Enable DNS conditional forwarding for device name resolution | `REV_SERVER`|
|
||||
| `CONDITIONAL_FORWARDING_IP` | If conditional forwarding is enabled, set the IP of the local network router | `REV_SERVER_TARGET` |
|
||||
| `CONDITIONAL_FORWARDING_DOMAIN` | If conditional forwarding is enabled, set the domain of the local network router | `REV_SERVER_DOMAIN` |
|
||||
| `CONDITIONAL_FORWARDING_REVERSE` | If conditional forwarding is enabled, set the reverse DNS of the local network router (e.g. `0.168.192.in-addr.arpa`) | `REV_SERVER_CIDR` |
|
||||
| `DNS1` | Primary upstream DNS provider, default is google DNS | `PIHOLE_DNS_` |
|
||||
| `DNS2` | Secondary upstream DNS provider, default is google DNS, `no` if only one DNS server should be used | `PIHOLE_DNS_` |
|
||||
| `ServerIP` | Set to your server's LAN IP, used by web block modes and lighttpd bind address | `FTLCONF_REPLY_ADDR4` |
|
||||
| `ServerIPv6` | **If you have a v6 network** set to your server's LAN IPv6 to block IPv6 ads fully | `FTLCONF_REPLY_ADDR6` |
|
||||
|
||||
To use these env vars in docker run format, style them like: `-e DNS1=1.1.1.1`
|
||||
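As a sketch, here is a deprecated pair of flags next to its replacement from the table above (run one or the other, not both; the IPs are placeholders):

```bash
# Deprecated style
docker run -d --name pihole -e DNS1=1.1.1.1 -e DNS2=9.9.9.9 pihole/pihole
# Equivalent replacement (upstream servers separated by ';')
docker run -d --name pihole -e PIHOLE_DNS_="1.1.1.1;9.9.9.9" pihole/pihole
```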
|
||||
@@ -117,13 +159,13 @@ Here is a rundown of other arguments for your docker-compose / docker run.
|
||||
|
||||
| Docker Arguments | Description |
|
||||
| ---------------- | ----------- |
|
||||
| `-p <port>:<port>` **Recommended** | Ports to expose (54, 80, 67, 443), the bare minimum ports required for Pi-holes HTTP and DNS services
|
||||
| `-p <port>:<port>` **Recommended** | Ports to expose (53, 80, 67), the bare minimum ports required for Pi-holes HTTP and DNS services
|
||||
| `--restart=unless-stopped`<br/> **Recommended** | Automatically (re)start your Pi-hole on boot or in the event of a crash
|
||||
| `-v $(pwd)/etc-pihole:/etc/pihole`<br/> **Recommended** | Volumes for your Pi-hole configs help persist changes across docker image updates
|
||||
| `-v $(pwd)/etc-dnsmasq.d:/etc/dnsmasq.d`<br/> **Recommended** | Volumes for your dnsmasq configs help persist changes across docker image updates
|
||||
| `--net=host`<br/> *Optional* | Alternative to `-p <port>:<port>` arguments (Cannot be used at same time as -p) if you don't run any other web application. DHCP runs best with --net=host, otherwise your router must support dhcp-relay settings.
|
||||
| `--cap-add=NET_ADMIN`<br/> *Recommended* | Commonly added capability for DHCP, see [Note on Capabilities](#note-on-capabilities) below for other capabilities.
|
||||
| `--dns=127.0.0.1`<br/> *Recommended* | Sets your container's resolve settings to localhost so it can resolve DHCP hostnames from Pi-hole's DNSMasq, also fixes common resolution errors on container restart.
|
||||
| `--dns=127.0.0.1`<br/> *Optional* | Sets your container's resolve settings to localhost so it can resolve DHCP hostnames from Pi-hole's DNSMasq, may fix resolution errors on container restart.
|
||||
| `--dns=1.1.1.1`<br/> *Optional* | Sets a backup server of your choosing in case DNSMasq has problems starting
|
||||
| `--env-file .env` <br/> *Optional* | File to store environment variables for docker replacing `-e key=value` settings. Here for convenience
|
||||
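Putting the recommended arguments together, a hedged sketch of a complete run command might look like the following; see `docker_run.sh` for the maintained version, and treat the timezone and paths as placeholders:

```bash
docker run -d \
  --name pihole \
  -p 53:53/tcp -p 53:53/udp \
  -p 80:80/tcp \
  -e TZ="America/Chicago" \
  -v "$(pwd)/etc-pihole:/etc/pihole" \
  -v "$(pwd)/etc-dnsmasq.d:/etc/dnsmasq.d" \
  --dns=127.0.0.1 --dns=1.1.1.1 \
  --restart=unless-stopped \
  pihole/pihole:latest
```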
|
||||
@@ -133,24 +175,51 @@ Here is a rundown of other arguments for your docker-compose / docker run.
|
||||
* [How do I set or reset the Web interface Password?](https://discourse.pi-hole.net/t/how-do-i-set-or-reset-the-web-interface-password/1328)
|
||||
* `docker exec -it pihole_container_name pihole -a -p` - then enter your password into the prompt
|
||||
* Port conflicts? Stop your server's existing DNS / Web services.
|
||||
* Ubuntu users especially may need to shut off DNS on the docker host so that Pi-hole can run in the container on port 53
|
||||
* 17.04 and later should disable dnsmasq.
|
||||
* 17.10 should disable systemd-resolved service. See this page: [How to disable systemd-resolved in Ubuntu](https://askubuntu.com/questions/907246/how-to-disable-systemd-resolved-in-ubuntu)
|
||||
* Don't forget to stop your services from auto-starting again after you reboot
|
||||
* Port 80 is highly recommended because if you have another site/service using port 80 by default then the ads may not transform into blank ads correctly. To make sure docker-pi-hole plays nicely with an existing webserver you run you'll probably need a reverse proxy webserver config if you don't have one already. Pi-hole must be the default web app on the proxy e.g. if you go to your host by IP instead of domain then Pi-hole is served out instead of any other sites hosted by the proxy. This is the '[default_server](http://nginx.org/en/docs/http/ngx_http_core_module.html#listen)' in nginx or ['_default_' virtual host](https://httpd.apache.org/docs/2.4/vhosts/examples.html#default) in Apache and is taken advantage of so any undefined ad domain can be directed to your webserver and get a 'blocked' response instead of ads.
|
||||
* You can still map other ports to Pi-hole port 80 using docker's port forwarding like this `-p 8080:80`, but again the ads won't render properly. Changing the inner port 80 shouldn't be required unless you run docker host networking mode.
|
||||
* [Here is an example of running with jwilder/proxy](https://github.com/pi-hole/docker-pi-hole/blob/master/docker-compose-jwilder-proxy.yml) (an nginx auto-configuring docker reverse proxy for docker) on my port 80 with Pi-hole on another port. Pi-hole needs to be `DEFAULT_HOST` env in jwilder/proxy and you need to set the matching `VIRTUAL_HOST` for the Pi-hole's container. Please read jwilder/proxy readme for more info if you have trouble.
|
||||
* Ubuntu users see below for more detailed information
|
||||
* You can map other ports to Pi-hole port 80 using docker's port forwarding like this `-p 8080:80` if you are using the default blocking mode. If you are using the legacy IP blocking mode, you should not remap this port.
|
||||
* [Here is an example of running with nginxproxy/nginx-proxy](https://github.com/pi-hole/docker-pi-hole/blob/master/docker-compose-nginx-proxy.yml) (an nginx auto-configuring docker reverse proxy for docker) on my port 80 with Pi-hole on another port. Pi-hole needs to be `DEFAULT_HOST` env in nginxproxy/nginx-proxy and you need to set the matching `VIRTUAL_HOST` for the Pi-hole's container. Please read nginxproxy/nginx-proxy readme for more info if you have trouble.
|
||||
* Docker's default network mode `bridge` isolates the container from the host's network. This is a more secure setting, but requires setting the Pi-hole DNS option for *Interface listening behavior* to "Listen on all interfaces, permit all origins".
|
||||
|
||||
### Installing on Ubuntu
|
||||
Modern releases of Ubuntu (17.10+) include [`systemd-resolved`](http://manpages.ubuntu.com/manpages/bionic/man8/systemd-resolved.service.8.html) which is configured by default to implement a caching DNS stub resolver. This will prevent pi-hole from listening on port 53.
|
||||
The stub resolver should be disabled with: `sudo sed -r -i.orig 's/#?DNSStubListener=yes/DNSStubListener=no/g' /etc/systemd/resolved.conf`
|
||||
|
||||
This will not change the nameserver settings, which still point to the stub resolver and would therefore break DNS resolution. Change the `/etc/resolv.conf` symlink to point to `/run/systemd/resolve/resolv.conf`, which is automatically updated to follow the system's [`netplan`](https://netplan.io/):
|
||||
`sudo sh -c 'rm /etc/resolv.conf && ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf'`
|
||||
After making these changes, you should restart systemd-resolved using `systemctl restart systemd-resolved`
|
||||
|
||||
Once pi-hole is installed, you'll want to configure your clients to use it ([see here](https://discourse.pi-hole.net/t/how-do-i-configure-my-devices-to-use-pi-hole-as-their-dns-server/245)). If you used the symlink above, your docker host will either use whatever is served by DHCP, or whatever static setting you've configured. If you want to explicitly set your docker host's nameservers you can edit the netplan(s) found at `/etc/netplan`, then run `sudo netplan apply`.
|
||||
Example netplan:
|
||||
```yaml
|
||||
network:
|
||||
ethernets:
|
||||
ens160:
|
||||
dhcp4: true
|
||||
dhcp4-overrides:
|
||||
use-dns: false
|
||||
nameservers:
|
||||
addresses: [127.0.0.1]
|
||||
version: 2
|
||||
```
|
||||
|
||||
Note that it is also possible to disable `systemd-resolved` entirely. However, this can cause problems with name resolution in VPNs ([see bug report](https://bugs.launchpad.net/network-manager/+bug/1624317)). It also disables the functionality of netplan since systemd-resolved is used as the default renderer ([see `man netplan`](http://manpages.ubuntu.com/manpages/bionic/man5/netplan.5.html#description)). If you choose to disable the service, you will need to manually set the nameservers, for example by creating a new `/etc/resolv.conf`.
|
||||
|
||||
Users of older Ubuntu releases (circa 17.04) will need to disable dnsmasq.
|
||||
|
||||
## Docker tags and versioning
|
||||
|
||||
The primary docker tags / versions are explained in the following table. [Click here to see the full list of tags](https://store.docker.com/community/images/pihole/pihole/tags) ([arm tags are here](https://store.docker.com/community/images/pihole/pihole/tags)), I also try to tag with the specific version of Pi-hole Core for version archival purposes, the web version that comes with the core releases should be in the [GitHub Release notes](https://github.com/pi-hole/docker-pi-hole/releases).
|
||||
The primary docker tags / versions are explained in the following table. [Click here to see the full list of tags](https://store.docker.com/community/images/pihole/pihole/tags), I also try to tag with the specific version of Pi-hole Core for version archival purposes, the web version that comes with the core releases should be in the [GitHub Release notes](https://github.com/pi-hole/docker-pi-hole/releases).
|
||||
|
||||
| tag | architecture | description | Dockerfile |
|
||||
| --- | ------------ | ----------- | ---------- |
|
||||
| `latest` | auto detect | x86, arm, or arm64 container, docker auto detects your architecture. | [Dockerfile](https://github.com/pi-hole/docker-pi-hole/blob/master/Dockerfile_amd64) |
|
||||
| `v4.0.0-1` | auto detect | Versioned tags, if you want to pin against a specific version, use one of thesse | |
|
||||
| `v4.0.0-1_<arch>` | based on tag | Specific architectures tags | |
|
||||
| `dev` | auto detect | like latest tag, but for the development branch (pushed occasionally) | |
|
||||
| tag | architecture | description | Dockerfile |
|
||||
| --- | ------------ | ----------- | ---------- |
|
||||
| `latest` | auto detect | x86, arm, or arm64 container, docker auto detects your architecture. | [Dockerfile](https://github.com/pi-hole/docker-pi-hole/blob/master/Dockerfile) |
|
||||
| `v5.0` | auto detect | Versioned tags, if you want to pin against a specific Pi-hole version, use one of these | |
|
||||
| `v5.0-buster` | auto detect | Versioned tags, if you want to pin against a specific Pi-hole and Debian version, use one of these | |
|
||||
| `v5.0-<arch>-buster ` | based on tag | Specific architectures and Debian version tags | |
|
||||
| `dev` | auto detect | like latest tag, but for the development branch (pushed occasionally) | |
|
||||
| `beta-*` | auto detect | Early beta releases of upcoming versions - here be dragons | |
|
||||
| `nightly` | auto detect | Like `dev` but pushed every night and pulls from the latest `development` branches of the core Pi-hole components (Pi-hole, AdminLTE, FTL) | |
|
||||
|
||||
### `pihole/pihole:latest` [](https://microbadger.com/images/pihole/pihole "Get your own image badge on microbadger.com") [](https://microbadger.com/images/pihole/pihole "Get your own version badge on microbadger.com") [](https://microbadger.com/images/pihole/pihole "Get your own version badge on microbadger.com")
|
||||
|
||||
@@ -160,7 +229,7 @@ https://hub.docker.com/r/pihole/pihole/tags/
|
||||
|
||||
## Upgrading, Persistence, and Customizations
|
||||
|
||||
The standard Pi-hole customization abilities apply to this docker, but with docker twists such as using docker volume mounts to map host stored file configurations over the container defaults. Volumes are also important to persist the configuration in case you have removed the Pi-hole container which is a typical docker upgrade pattern.
|
||||
The standard Pi-hole customization abilities apply to this docker, but with docker twists such as using docker volume mounts to map host stored file configurations over the container defaults. However, mounting these configuration files as read-only should be avoided. Volumes are also important to persist the configuration in case you have removed the Pi-hole container which is a typical docker upgrade pattern.
|
||||
|
||||
### Upgrading / Reconfiguring
|
||||
|
||||
@@ -171,11 +240,11 @@ Do not attempt to upgrade (`pihole -up`) or reconfigure (`pihole -r`). New imag
|
||||
* We will try to put common break/fixes at the top of this readme too
|
||||
1. Download the latest version of the image: `docker pull pihole/pihole`
|
||||
2. Throw away your container: `docker rm -f pihole`
|
||||
* **Warning** When removing your pihole container you may be stuck without DNS until step 3; **docker pull** before **docker rm -f** to avoid DNS inturruption **OR** always have a fallback DNS server configured in DHCP to avoid this problem altogether.
|
||||
* **Warning** When removing your pihole container you may be stuck without DNS until step 3; **docker pull** before **docker rm -f** to avoid DNS interruption **OR** always have a fallback DNS server configured in DHCP to avoid this problem altogether.
|
||||
* If you care about your data (logs/customizations), make sure you have it volume-mapped or it will be deleted in this step.
|
||||
3. Start your container with the newer base image: `docker run <args> pihole/pihole` (`<args>` being your preferred run volumes and env vars)
|
||||
|
||||
Why is this style of upgrading good? A couple reasons: Everyone is starting from the same base image which has been tested to known it works. No worrying about upgrading from A to B, B to C, or A to C is required when rolling out updates, it reducing complexity, and simply allows a 'fresh start' every time while preserving customizations with volumes. Basically I'm encouraging [phoenix server](https://www.google.com/?q=phoenix+servers) principles for your containers.
|
||||
Why is this style of upgrading good? A couple reasons: Everyone is starting from the same base image which has been tested to known it works. No worrying about upgrading from A to B, B to C, or A to C is required when rolling out updates, it reduces complexity, and simply allows a 'fresh start' every time while preserving customizations with volumes. Basically I'm encouraging [phoenix server](https://www.google.com/?q=phoenix+servers) principles for your containers.
|
||||
|
||||
To reconfigure Pi-hole you'll either need to use the existing container environment variables or, if there is no variable for what you need, use the web UI or CLI commands.
|
||||
|
||||
@@ -207,6 +276,8 @@ DNSMasq / [FTLDNS](https://docs.pi-hole.net/ftldns/in-depth/#linux-capabilities)
|
||||
- `CAP_NET_BIND_SERVICE`: Allows FTLDNS binding to TCP/UDP sockets below 1024 (specifically DNS service on port 53)
|
||||
- `CAP_NET_RAW`: use raw and packet sockets (needed for handling DHCPv6 requests, and verifying that an IP is not in use before leasing it)
|
||||
- `CAP_NET_ADMIN`: modify routing tables and other network-related operations (in particular inserting an entry in the neighbor table to answer DHCP requests using unicast packets)
|
||||
- `CAP_SYS_NICE`: FTL sets itself as an important process to get some more processing time if the latter is running low
|
||||
- `CAP_CHOWN`: we need to be able to change ownership of log files and databases in case FTL is started as a different user than `pihole`
|
||||
|
||||
This image automatically grants those capabilities, if available, to the FTLDNS process, even when run as non-root.\
|
||||
By default, docker does not include the `NET_ADMIN` capability for non-privileged containers, and it is recommended to explicitly add it to the container using `--cap-add=NET_ADMIN`.\
|
||||
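For instance, when using the container as a DHCP server, the capability can be added explicitly; this is a sketch assuming host networking (recommended for DHCP) and placeholder paths/timezone:

```bash
docker run -d --name pihole \
  --net=host \
  --cap-add=NET_ADMIN \
  -e TZ="America/Chicago" \
  -v "$(pwd)/etc-pihole:/etc/pihole" \
  -v "$(pwd)/etc-dnsmasq.d:/etc/dnsmasq.d" \
  pihole/pihole:latest
```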
|
||||
17 TESTING.md
@@ -1,21 +1,24 @@
|
||||
# Prerequisites
|
||||
|
||||
Make sure you have docker, python, and pip. I won't cover how to install those here, please search the internet for that info if you need it.
|
||||
Make sure you have bash & docker installed.
|
||||
Python and some test hacks are crammed into the `Dockerfile_build` file for now.
|
||||
Revisions in the future may re-enable running python on your host (not just in docker).
|
||||
|
||||
# Running tests locally
|
||||
|
||||
Travis-ci auto runs tests during pull requests (PR) but it only has 2 cores and if you have more/faster cpus your PC's local tests will be faster and you'll have quicker feedback loops than continually pushing to have your PR run travis-ci
|
||||
`ARCH=amd64 ./gh-actions-test.sh`
|
||||
|
||||
After you have the prereqs, to get the required pip packages run: `pip install -r requirements.txt`
|
||||
Should result in:
|
||||
|
||||
To run the Dockerfile templating, image build, and tests all in one command just run: `tox`
|
||||
- An image named `pihole:amd64` being built
|
||||
- Tests being run to confirm the image doesn't have any regressions
|
||||
|
||||
# Local image names
|
||||
|
||||
Docker images built by `tox` or `python Dockerfile.py` are named the same but stripped of the `pihole/` docker repository namespace.
|
||||
Docker images built by `Dockerfile.py` are named the same but stripped of the `pihole/` docker repository namespace.
|
||||
|
||||
e.g. `pi-hole:debian_amd64` or `pi-hole-multiarch:debian_aarch64`
|
||||
e.g. `pi-hole:debian_amd64` or `pi-hole-multiarch:debian_arm64`
|
||||
|
||||
You can run the multiarch images on an amd64 development system if you [enable binfmt-support as described in the multiarch image docs](https://hub.docker.com/r/multiarch/multiarch/debian-debootstrap/)
|
||||
You can run the multiarch images on an amd64 development system if you [enable binfmt-support as described in the multiarch image docs](https://hub.docker.com/r/multiarch/debian-debootstrap/)
|
||||
|
||||
`docker run --rm --privileged multiarch/qemu-user-static:register --reset`
|
||||
|
||||
@@ -2,92 +2,50 @@
|
||||
# Some of the bash_functions use variables from these core pi-hole/web scripts
|
||||
. /opt/pihole/webpage.sh
|
||||
|
||||
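# Sanity-check /etc/resolv.conf: warn unless there are at least two nameservers and the first one is 127.0.0.1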
docker_checks() {
|
||||
warn_msg='WARNING Misconfigured DNS in /etc/resolv.conf'
|
||||
ns_count="$(grep -c nameserver /etc/resolv.conf)"
|
||||
ns_primary="$(grep nameserver /etc/resolv.conf | head -1)"
|
||||
ns_primary="${ns_primary/nameserver /}"
|
||||
warned=false
|
||||
|
||||
if [ "$ns_count" -lt 2 ] ; then
|
||||
echo "$warn_msg: Two DNS servers are recommended, 127.0.0.1 and any backup server"
|
||||
warned=true
|
||||
fi
|
||||
|
||||
if [ "$ns_primary" != "127.0.0.1" ] ; then
|
||||
echo "$warn_msg: Primary DNS should be 127.0.0.1 (found ${ns_primary})"
|
||||
warned=true
|
||||
fi
|
||||
|
||||
if ! $warned ; then
|
||||
echo "OK: Checks passed for /etc/resolv.conf DNS servers"
|
||||
fi
|
||||
|
||||
echo
|
||||
cat /etc/resolv.conf
|
||||
}
|
||||
|
||||
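# Grant pihole-FTL the file capabilities it needs; bail out if they cannot be set and FTL is not running as root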
fix_capabilities() {
|
||||
setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN+ei $(which pihole-FTL) || ret=$?
|
||||
setcap CAP_NET_BIND_SERVICE,CAP_NET_RAW,CAP_NET_ADMIN,CAP_SYS_NICE,CAP_CHOWN+ei $(which pihole-FTL) || ret=$?
|
||||
|
||||
if [[ $ret -ne 0 && "${DNSMASQ_USER:-root}" != "root" ]]; then
|
||||
echo "ERROR: Failed to set capabilities for pihole-FTL. Cannot run as non-root."
|
||||
if [[ $ret -ne 0 && "${DNSMASQ_USER:-pihole}" != "root" ]]; then
|
||||
echo "ERROR: Unable to set capabilities for pihole-FTL. Cannot run as non-root."
|
||||
echo " If you are seeing this error, please set the environment variable 'DNSMASQ_USER' to the value 'root'"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
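# Source the core installer, (re)install configs, fix ownership/permissions on any volume mounts, and rewrite the required setupVars entries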
prepare_configs() {
|
||||
# Done in /start.sh, don't do twice
|
||||
PH_TEST=true . $PIHOLE_INSTALL
|
||||
distro_check
|
||||
PH_TEST=true . "${PIHOLE_INSTALL}"
|
||||
# Set Debian webserver variables for installConfigs
|
||||
LIGHTTPD_USER="www-data"
|
||||
LIGHTTPD_GROUP="www-data"
|
||||
LIGHTTPD_CFG="lighttpd.conf.debian"
|
||||
installConfigs
|
||||
touch "$setupVars"
|
||||
set +e
|
||||
mkdir -p /var/run/pihole /var/log/pihole
|
||||
# Re-apply perms from basic-install over any volume mounts that may be present (or not)
|
||||
# Also similar to preflights for FTL https://github.com/pi-hole/pi-hole/blob/master/advanced/Templates/pihole-FTL.service
|
||||
|
||||
chown pihole:root /etc/lighttpd
|
||||
chown pihole:pihole "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf" "/var/log/pihole" "${regexFile}"
|
||||
chmod 644 "${PI_HOLE_CONFIG_DIR}/pihole-FTL.conf"
|
||||
# not sure why pihole:pihole user/group write perms are not enough for web to write...dirty fix:
|
||||
chmod 777 "${regexFile}"
|
||||
touch /var/log/pihole-FTL.log /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole.log
|
||||
chown pihole:pihole /var/run/pihole /var/log/pihole
|
||||
test -f /var/run/pihole/FTL.sock && rm /var/run/pihole/FTL.sock
|
||||
chown pihole:pihole /var/log/pihole-FTL.log /run/pihole-FTL.pid /run/pihole-FTL.port /etc/pihole /etc/pihole/dhcp.leases /var/log/pihole.log
|
||||
chmod 0644 /var/log/pihole-FTL.log /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole.log
|
||||
|
||||
# In case of `pihole` UID being changed, re-chown the pihole scripts and pihole command
|
||||
chown -R pihole:root "${PI_HOLE_INSTALL_DIR}"
|
||||
chown pihole:root "${PI_HOLE_BIN_DIR}/pihole"
|
||||
|
||||
set -e
|
||||
# Update version numbers
|
||||
pihole updatechecker
|
||||
# Re-write all of the setupVars to ensure required ones are present (like QUERY_LOGGING)
|
||||
|
||||
|
||||
# If the setup variable file exists,
|
||||
if [[ -e "${setupVars}" ]]; then
|
||||
# update the variables in the file
|
||||
local USERWEBPASSWORD="${WEBPASSWORD}"
|
||||
. "${setupVars}"
|
||||
# Stash and pop the user password to avoid setting the password to the hashed setupVar variable
|
||||
WEBPASSWORD="${USERWEBPASSWORD}"
|
||||
# Clean up old before re-writing the required setupVars
|
||||
sed -i.update.bak '/PIHOLE_INTERFACE/d;/IPV4_ADDRESS/d;/IPV6_ADDRESS/d;/QUERY_LOGGING/d;/INSTALL_WEB_SERVER/d;/INSTALL_WEB_INTERFACE/d;/LIGHTTPD_ENABLED/d;' "${setupVars}"
|
||||
cp -f "${setupVars}" "${setupVars}.update.bak"
|
||||
fi
|
||||
# echo the information to the user
|
||||
{
|
||||
echo "PIHOLE_INTERFACE=${PIHOLE_INTERFACE}"
|
||||
echo "IPV4_ADDRESS=${IPV4_ADDRESS}"
|
||||
echo "IPV6_ADDRESS=${IPV6_ADDRESS}"
|
||||
echo "QUERY_LOGGING=${QUERY_LOGGING}"
|
||||
echo "INSTALL_WEB_SERVER=${INSTALL_WEB_SERVER}"
|
||||
echo "INSTALL_WEB_INTERFACE=${INSTALL_WEB_INTERFACE}"
|
||||
echo "LIGHTTPD_ENABLED=${LIGHTTPD_ENABLED}"
|
||||
}>> "${setupVars}"
|
||||
}
|
||||
|
||||
validate_env() {
|
||||
# Optional ServerIP is a valid IP
|
||||
# nc won't throw any text based errors when it times out connecting to a valid IP, otherwise it complains about the DNS name being garbage
|
||||
# if nc doesn't behave as we expect on a valid IP the routing table should be able to look it up and return a 0 retcode
|
||||
if [[ "$(nc -4 -w1 -z "$ServerIP" 53 2>&1)" != "" ]] || ! ip route get "$ServerIP" > /dev/null ; then
|
||||
if [[ "$(nc -4 -w1 -z "$ServerIP" 53 2>&1)" != "" ]] && ! ip route get "$ServerIP" > /dev/null ; then
|
||||
echo "ERROR: ServerIP Environment variable ($ServerIP) doesn't appear to be a valid IPv4 address"
|
||||
exit 1
|
||||
fi
|
||||
@@ -99,7 +57,7 @@ validate_env() {
|
||||
unset ServerIPv6
|
||||
exit 1
|
||||
fi
|
||||
if [[ "$(nc -6 -w1 -z "$ServerIPv6" 53 2>&1)" != "" ]] || ! ip route get "$ServerIPv6" > /dev/null ; then
|
||||
if [[ "$(nc -6 -w1 -z "$ServerIPv6" 53 2>&1)" != "" ]] && ! ip route get "$ServerIPv6" > /dev/null ; then
|
||||
echo "ERROR: ServerIPv6 Environment variable ($ServerIPv6) doesn't appear to be a valid IPv6 address"
|
||||
echo " TIP: If your server is not IPv6 enabled just remove '-e ServerIPv6' from your docker container"
|
||||
exit 1
|
||||
@@ -107,43 +65,6 @@ validate_env() {
|
||||
fi;
|
||||
}
|
||||
|
||||
setup_dnsmasq_dns() {
|
||||
. /opt/pihole/webpage.sh
|
||||
local DNS1="${1:-8.8.8.8}"
|
||||
local DNS2="${2:-8.8.4.4}"
|
||||
local dnsType='default'
|
||||
if [ "$DNS1" != '8.8.8.8' ] || [ "$DNS2" != '8.8.4.4' ] ; then
|
||||
dnsType='custom'
|
||||
fi;
|
||||
|
||||
# TODO With the addition of this to /start.sh this needs a refactor
|
||||
if [ ! -f /.piholeFirstBoot ] ; then
|
||||
local setupDNS1="$(grep 'PIHOLE_DNS_1' ${setupVars})"
|
||||
local setupDNS2="$(grep 'PIHOLE_DNS_2' ${setupVars})"
|
||||
setupDNS1="${setupDNS1/PIHOLE_DNS_1=/}"
|
||||
setupDNS2="${setupDNS2/PIHOLE_DNS_2=/}"
|
||||
if [[ -n "$DNS1" && -n "$setupDNS1" ]] || \
|
||||
[[ -n "$DNS2" && -n "$setupDNS2" ]] ; then
|
||||
echo "Docker DNS variables not used"
|
||||
fi
|
||||
echo "Existing DNS servers used (${setupDNS1:-unset} & ${setupDNS2:-unset})"
|
||||
return
|
||||
fi
|
||||
|
||||
echo "Using $dnsType DNS servers: $DNS1 & $DNS2"
|
||||
if [[ -n "$DNS1" && -z "$setupDNS1" ]] ; then
|
||||
change_setting "PIHOLE_DNS_1" "${DNS1}"
|
||||
fi
|
||||
if [[ -n "$DNS2" && -z "$setupDNS2" ]] ; then
|
||||
if [[ "$DNS2" == "no" ]] ; then
|
||||
delete_setting "PIHOLE_DNS_2"
|
||||
unset PIHOLE_DNS_2
|
||||
else
|
||||
change_setting "PIHOLE_DNS_2" "${DNS2}"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
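# Example (illustrative): starting the container with `-e DNS1=1.1.1.1 -e DNS2=no`
# would make the function above record PIHOLE_DNS_1=1.1.1.1 and delete PIHOLE_DNS_2
# (a single custom upstream); leaving both unset keeps the 8.8.8.8 / 8.8.4.4 defaults.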
setup_dnsmasq_interface() {
|
||||
local interface="${1:-eth0}"
|
||||
local interfaceType='default'
|
||||
@@ -170,16 +91,14 @@ setup_dnsmasq_config_if_missing() {
|
||||
}
|
||||
|
||||
setup_dnsmasq() {
|
||||
local dns1="$1"
|
||||
local dns2="$2"
|
||||
local interface="$3"
|
||||
local dnsmasq_listening_behaviour="$4"
|
||||
# Coordinates
|
||||
local interface="$1"
|
||||
local dnsmasq_listening_behaviour="$2"
|
||||
# Coordinates
|
||||
setup_dnsmasq_config_if_missing
|
||||
setup_dnsmasq_dns "$dns1" "$dns2"
|
||||
setup_dnsmasq_interface "$interface"
|
||||
setup_dnsmasq_listening_behaviour "$dnsmasq_listening_behaviour"
|
||||
setup_dnsmasq_user "${DNSMASQ_USER}"
|
||||
setup_cache_size "${CUSTOM_CACHE_SIZE}"
|
||||
ProcessDNSSettings
|
||||
}
|
||||
|
||||
@@ -234,10 +153,36 @@ setup_dnsmasq_hostnames() {
|
||||
fi
|
||||
}
|
||||
|
||||
setup_cache_size() {
|
||||
local warning="WARNING: CUSTOM_CACHE_SIZE not used"
|
||||
local dnsmasq_pihole_01_location="/etc/dnsmasq.d/01-pihole.conf"
|
||||
# Quietly exit early for empty or default
|
||||
if [[ -z "${1}" || "${1}" == '10000' ]] ; then return ; fi
|
||||
|
||||
if [[ "${DNSSEC}" == "true" ]] ; then
|
||||
echo "$warning - Cannot change cache size if DNSSEC is enabled"
|
||||
return
|
||||
fi
|
||||
|
||||
if ! echo $1 | grep -q '^[0-9]*$' ; then
|
||||
echo "$warning - $1 is not an integer"
|
||||
return
|
||||
fi
|
||||
|
||||
local -i custom_cache_size="$1"
|
||||
if (( $custom_cache_size < 0 )); then
|
||||
echo "$warning - $custom_cache_size is not a positive integer or zero"
|
||||
return
|
||||
fi
|
||||
echo "Custom CUSTOM_CACHE_SIZE set to $custom_cache_size"
|
||||
|
||||
sed -i "s/^cache-size=\s*[0-9]*/cache-size=$custom_cache_size/" ${dnsmasq_pihole_01_location}
|
||||
}
|
||||
|
||||
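# Example (illustrative): `-e CUSTOM_CACHE_SIZE=0` disables dnsmasq caching entirely and
# `-e CUSTOM_CACHE_SIZE=20000` doubles the 10000 default; either value is ignored (with the
# warning above) when DNSSEC=true or when the value is not a non-negative integer.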
setup_lighttpd_bind() {
|
||||
local serverip="$1"
|
||||
# if using '--net=host' only bind lighttpd on $ServerIP and localhost
|
||||
if grep -q "docker" /proc/net/dev ; then #docker (docker0 by default) should only be present on the host system
|
||||
if grep -q "docker" /proc/net/dev && [[ $serverip != 0.0.0.0 ]]; then #docker (docker0 by default) should only be present on the host system
|
||||
if ! grep -q "server.bind" /etc/lighttpd/lighttpd.conf ; then # if the declaration is already there, don't add it again
|
||||
sed -i -E "s/server\.port\s+\=\s+([0-9]+)/server.bind\t\t = \"${serverip}\"\nserver.port\t\t = \1\n"\$SERVER"\[\"socket\"\] == \"127\.0\.0\.1:\1\" \{\}/" /etc/lighttpd/lighttpd.conf
|
||||
fi
|
||||
@@ -248,20 +193,20 @@ setup_php_env() {
|
||||
if [ -z "$VIRTUAL_HOST" ] ; then
|
||||
VIRTUAL_HOST="$ServerIP"
|
||||
fi;
|
||||
local vhost_line="\t\t\t\"VIRTUAL_HOST\" => \"${VIRTUAL_HOST}\","
|
||||
local serverip_line="\t\t\t\"ServerIP\" => \"${ServerIP}\","
|
||||
local php_error_line="\t\t\t\"PHP_ERROR_LOG\" => \"${PHP_ERROR_LOG}\","
|
||||
|
||||
# idempotent line additions
|
||||
grep -qP "$vhost_line" "$PHP_ENV_CONFIG" || \
|
||||
sed -i "/bin-environment/ a\\${vhost_line}" "$PHP_ENV_CONFIG"
|
||||
grep -qP "$serverip_line" "$PHP_ENV_CONFIG" || \
|
||||
sed -i "/bin-environment/ a\\${serverip_line}" "$PHP_ENV_CONFIG"
|
||||
grep -qP "$php_error_line" "$PHP_ENV_CONFIG" || \
|
||||
sed -i "/bin-environment/ a\\${php_error_line}" "$PHP_ENV_CONFIG"
|
||||
for config_var in "VIRTUAL_HOST" "CORS_HOSTS" "ServerIP" "PHP_ERROR_LOG" "PIHOLE_DOCKER_TAG" "TZ"; do
|
||||
local beginning_of_line="\t\t\t\"${config_var}\" => "
|
||||
if grep -qP "$beginning_of_line" "$PHP_ENV_CONFIG" ; then
|
||||
# replace line if already present
|
||||
sed -i "/${beginning_of_line}/c\\${beginning_of_line}\"${!config_var}\"," "$PHP_ENV_CONFIG"
|
||||
else
|
||||
# add line otherwise
|
||||
sed -i "/bin-environment/ a\\${beginning_of_line}\"${!config_var}\"," "$PHP_ENV_CONFIG"
|
||||
fi
|
||||
done
|
||||
|
||||
echo "Added ENV to php:"
|
||||
grep -E '(VIRTUAL_HOST|ServerIP|PHP_ERROR_LOG)' "$PHP_ENV_CONFIG"
|
||||
grep -E '(VIRTUAL_HOST|CORS_HOSTS|ServerIP|PHP_ERROR_LOG|PIHOLE_DOCKER_TAG|TZ)' "$PHP_ENV_CONFIG"
|
||||
}
|
||||
|
||||
setup_web_port() {
|
||||
@@ -269,7 +214,7 @@ setup_web_port() {
|
||||
# Quietly exit early for empty or default
|
||||
if [[ -z "${1}" || "${1}" == '80' ]] ; then return ; fi
|
||||
|
||||
if ! echo $1 | grep -q '^[0-9][0-9]*$' ; then
|
||||
if ! echo $1 | grep -q '^[0-9][0-9]*$' ; then
|
||||
echo "$warning - $1 is not an integer"
|
||||
return
|
||||
fi
|
||||
@@ -284,12 +229,17 @@ setup_web_port() {
|
||||
|
||||
# Update lighttpd's port
|
||||
sed -i '/server.port\s*=\s*80\s*$/ s/80/'$WEB_PORT'/g' /etc/lighttpd/lighttpd.conf
|
||||
# Update any default port 80 references in the HTML
|
||||
grep -Prl '://127\.0\.0\.1/' /var/www/html/ | xargs -r sed -i "s|/127\.0\.0\.1/|/127.0.0.1:${WEB_PORT}/|g"
|
||||
grep -Prl '://pi\.hole/' /var/www/html/ | xargs -r sed -i "s|/pi\.hole/|/pi\.hole:${WEB_PORT}/|g"
|
||||
|
||||
}
|
||||
|
||||
load_web_password_secret() {
|
||||
# If WEBPASSWORD is not set at all, attempt to read password from WEBPASSWORD_FILE,
|
||||
# allowing secrets to be passed via docker secrets
|
||||
if [ -z "${WEBPASSWORD+x}" ] && [ -n "${WEBPASSWORD_FILE}" ] && [ -r "${WEBPASSWORD_FILE}" ]; then
|
||||
WEBPASSWORD=$(<"${WEBPASSWORD_FILE}")
|
||||
fi;
|
||||
}
|
||||
|
||||
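# Example (illustrative): with a secret mounted by the orchestrator at
# /run/secrets/pihole_webpassword, starting the container with
#   -e WEBPASSWORD_FILE=/run/secrets/pihole_webpassword
# (and WEBPASSWORD left unset) makes the admin password come from that file.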
generate_password() {
|
||||
if [ -z "${WEBPASSWORD+x}" ] ; then
|
||||
# Not set at all, give the user a random pass
|
||||
@@ -302,17 +252,14 @@ setup_web_password() {
|
||||
setup_var_exists "WEBPASSWORD" && return
|
||||
|
||||
PASS="$1"
|
||||
# Turn bash debug on while setting up password (to print it)
|
||||
# Explicitly turn off bash printing when working with secrets
|
||||
{ set +x; } 2>/dev/null
|
||||
|
||||
if [[ "$PASS" == "" ]] ; then
|
||||
echo "" | pihole -a -p
|
||||
else
|
||||
echo "Setting password: ${PASS}"
|
||||
set -x
|
||||
pihole -a -p "$PASS" "$PASS"
|
||||
fi
|
||||
# Turn bash debug back off after print password setup
|
||||
# (subshell to null hides printing output)
|
||||
{ set +x; } 2>/dev/null
|
||||
|
||||
# To avoid printing this if conditional in bash debug, turn off debug above..
|
||||
# then re-enable debug if necessary (more code but cleaner printed output)
|
||||
@@ -323,7 +270,7 @@ setup_web_password() {
|
||||
|
||||
setup_ipv4_ipv6() {
|
||||
local ip_versions="IPv4 and IPv6"
|
||||
if [ "$IPv6" != "True" ] ; then
|
||||
if [ "${IPv6,,}" != "true" ] ; then
|
||||
ip_versions="IPv4"
|
||||
sed -i '/use-ipv6.pl/ d' /etc/lighttpd/lighttpd.conf
|
||||
fi;
|
||||
@@ -332,17 +279,14 @@ setup_ipv4_ipv6() {
|
||||
|
||||
test_configs() {
|
||||
set -e
|
||||
echo -n '::: Testing pihole-FTL DNS: '
|
||||
sudo -u ${DNSMASQ_USER:-root} pihole-FTL test || exit 1
|
||||
echo -n '::: Testing lighttpd config: '
|
||||
lighttpd -t -f /etc/lighttpd/lighttpd.conf || exit 1
|
||||
set +e
|
||||
echo "::: All config checks passed, cleared for startup ..."
|
||||
}
|
||||
|
||||
|
||||
setup_blocklists() {
|
||||
local blocklists="$1"
|
||||
local blocklists="$1"
|
||||
# Exit/return early without setting up adlists with defaults for any of the following conditions:
|
||||
# 1. skip_setup_blocklists env is set
|
||||
exit_string="(exiting ${FUNCNAME[0]} early)"
|
||||
@@ -380,3 +324,48 @@ setup_var_exists() {
|
||||
fi
|
||||
}
|
||||
|
||||
setup_temp_unit() {
|
||||
local UNIT="$1"
|
||||
# check if var is empty
|
||||
if [[ "$UNIT" != "" ]] ; then
|
||||
# check if we have valid units
|
||||
if [[ "$UNIT" == "c" || "$UNIT" == "k" || $UNIT == "f" ]] ; then
|
||||
pihole -a -${UNIT}
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
setup_ui_layout() {
|
||||
local LO=$1
|
||||
# check if var is empty
|
||||
if [[ "$LO" != "" ]] ; then
|
||||
# check if we have valid types boxed | traditional
|
||||
if [[ "$LO" == "traditional" || "$LO" == "boxed" ]] ; then
|
||||
change_setting "WEBUIBOXEDLAYOUT" "$WEBUIBOXEDLAYOUT"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
setup_admin_email() {
|
||||
local EMAIL=$1
|
||||
# check if var is empty
|
||||
if [[ "$EMAIL" != "" ]] ; then
|
||||
pihole -a -e "$EMAIL"
|
||||
fi
|
||||
}
|
||||
|
||||
setup_dhcp() {
|
||||
if [ -z "${DHCP_START}" ] || [ -z "${DHCP_END}" ] || [ -z "${DHCP_ROUTER}" ]; then
|
||||
echo "ERROR: Won't enable DHCP server because mandatory Environment variables are missing: DHCP_START, DHCP_END and/or DHCP_ROUTER"
|
||||
change_setting "DHCP_ACTIVE" "false"
|
||||
else
|
||||
change_setting "DHCP_ACTIVE" "${DHCP_ACTIVE}"
|
||||
change_setting "DHCP_START" "${DHCP_START}"
|
||||
change_setting "DHCP_END" "${DHCP_END}"
|
||||
change_setting "DHCP_ROUTER" "${DHCP_ROUTER}"
|
||||
change_setting "DHCP_LEASETIME" "${DHCP_LEASETIME}"
|
||||
change_setting "PIHOLE_DOMAIN" "${PIHOLE_DOMAIN}"
|
||||
change_setting "DHCP_IPv6" "${DHCP_IPv6}"
|
||||
change_setting "DHCP_rapid_commit" "${DHCP_rapid_commit}"
|
||||
fi
|
||||
}
|
||||
|
||||
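As a rough sketch of how the DHCP variables above fit together (addresses are placeholders, and this shows only the environment variables, not a complete DHCP-ready run command; port 67/udp and network mode still need to be handled as in the compose examples):

```
docker run -d --name pihole \
  --cap-add NET_ADMIN \
  -e DHCP_ACTIVE=true \
  -e DHCP_START=192.168.1.100 \
  -e DHCP_END=192.168.1.200 \
  -e DHCP_ROUTER=192.168.1.1 \
  -e PIHOLE_DOMAIN=lan \
  pihole/pihole:latest
```
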
17
build.yml
Normal file
@@ -0,0 +1,17 @@
|
||||
# Docker Compose build file: docker-compose -f build.yml build
|
||||
version: "3.7"
|
||||
|
||||
x-common-args: &common-args
|
||||
PIHOLE_DOCKER_TAG: ${PIHOLE_DOCKER_TAG}
|
||||
CORE_VERSION: ${CORE_VERSION}
|
||||
WEB_VERSION: ${WEB_VERSION}
|
||||
FTL_VERSION: ${FTL_VERSION}
|
||||
|
||||
services:
|
||||
amd64:
|
||||
image: pihole:${PIHOLE_DOCKER_TAG}-amd64-${DEBIAN_VERSION:-buster}
|
||||
build:
|
||||
context: .
|
||||
args:
|
||||
<<: *common-args
|
||||
PIHOLE_BASE: ghcr.io/pi-hole/docker-pi-hole-base:${DEBIAN_VERSION:-buster}-slim
|
||||
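A sketch of how the build file above might be invoked locally (the version values are placeholders; in CI they are derived from git metadata by the gh-actions scripts further down):

```
export PIHOLE_DOCKER_TAG=dev
export CORE_VERSION=v5.0
export WEB_VERSION=v5.0
export FTL_VERSION=v5.0
export DEBIAN_VERSION=buster

docker-compose -f build.yml build amd64
```
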
@@ -1,95 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
# Script for manually pushing the docker arm images for diginc only
|
||||
# (no one else has docker repo permissions)
|
||||
if [ ! -f ~/.docker/config.json ] ; then
|
||||
echo "Error: You should setup your docker push authorization first"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
parse_git_branch() {
|
||||
git branch 2> /dev/null | sed -e '/^[^*]/d' -e 's/* \(.*\)/\1/'
|
||||
}
|
||||
|
||||
annotate() {
|
||||
local base=$1
|
||||
local image=$2
|
||||
local arch=${image##*_}
|
||||
local docker_arch=${arch_map[$arch]}
|
||||
|
||||
if [ -z $docker_arch ]; then
|
||||
echo "Unknown arch in docker tag: ${arch}"
|
||||
exit 1
|
||||
else
|
||||
$dry docker manifest annotate ${base} ${image} --os linux --arch ${docker_arch}
|
||||
fi
|
||||
}
|
||||
|
||||
namespace='pihole'
|
||||
localimg='pihole'
|
||||
remoteimg="$namespace/$localimg"
|
||||
branch="$(parse_git_branch)"
|
||||
local_version="$(cat VERSION)"
|
||||
version="${version:-unset}"
|
||||
dry="${dry}"
|
||||
latest="${latest:-false}" # true as shell env var to deploy latest
|
||||
|
||||
# arch aliases
|
||||
# ARMv6/armel doesn't have a FTL binary for v4.0 pi-hole
|
||||
declare -A arch_map=( ["amd64"]="amd64" ["armhf"]="arm" ["aarch64"]="arm64")
|
||||
|
||||
# Set anything to dry prior to running this in order to print what would run instead of actually run it.
|
||||
if [[ -n "$dry" ]]; then dry='echo '; fi
|
||||
|
||||
if [[ "$version" == 'unset' ]]; then
|
||||
if [[ "$branch" == "master" ]]; then
|
||||
echo "Version number var is unset and master branch needs a version...pass in \$version variable!"
|
||||
exit 1
|
||||
elif [[ "$branch" = "release/"* ]]; then
|
||||
version="$(echo $branch | grep -Po 'v[\d\w\.-]*')"
|
||||
echo "Version number is being taken from this release branch $version"
|
||||
else
|
||||
version="$branch"
|
||||
# Use a different image for segregating dev tags maybe? Not right now, just a thought I had
|
||||
#remoteimg="${namespace}/${localimg}-dev"
|
||||
echo "Using the branch ($branch) for deployed image version since not passed in"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "# DEPLOYING:"
|
||||
echo "version: $version"
|
||||
echo "branch: $branch"
|
||||
[[ -n "$dry" ]] && echo "DRY RUN: $dry"
|
||||
echo "Example tagging: docker tag ${localimg}:armhf ${remoteimg}:${version}_amd64"
|
||||
|
||||
if [[ -z "$dry" ]] ; then
|
||||
echo "Deleting all manifest data to work around cached old copies preventing updates"
|
||||
rm -rf ~/.docker/manifests/*
|
||||
fi
|
||||
|
||||
$dry tox
|
||||
|
||||
images=()
|
||||
for tag in ${!arch_map[@]}; do
|
||||
# Verison specific tags for ongoing history
|
||||
$dry docker tag $localimg:${local_version}_$tag $remoteimg:${version}_${tag}
|
||||
$dry docker push pihole/pihole:${version}_${tag}
|
||||
images+=(pihole/pihole:${version}_${tag})
|
||||
done
|
||||
|
||||
$dry docker manifest create --amend pihole/pihole:${version} ${images[*]}
|
||||
|
||||
for image in "${images[@]}"; do
|
||||
annotate pihole/pihole:${version} ${image}
|
||||
done
|
||||
|
||||
$dry docker manifest push pihole/pihole:${version}
|
||||
|
||||
# Floating latest tag alias
|
||||
if [[ "$latest" == 'true' && "$branch" == "master" ]] ; then
|
||||
latestimg="$remoteimg:latest"
|
||||
$dry docker manifest create --amend "$latestimg" ${images[*]}
|
||||
for image in "${images[@]}"; do
|
||||
annotate "$latestimg" "${image}"
|
||||
done
|
||||
$dry docker manifest push "$latestimg"
|
||||
fi
|
||||
@@ -1,62 +0,0 @@
|
||||
version: "3"
|
||||
|
||||
# https://github.com/pi-hole/docker-pi-hole/blob/master/README.md
|
||||
|
||||
applist:
|
||||
image: jwilder/nginx-proxy
|
||||
ports:
|
||||
- '80:80'
|
||||
environment:
|
||||
DEFAULT_HOST: pihole.yourDomain.lan
|
||||
volumes:
|
||||
- '/var/run/docker.sock:/tmp/docker.sock'
|
||||
restart: always
|
||||
|
||||
pihole:
|
||||
image: pihole/pihole:latest
|
||||
dns:
|
||||
- 127.0.0.1
|
||||
- 1.1.1.1
|
||||
ports:
|
||||
- '53:53/tcp'
|
||||
- '53:53/udp'
|
||||
- "67:67/udp"
|
||||
- '8053:80/tcp'
|
||||
- "443:443/tcp"
|
||||
volumes:
|
||||
- './etc-pihole/:/etc/pihole/'
|
||||
- './etc-dnsmasq.d/:/etc/dnsmasq.d/'
|
||||
# run `touch ./var-log/pihole.log` first unless you like errors
|
||||
# - './var-log/pihole.log:/var/log/pihole.log'
|
||||
# Recommended but not required (DHCP needs NET_ADMIN)
|
||||
# https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
environment:
|
||||
ServerIP: 192.168.41.55
|
||||
PROXY_LOCATION: pihole
|
||||
VIRTUAL_HOST: pihole.yourDomain.lan
|
||||
VIRTUAL_PORT: 80
|
||||
extra_hosts:
|
||||
# Resolve to nothing domains (terminate connection)
|
||||
- 'nw2master.bioware.com nwn2.master.gamespy.com:0.0.0.0'
|
||||
# LAN hostnames for other docker containers using jwilder
|
||||
- 'yourDomain.lan:192.168.41.55'
|
||||
- 'pihole pihole.yourDomain.lan:192.168.41.55'
|
||||
- 'ghost ghost.yourDomain.lan:192.168.41.55'
|
||||
- 'wordpress wordpress.yourDomain.lan:192.168.41.55'
|
||||
restart: always
|
||||
|
||||
# Another container you might want to have running through the proxy
|
||||
# Note it also have ENV Vars like pihole and a host under pihole's extra_hosts
|
||||
#ghost:
|
||||
# image: fractalf/ghost
|
||||
# ports:
|
||||
# - '2368:2368/tcp'
|
||||
# volumes:
|
||||
# - '/etc/ghost/:/ghost-override'
|
||||
# environment:
|
||||
# PROXY_LOCATION: ghost
|
||||
# VIRTUAL_HOST: ghost.yourDomain.lan
|
||||
# VIRTUAL_PORT: 2368
|
||||
# restart: always
|
||||
59
docker-compose-nginx-proxy.yml
Normal file
@@ -0,0 +1,59 @@
|
||||
version: "3"
|
||||
|
||||
# https://github.com/pi-hole/docker-pi-hole/blob/master/README.md
|
||||
|
||||
services:
|
||||
nginx-proxy:
|
||||
image: nginxproxy/nginx-proxy
|
||||
ports:
|
||||
- '80:80'
|
||||
environment:
|
||||
DEFAULT_HOST: pihole.yourDomain.lan
|
||||
volumes:
|
||||
- '/var/run/docker.sock:/tmp/docker.sock'
|
||||
restart: always
|
||||
|
||||
pihole:
|
||||
image: pihole/pihole:latest
|
||||
ports:
|
||||
- '53:53/tcp'
|
||||
- '53:53/udp'
|
||||
- "67:67/udp"
|
||||
- '8053:80/tcp'
|
||||
volumes:
|
||||
- './etc-pihole:/etc/pihole'
|
||||
- './etc-dnsmasq.d:/etc/dnsmasq.d'
|
||||
# run `touch ./var-log/pihole.log` first unless you like errors
|
||||
# - './var-log/pihole.log:/var/log/pihole.log'
|
||||
# Recommended but not required (DHCP needs NET_ADMIN)
|
||||
# https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
environment:
|
||||
ServerIP: 192.168.41.55
|
||||
PROXY_LOCATION: pihole
|
||||
VIRTUAL_HOST: pihole.yourDomain.lan
|
||||
VIRTUAL_PORT: 80
|
||||
extra_hosts:
|
||||
# Resolve to nothing domains (terminate connection)
|
||||
- 'nw2master.bioware.com nwn2.master.gamespy.com:0.0.0.0'
|
||||
# LAN hostnames for other docker containers using nginx-proxy
|
||||
- 'yourDomain.lan:192.168.41.55'
|
||||
- 'pihole pihole.yourDomain.lan:192.168.41.55'
|
||||
- 'ghost ghost.yourDomain.lan:192.168.41.55'
|
||||
- 'wordpress wordpress.yourDomain.lan:192.168.41.55'
|
||||
restart: always
|
||||
|
||||
# Another container you might want to have running through the proxy
|
||||
# Note it also has ENV Vars like pihole and a host under pihole's extra_hosts
|
||||
# ghost:
|
||||
# image: fractalf/ghost
|
||||
# ports:
|
||||
# - '2368:2368/tcp'
|
||||
# volumes:
|
||||
# - '/etc/ghost:/ghost-override'
|
||||
# environment:
|
||||
# PROXY_LOCATION: ghost
|
||||
# VIRTUAL_HOST: ghost.yourDomain.lan
|
||||
# VIRTUAL_PORT: 2368
|
||||
# restart: always
|
||||
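A minimal sketch of bringing up and checking the stack above, assuming you have substituted your own LAN IP (192.168.41.55 in the example) and domain:

```
docker-compose -f docker-compose-nginx-proxy.yml up -d

# admin UI through nginx-proxy, which routes on the Host header
curl -I -H 'Host: pihole.yourDomain.lan' http://192.168.41.55/admin/

# admin UI directly on the back-door port mapping (8053 -> 80)
curl -I http://192.168.41.55:8053/admin/
```
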
@@ -1,109 +0,0 @@
|
||||
Please note the following about this [traefik](https://traefik.io/) example for Docker Pi-hole

- It still requires the standard Pi-hole setup steps; make sure you've gone through the [README](https://github.com/pihole/docker-pi-hole/blob/master/README.md) and understand how to set up Pi-hole without traefik first
- Update these things before using:
    - set instances of `homedomain.lan` below to your home domain (typically set in your router)
    - set your Pi-hole ENV WEBPASSWORD if you don't want a random admin pass
- This works for me; your mileage may vary!
- For support, do your best to figure out traefik issues on your own:
    - by looking at the logs and the traefik web interface on port 8080
    - also by searching the web and their forums/docker issues for similar questions/problems
- Port 8053 is mapped directly to Pi-hole to serve as a back door that bypasses traefik
- There is some delay after starting your container before traefik forwards the HTTP traffic correctly; give it a minute

```
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
#
|
||||
traefik:
|
||||
container_name: traefik
|
||||
domainname: homedomain.lan
|
||||
|
||||
image: traefik
|
||||
restart: unless-stopped
|
||||
# Note I opt to whitelist certain apps for exposure to traefik instead of auto discovery
|
||||
# use `--docker.exposedbydefault=true` if you don't want to have to do this
|
||||
command: "--web --docker --docker.domain=homedomain.lan --docker.exposedbydefault=false --logLevel=DEBUG"
|
||||
ports:
|
||||
- "80:80"
|
||||
- "443:443"
|
||||
- "8080:8080"
|
||||
volumes:
|
||||
- /var/run/docker.sock:/var/run/docker.sock
|
||||
- /dev/null:/traefik.toml
|
||||
networks:
|
||||
- default
|
||||
- discovery
|
||||
dns:
|
||||
- 192.168.1.50
|
||||
- 192.168.1.1
|
||||
|
||||
pihole:
|
||||
container_name: pihole
|
||||
domainname: homedomain.lan
|
||||
|
||||
image: pihole/pihole:latest
|
||||
dns:
|
||||
- 127.0.0.1
|
||||
- 1.1.1.1
|
||||
ports:
|
||||
- '0.0.0.0:53:53/tcp'
|
||||
- '0.0.0.0:53:53/udp'
|
||||
- '0.0.0.0:67:67/udp'
|
||||
- '0.0.0.0:8053:80/tcp'
|
||||
volumes:
|
||||
- ./etc-pihole/:/etc/pihole/
|
||||
- ./etc-dnsmasqd/:/etc/dnsmasq.d/
|
||||
# run `touch ./pihole.log` first unless you like errors
|
||||
# - ./pihole.log:/var/log/pihole.log
|
||||
environment:
|
||||
ServerIP: 192.168.1.50
|
||||
PROXY_LOCATION: pihole
|
||||
VIRTUAL_HOST: pihole.homedomain.lan
|
||||
VIRTUAL_PORT: 80
|
||||
TZ: 'America/Chicago'
|
||||
# WEBPASSWORD:
|
||||
restart: unless-stopped
|
||||
labels:
|
||||
# required when using --docker.exposedbydefault=false
|
||||
- "traefik.enable=true"
|
||||
# https://www.techjunktrunk.com/docker/2017/11/03/traefik-default-server-catch-all/
|
||||
- "traefik.frontend.rule=HostRegexp:pihole.homedomain.lan,{catchall:.*}"
|
||||
- "traefik.frontend.priority=1"
|
||||
- "traefik.backend=pihole"
|
||||
- "traefik.port=80"
|
||||
|
||||
networks:
|
||||
# Discovery is manually created to avoid forcing any order of docker-compose stack creation (`docker network create discovery`)
|
||||
# allows other compose files to be seen by proxy
|
||||
# Not required if you aren't using multiple docker-compose files...
|
||||
discovery:
|
||||
external: true
|
||||
```
|
||||
|
||||
After running `docker-compose up -d`, you should see output like the following in the traefik logs (`docker-compose logs -f traefik`)
|
||||
|
||||
```
|
||||
traefik | time="2018-03-07T18:57:41Z" level=debug msg="Provider event received {Status:health_status: healthy ID:33567e94e02c5adba3d47fa44c391e94fdea359fb05eecb196c95de288ffb861 From:pihole/pihole:latest Type:container Action:health_status: healthy Actor:{ID:33567e94
|
||||
e02c5adba3d47fa44c391e94fdea359fb05eecb196c95de288ffb861 Attributes:map[com.docker.compose.project:traefik image:pihole/pihole:latest traefik.frontend.priority:1 com.docker.compose.container-number:1 com.docker.compose.service:pihole com.docker.compose.version:1.19.0 name:pihole traefik.enable:true url:https://www.github.com/pihole/docker-pi-hole com.docker.compose.oneoff:False maintainer:adam@diginc.us traefik.backend:pihole traefik.frontend.rule:HostRegexp:pihole.homedomain.lan,{catchall:.*} traefik.port:80 com.docker.compose.config-
|
||||
hash:7551c3f4bd11766292c7dad81473ef21da91cae8666d1b04a42d1daab53fba0f]} Scope:local Time:1520449061 TimeNano:1520449061934970670}"
|
||||
traefik | time="2018-03-07T18:57:42Z" level=debug msg="Filtering disabled container /traefik"
|
||||
traefik | time="2018-03-07T18:57:42Z" level=debug msg="Could not load traefik.frontend.whitelistSourceRange labels"
|
||||
traefik | time="2018-03-07T18:57:42Z" level=debug msg="Could not load traefik.frontend.entryPoints labels"
|
||||
traefik | time="2018-03-07T18:57:42Z" level=debug msg="Could not load traefik.frontend.auth.basic labels"
|
||||
traefik | time="2018-03-07T18:57:42Z" level=debug msg="Validation of load balancer method for backend backend-pihole failed: invalid load-balancing method ''. Using default method wrr."
|
||||
traefik | time="2018-03-07T18:57:42Z" level=debug msg="Configuration received from provider docker: {"backends":{"backend-pihole":{"servers":{"server-pihole":{"url":"http://172.18.0.2:80","weight":0}},"loadBalancer":{"method":"wrr"}}},"frontends":{"frontend-HostRegexp
|
||||
-pihole-homedomain-lan-catchall-0":{"entryPoints":["http"],"backend":"backend-pihole","routes":{"route-frontend-HostRegexp-pihole-homedomain-lan-catchall-0":{"rule":"HostRegexp:pihole.homedomain.lan,{catchall:.*}"}},"passHostHeader":true,"priority":1,"basicAuth":[]}}}"
|
||||
traefik | time="2018-03-07T18:57:42Z" level=debug msg="Creating frontend frontend-HostRegexp-pihole-homedomain-lan-catchall-0"
|
||||
traefik | time="2018-03-07T18:57:42Z" level=debug msg="Wiring frontend frontend-HostRegexp-pihole-homedomain-lan-catchall-0 to entryPoint http"
|
||||
traefik | time="2018-03-07T18:57:42Z" level=debug msg="Creating route route-frontend-HostRegexp-pihole-homedomain-lan-catchall-0 HostRegexp:pihole.homedomain.lan,{catchall:.*}"
|
||||
traefik | time="2018-03-07T18:57:42Z" level=debug msg="Creating backend backend-pihole"
|
||||
traefik | time="2018-03-07T18:57:42Z" level=debug msg="Creating load-balancer wrr"
|
||||
traefik | time="2018-03-07T18:57:42Z" level=debug msg="Creating server server-pihole at http://172.18.0.2:80 with weight 0"
|
||||
traefik | time="2018-03-07T18:57:42Z" level=info msg="Server configuration reloaded on :80"
|
||||
traefik | time="2018-03-07T18:57:42Z" level=info msg="Server configuration reloaded on :8080"
|
||||
```
|
||||
|
||||
Also, the traefik web interface on port 8080 should list the Route/Rule for pihole and the backend-pihole container.
|
||||
|
||||
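A few illustrative checks for this traefik setup (addresses taken from the example above; adjust them to your network):

```
# DNS straight to the container (port 53 is published on the host)
dig @192.168.1.50 pi.hole +short

# admin UI via the back-door port that bypasses traefik
curl -I http://192.168.1.50:8053/admin/

# admin UI through traefik, which routes on the Host header
curl -I -H 'Host: pihole.homedomain.lan' http://192.168.1.50/admin/
```
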
@@ -12,21 +12,14 @@ services:
|
||||
- "53:53/udp"
|
||||
- "67:67/udp"
|
||||
- "80:80/tcp"
|
||||
- "443:443/tcp"
|
||||
environment:
|
||||
TZ: 'America/Chicago'
|
||||
# WEBPASSWORD: 'set a secure password here or it will be random'
|
||||
# Volumes store your data between container upgrades
|
||||
volumes:
|
||||
- './etc-pihole/:/etc/pihole/'
|
||||
- './etc-dnsmasq.d/:/etc/dnsmasq.d/'
|
||||
# run `touch ./var-log/pihole.log` first unless you like errors
|
||||
# - './var-log/pihole.log:/var/log/pihole.log'
|
||||
dns:
|
||||
- 127.0.0.1
|
||||
- 1.1.1.1
|
||||
# Recommended but not required (DHCP needs NET_ADMIN)
|
||||
- './etc-pihole:/etc/pihole'
|
||||
- './etc-dnsmasq.d:/etc/dnsmasq.d'
|
||||
# https://github.com/pi-hole/docker-pi-hole#note-on-capabilities
|
||||
cap_add:
|
||||
- NET_ADMIN
|
||||
restart: unless-stopped
|
||||
restart: unless-stopped # Recommended but not required (DHCP needs NET_ADMIN)
|
||||
@@ -22,7 +22,7 @@ PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
|
||||
# Download any updates from the adlists
|
||||
59 1 * * 7 root PATH="$PATH:/usr/local/bin/" docker exec $DOCKER_NAME pihole updateGravity > /dev/null
|
||||
|
||||
# Update docker-pi-hole by pulling the latest docker image ane re-creating your container.
|
||||
# Update docker-pi-hole by pulling the latest docker image and re-creating your container.
|
||||
# pihole software update commands are unsupported in docker!
|
||||
#30 2 * * 7 root PATH="$PATH:/usr/local/bin/" docker exec $DOCKER_NAME pihole updatePihole > /dev/null
|
||||
|
||||
|
||||
@@ -2,16 +2,23 @@
|
||||
|
||||
# https://github.com/pi-hole/docker-pi-hole/blob/master/README.md
|
||||
|
||||
PIHOLE_BASE="${PIHOLE_BASE:-$(pwd)}"
|
||||
[[ -d "$PIHOLE_BASE" ]] || mkdir -p "$PIHOLE_BASE" || { echo "Couldn't create storage directory: $PIHOLE_BASE"; exit 1; }
|
||||
|
||||
# Note: ServerIP should be replaced with your external ip.
|
||||
docker run -d \
|
||||
--name pihole \
|
||||
-p 53:53/tcp -p 53:53/udp \
|
||||
-p 80:80 \
|
||||
-p 443:443 \
|
||||
-e TZ="America/Chicago" \
|
||||
-v "$(pwd)/etc-pihole/:/etc/pihole/" \
|
||||
-v "$(pwd)/etc-dnsmasq.d/:/etc/dnsmasq.d/" \
|
||||
-v "${PIHOLE_BASE}/etc-pihole:/etc/pihole" \
|
||||
-v "${PIHOLE_BASE}/etc-dnsmasq.d:/etc/dnsmasq.d" \
|
||||
--dns=127.0.0.1 --dns=1.1.1.1 \
|
||||
--restart=unless-stopped \
|
||||
--hostname pi.hole \
|
||||
-e VIRTUAL_HOST="pi.hole" \
|
||||
-e PROXY_LOCATION="pi.hole" \
|
||||
-e ServerIP="127.0.0.1" \
|
||||
pihole/pihole:latest
|
||||
|
||||
printf 'Starting up pihole container '
|
||||
@@ -26,7 +33,7 @@ for i in $(seq 1 20); do
|
||||
fi
|
||||
|
||||
if [ $i -eq 20 ] ; then
|
||||
echo -e "\nTimed out waiting for Pi-hole start start, consult check your container logs for more info (\`docker logs pihole\`)"
|
||||
echo -e "\nTimed out waiting for Pi-hole start, consult your container logs for more info (\`docker logs pihole\`)"
|
||||
exit 1
|
||||
fi
|
||||
done;
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
docker-compose.yml
|
||||
74
gh-actions-deploy.sh
Executable file
@@ -0,0 +1,74 @@
|
||||
#!/usr/bin/env bash
|
||||
set -ex
|
||||
# Github Actions Job for merging/deploying all architectures (post-test passing)
|
||||
. gh-actions-vars.sh
|
||||
|
||||
function annotate() {
|
||||
local base=$1
|
||||
local image=$2
|
||||
local arch=$3
|
||||
local annotate_flags="${annotate_map[$arch]}"
|
||||
|
||||
$dry docker manifest annotate ${base} ${image} --os linux ${annotate_flags}
|
||||
}
|
||||
|
||||
function create_manifest() {
|
||||
local debian_version=$1
|
||||
local images=()
|
||||
cd "${debian_version}"
|
||||
|
||||
for arch in *; do
|
||||
arch_image=$(cat "${arch}")
|
||||
docker pull "${arch_image}"
|
||||
images+=("${arch_image}")
|
||||
done
|
||||
|
||||
multiarch_images=$(get_multiarch_images)
|
||||
for docker_tag in ${multiarch_images}; do
|
||||
docker manifest create ${docker_tag} ${images[*]}
|
||||
for arch in *; do
|
||||
arch_image=$(cat "${arch}")
|
||||
annotate "${docker_tag}" "${arch_image}" "${arch}"
|
||||
done
|
||||
|
||||
docker manifest inspect "${docker_tag}"
|
||||
docker manifest push --purge "${docker_tag}"
|
||||
done
|
||||
cd ../
|
||||
}
|
||||
|
||||
function get_multiarch_images() {
|
||||
multiarch_images="${MULTIARCH_IMAGE}-${debian_version}"
|
||||
if [[ "${debian_version}" == "${DEFAULT_DEBIAN_VERSION}" ]] ; then
|
||||
# default debian version gets a non-debian tag as well as latest tag
|
||||
multiarch_images="${multiarch_images} ${MULTIARCH_IMAGE} ${LATEST_IMAGE}"
|
||||
fi
|
||||
echo "${multiarch_images}"
|
||||
}
|
||||
|
||||
|
||||
# Keep in sync with build.yml names
|
||||
declare -A annotate_map=(
|
||||
["amd64"]="--arch amd64"
|
||||
["armel"]="--arch arm --variant v6"
|
||||
["armhf"]="--arch arm --variant v7"
|
||||
["arm64"]="--arch arm64 --variant v8"
|
||||
["i386"]="--arch 386"
|
||||
)
|
||||
|
||||
mkdir -p ~/.docker
|
||||
export DOCKER_CLI_EXPERIMENTAL='enabled'
|
||||
echo "{}" | jq '.experimental="enabled"' | tee ~/.docker/config.json
|
||||
# I tried to keep this login command outside of this script
|
||||
# but for some reason auth would always fail in Github Actions.
|
||||
# I think setting up a cred store would fix it
|
||||
# https://docs.docker.com/engine/reference/commandline/login/#credentials-store
|
||||
echo "${DOCKERHUB_PASS}" | docker login --username="${DOCKERHUB_USER}" --password-stdin
|
||||
docker info
|
||||
|
||||
ls -lat ./.gh-workspace/
|
||||
cd .gh-workspace
|
||||
|
||||
for debian_version in *; do
|
||||
create_manifest "${debian_version}"
|
||||
done
|
||||
35
gh-actions-test.sh
Executable file
@@ -0,0 +1,35 @@
|
||||
#!/usr/bin/env bash
|
||||
set -ex
|
||||
|
||||
# Script run by GitHub Actions for tests
|
||||
#
|
||||
# @environment ${ARCH} The architecture to build. Example: amd64.
|
||||
# @environment ${DEBIAN_VERSION} Debian version to build. ('buster' or 'stretch').
|
||||
# @environment ${ARCH_IMAGE} What the Docker Hub Image should be tagged as. Example: pihole/pihole:master-amd64-buster
|
||||
|
||||
# setup qemu/variables
|
||||
docker run --rm --privileged multiarch/qemu-user-static:register --reset > /dev/null
|
||||
. gh-actions-vars.sh
|
||||
|
||||
if [[ "$1" == "enter" ]]; then
|
||||
enter="-it --entrypoint=sh"
|
||||
fi
|
||||
|
||||
# generate and build dockerfile
|
||||
docker build --tag image_pipenv --file Dockerfile_build .
|
||||
docker run --rm \
|
||||
--volume /var/run/docker.sock:/var/run/docker.sock \
|
||||
--volume "$(pwd):/$(pwd)" \
|
||||
--workdir "$(pwd)" \
|
||||
--env PIPENV_CACHE_DIR="$(pwd)/.pipenv" \
|
||||
--env ARCH="${ARCH}" \
|
||||
--env ARCH_IMAGE="${ARCH_IMAGE}" \
|
||||
--env DEBIAN_VERSION="${DEBIAN_VERSION}" \
|
||||
--env GIT_TAG="${GIT_TAG}" \
|
||||
--env CORE_VERSION="${CORE_VERSION}" \
|
||||
--env WEB_VERSION="${WEB_VERSION}" \
|
||||
--env FTL_VERSION="${FTL_VERSION}" \
|
||||
${enter} image_pipenv
|
||||
|
||||
mkdir -p ".gh-workspace/${DEBIAN_VERSION}/"
|
||||
echo "${ARCH_IMAGE}" | tee "./.gh-workspace/${DEBIAN_VERSION}/${ARCH}"
|
||||
53
gh-actions-vars.sh
Executable file
@@ -0,0 +1,53 @@
|
||||
#!/usr/bin/env bash
|
||||
set -a
|
||||
|
||||
# @environment ${ARCH} The architecture to build. Defaults to 'amd64'.
|
||||
# @environment ${DEBIAN_VERSION} Debian version to build. Defaults to 'buster'.
|
||||
# @environment ${DOCKER_HUB_REPO} The docker hub repo to tag images for. Defaults to 'pihole'.
|
||||
# @environment ${DOCKER_HUB_IMAGE_NAME} The name of the resulting image. Defaults to 'pihole'.
|
||||
|
||||
GIT_BRANCH=$(git rev-parse --abbrev-ref HEAD | sed "s/\//-/g")
|
||||
GIT_TAG=$(git describe --tags --exact-match 2> /dev/null || true)
|
||||
|
||||
DEFAULT_DEBIAN_VERSION="buster"
|
||||
|
||||
if [[ -z "${ARCH}" ]]; then
|
||||
ARCH="amd64"
|
||||
echo "Defaulting arch to ${ARCH}"
|
||||
fi
|
||||
|
||||
if [[ -z "${DEBIAN_VERSION}" ]]; then
|
||||
DEBIAN_VERSION="${DEFAULT_DEBIAN_VERSION}"
|
||||
echo "Defaulting DEBIAN_VERSION to ${DEBIAN_VERSION}"
|
||||
fi
|
||||
|
||||
if [[ -z "${DOCKER_HUB_REPO}" ]]; then
|
||||
DOCKER_HUB_REPO="pihole"
|
||||
echo "Defaulting DOCKER_HUB_REPO to ${DOCKER_HUB_REPO}"
|
||||
fi
|
||||
|
||||
if [[ -z "${DOCKER_HUB_IMAGE_NAME}" ]]; then
|
||||
DOCKER_HUB_IMAGE_NAME="pihole"
|
||||
echo "Defaulting DOCKER_HUB_IMAGE_NAME to ${DOCKER_HUB_IMAGE_NAME}"
|
||||
fi
|
||||
|
||||
BASE_IMAGE="${DOCKER_HUB_REPO}/${DOCKER_HUB_IMAGE_NAME}"
|
||||
|
||||
GIT_TAG="${GIT_TAG:-$GIT_BRANCH}"
|
||||
ARCH_IMAGE="${BASE_IMAGE}:${GIT_TAG}-${ARCH}-${DEBIAN_VERSION}"
|
||||
MULTIARCH_IMAGE="${BASE_IMAGE}:${GIT_TAG}"
|
||||
|
||||
|
||||
|
||||
# To get latest released, cut a release on https://github.com/pi-hole/docker-pi-hole/releases (manually gated for quality control)
|
||||
latest_tag='UNKNOWN'
|
||||
if ! latest_tag=$(curl -sI https://github.com/pi-hole/docker-pi-hole/releases/latest | grep --color=never -i Location: | awk -F / '{print $NF}' | tr -d '[:cntrl:]'); then
|
||||
print "Failed to retrieve latest docker-pi-hole release metadata"
|
||||
else
|
||||
if [[ "${GIT_TAG}" == "${latest_tag}" ]] ; then
|
||||
LATEST_IMAGE="${BASE_IMAGE}:latest"
|
||||
fi
|
||||
fi
|
||||
|
||||
|
||||
set +a
|
||||
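A sketch of driving the test script locally with the variables defaulted above (values are examples; anything left unset falls back to the defaults):

```
# defaults: amd64 build of the buster image, tagged pihole/pihole:<branch-or-tag>-amd64-buster
./gh-actions-test.sh

# cross-build/test another architecture and Debian release
ARCH=arm64 DEBIAN_VERSION=stretch ./gh-actions-test.sh
```
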
106
install.sh
Executable file → Normal file
@@ -2,38 +2,40 @@
|
||||
|
||||
mkdir -p /etc/pihole/
|
||||
mkdir -p /var/run/pihole
|
||||
# Production tags with valid web footers
|
||||
export CORE_VERSION="$(cat /etc/docker-pi-hole-version)"
|
||||
# Major.Minor for web tag until patches are released for it
|
||||
export WEB_VERSION="$(echo ${CORE_VERSION} | grep -Po "v\d+\.\d+")"
|
||||
# Only use for pre-production / testing
|
||||
export USE_CUSTOM_BRANCHES=false
|
||||
|
||||
apt-get update
|
||||
apt-get install -y curl procps
|
||||
curl -L -s $S6OVERLAY_RELEASE | tar xvzf - -C /
|
||||
CORE_LOCAL_REPO=/etc/.pihole
|
||||
WEB_LOCAL_REPO=/var/www/html/admin
|
||||
|
||||
setupVars=/etc/pihole/setupVars.conf
|
||||
|
||||
s6_download_url() {
|
||||
DETECTED_ARCH=$(dpkg --print-architecture)
|
||||
S6_ARCH=$DETECTED_ARCH
|
||||
case $DETECTED_ARCH in
|
||||
armel)
|
||||
S6_ARCH="arm";;
|
||||
armhf)
|
||||
S6_ARCH="arm";;
|
||||
arm64)
|
||||
S6_ARCH="aarch64";;
|
||||
i386)
|
||||
S6_ARCH="x86";;
|
||||
ppc64el)
|
||||
S6_ARCH="ppc64le";;
|
||||
esac
|
||||
echo "https://github.com/just-containers/s6-overlay/releases/download/${S6_OVERLAY_VERSION}/s6-overlay-${S6_ARCH}.tar.gz"
|
||||
}
|
||||
|
||||
ln -s `which echo` /usr/local/bin/whiptail
|
||||
curl -L -s "$(s6_download_url)" | tar xvzf - -C /
|
||||
mv /init /s6-init
|
||||
|
||||
if [[ $USE_CUSTOM_BRANCHES == true ]] ; then
|
||||
CORE_VERSION="hotfix/${CORE_VERSION}"
|
||||
WEB_VERSION="release/v4.2"
|
||||
fi
|
||||
|
||||
# debconf-apt-progress seems to hang so get rid of it too
|
||||
which debconf-apt-progress
|
||||
mv "$(which debconf-apt-progress)" /bin/no_debconf-apt-progress
|
||||
|
||||
# Get the install functions
|
||||
curl https://raw.githubusercontent.com/pi-hole/pi-hole/${CORE_VERSION}/automated%20install/basic-install.sh > "$PIHOLE_INSTALL"
|
||||
PH_TEST=true . "${PIHOLE_INSTALL}"
|
||||
|
||||
# Preseed variables to assist with using --unattended install
|
||||
{
|
||||
echo "PIHOLE_INTERFACE=eth0"
|
||||
echo "IPV4_ADDRESS=0.0.0.0"
|
||||
echo "IPV6_ADDRESS=0:0:0:0:0:0"
|
||||
echo "PIHOLE_DNS_1=8.8.8.8"
|
||||
echo "PIHOLE_DNS_2=8.8.4.4"
|
||||
echo "QUERY_LOGGING=true"
|
||||
echo "INSTALL_WEB_SERVER=true"
|
||||
echo "INSTALL_WEB_INTERFACE=true"
|
||||
@@ -42,54 +44,36 @@ PH_TEST=true . "${PIHOLE_INSTALL}"
|
||||
source $setupVars
|
||||
|
||||
export USER=pihole
|
||||
distro_check
|
||||
|
||||
# fix permission denied to resolvconf post-inst /etc/resolv.conf moby/moby issue #1297
|
||||
apt-get -y install debconf-utils
|
||||
echo resolvconf resolvconf/linkify-resolvconf boolean false | debconf-set-selections
|
||||
export PIHOLE_SKIP_OS_CHECK=true
|
||||
|
||||
# Tried this - unattended causes starting services during a build, should probably PR a flag to shut that off and switch to that
|
||||
#bash -ex "./${PIHOLE_INSTALL}" --unattended
|
||||
install_dependent_packages INSTALLER_DEPS[@]
|
||||
install_dependent_packages PIHOLE_DEPS[@]
|
||||
install_dependent_packages PIHOLE_WEB_DEPS[@]
|
||||
# IPv6 support for nc openbsd better than traditional
|
||||
apt-get install -y --force-yes netcat-openbsd
|
||||
# Run the installer in unattended mode using the preseeded variables above and --reconfigure so that local repos are not updated
|
||||
curl -sSL https://install.pi-hole.net | bash -sex -- --unattended
|
||||
|
||||
piholeGitUrl="${piholeGitUrl}"
|
||||
webInterfaceGitUrl="${webInterfaceGitUrl}"
|
||||
webInterfaceDir="${webInterfaceDir}"
|
||||
git clone "${piholeGitUrl}" "${PI_HOLE_LOCAL_REPO}"
|
||||
git clone "${webInterfaceGitUrl}" "${webInterfaceDir}"
|
||||
|
||||
tmpLog="/tmp/pihole-install.log"
|
||||
installLogLoc="${installLogLoc}"
|
||||
FTLdetect 2>&1 | tee "${tmpLog}"
|
||||
installPihole 2>&1 | tee "${tmpLog}"
|
||||
mv "${tmpLog}" /
|
||||
|
||||
if [[ $USE_CUSTOM_BRANCHES == true ]] ; then
|
||||
ln -s /bin/true /usr/local/bin/service
|
||||
ln -s /bin/true /usr/local/bin/update-rc.d
|
||||
echo y | bash -x pihole checkout core ${CORE_VERSION}
|
||||
echo y | bash -x pihole checkout web ${WEB_VERSION}
|
||||
echo y | bash -x pihole checkout ftl tweak/overhaul_overTime
|
||||
# If the v is forgotten: ${CORE_VERSION/v/}
|
||||
unlink /usr/local/bin/service
|
||||
unlink /usr/local/bin/update-rc.d
|
||||
else
|
||||
# Reset to our tags so version numbers get detected correctly
|
||||
pushd "${PI_HOLE_LOCAL_REPO}"; git reset --hard "${CORE_VERSION}"; popd;
|
||||
pushd "${webInterfaceDir}"; git reset --hard "${WEB_VERSION}"; popd;
|
||||
# At this stage, if we are building a :nightly tag, then switch the Pi-hole install to dev versions
|
||||
if [[ "${PIHOLE_DOCKER_TAG}" = 'nightly' ]]; then
|
||||
yes | pihole checkout dev
|
||||
fi
|
||||
|
||||
sed -i 's/readonly //g' /opt/pihole/webpage.sh
|
||||
sed -i '/^WEBPASSWORD/d' /etc/pihole/setupVars.conf
|
||||
|
||||
# Replace the call to `updatePiholeFunc` in arg parse with new `unsupportedFunc`
|
||||
# sed a new function into the `pihole` script just above the `helpFunc()` function for later use.
|
||||
sed -i $'s/helpFunc() {/unsupportedFunc() {\\\n echo "Function not supported in Docker images"\\\n exit 0\\\n}\\\n\\\nhelpFunc() {/g' /usr/local/bin/pihole
|
||||
|
||||
# Replace a few of the `pihole` options with calls to `unsupportedFunc`:
|
||||
# pihole -up / pihole updatePihole
|
||||
sed -i $'s/)\s*updatePiholeFunc/) unsupportedFunc/g' /usr/local/bin/pihole
|
||||
# pihole uninstall
|
||||
sed -i $'s/)\s*uninstallFunc/) unsupportedFunc/g' /usr/local/bin/pihole
|
||||
# pihole -r / pihole reconfigure
|
||||
sed -i $'s/)\s*reconfigurePiholeFunc/) unsupportedFunc/g' /usr/local/bin/pihole
|
||||
|
||||
if [[ "${PIHOLE_DOCKER_TAG}" != "dev" && "${PIHOLE_DOCKER_TAG}" != "nightly" ]]; then
|
||||
# If we are on a version other than dev or nightly, disable `pihole checkout`, otherwise it is useful to have for quick troubleshooting sometimes
|
||||
sed -i $'s/)\s*piholeCheckoutFunc/) unsupportedFunc/g' /usr/local/bin/pihole
|
||||
fi
|
||||
|
||||
touch /.piholeFirstBoot
|
||||
|
||||
echo 'Docker install successful'
|
||||
echo 'Docker install successful'
|
||||
@@ -1,7 +1,55 @@
|
||||
docker-compose
|
||||
jinja2
|
||||
pytest>=3.6.0
|
||||
pytest-cov
|
||||
pytest-xdist
|
||||
testinfra==1.5.1
|
||||
tox
|
||||
-i https://pypi.org/simple/
|
||||
apipkg==1.5
|
||||
atomicwrites==1.3.0
|
||||
attrs==19.3.0
|
||||
backports.shutil-get-terminal-size==1.0.0
|
||||
backports.ssl-match-hostname==3.7.0.1
|
||||
bcrypt==3.1.7
|
||||
cached-property==1.5.1
|
||||
certifi==2019.11.28
|
||||
cffi==1.13.2
|
||||
chardet==3.0.4
|
||||
configparser==4.0.2
|
||||
contextlib2==0.6.0.post1
|
||||
coverage==5.0.1
|
||||
cryptography==3.3.2
|
||||
docker==4.1.0
|
||||
dockerpty==0.4.1
|
||||
docopt==0.6.2
|
||||
enum34==1.1.6
|
||||
execnet==1.7.1
|
||||
filelock==3.0.12
|
||||
funcsigs==1.0.2
|
||||
idna==2.8
|
||||
importlib-metadata==1.3.0
|
||||
ipaddress==1.0.23
|
||||
jinja2==2.11.3
|
||||
jsonschema==3.2.0
|
||||
markupsafe==1.1.1
|
||||
more-itertools==5.0.0
|
||||
packaging==20.9
|
||||
pathlib2==2.3.5
|
||||
pluggy==0.13.1
|
||||
py==1.10.0
|
||||
pycparser==2.19
|
||||
pyparsing==2.4.6
|
||||
pyrsistent==0.15.6
|
||||
pytest-cov==2.8.1
|
||||
pytest-forked==1.1.3
|
||||
pytest-xdist==1.31.0
|
||||
pytest==4.6.8
|
||||
pyyaml==5.4
|
||||
requests==2.22.0
|
||||
scandir==1.10.0
|
||||
six==1.13.0
|
||||
subprocess32==3.5.4
|
||||
testinfra==3.3.0
|
||||
texttable==1.6.2
|
||||
toml==0.10.0
|
||||
tox==3.14.3
|
||||
urllib3==1.25.8
|
||||
virtualenv==16.7.9
|
||||
wcwidth==0.1.7
|
||||
websocket-client==0.57.0
|
||||
zipp==0.6.0
|
||||
python-dotenv==0.17.1
|
||||
|
||||
35
s6/debian-root/etc/cont-init.d/05-changer-uid-gid.sh
Normal file
@@ -0,0 +1,35 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
set -e
|
||||
|
||||
if [ "${PH_VERBOSE:-0}" -gt 0 ] ; then
|
||||
set -x ;
|
||||
fi
|
||||
|
||||
modifyUser()
|
||||
{
|
||||
declare username=${1:-} newId=${2:-}
|
||||
[[ -z ${username} || -z ${newId} ]] && return
|
||||
|
||||
local currentId=$(id -u ${username})
|
||||
[[ ${currentId} -eq ${newId} ]] && return
|
||||
|
||||
echo "Changing ID for user: ${username} (${currentId} => ${newId})"
|
||||
usermod -o -u ${newId} ${username}
|
||||
}
|
||||
|
||||
modifyGroup()
|
||||
{
|
||||
declare groupname=${1:-} newId=${2:-}
|
||||
[[ -z ${groupname} || -z ${newId} ]] && return
|
||||
|
||||
local currentId=$(id -g ${groupname})
|
||||
[[ ${currentId} -eq ${newId} ]] && return
|
||||
|
||||
echo "Changing ID for group: ${groupname} (${currentId} => ${newId})"
|
||||
groupmod -o -g ${newId} ${groupname}
|
||||
}
|
||||
|
||||
modifyUser www-data ${WEB_UID}
|
||||
modifyGroup www-data ${WEB_GID}
|
||||
modifyUser pihole ${PIHOLE_UID}
|
||||
modifyGroup pihole ${PIHOLE_GID}
|
||||
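An illustrative run command for the UID/GID remapping above (the IDs are placeholders; pick ones that match the owner of your mounted volumes):

```
docker run -d --name pihole \
  -e PIHOLE_UID=1000 -e PIHOLE_GID=1000 \
  -e WEB_UID=33 -e WEB_GID=33 \
  -v "$(pwd)/etc-pihole:/etc/pihole" \
  pihole/pihole:latest
```
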
@@ -2,19 +2,38 @@
|
||||
set -e
|
||||
|
||||
bashCmd='bash -e'
|
||||
if [ "${PH_VERBOSE:-0}" -gt 0 ] ; then
|
||||
if [ "${PH_VERBOSE:-0}" -gt 0 ] ; then
|
||||
set -x ;
|
||||
bashCmd='bash -e -x'
|
||||
fi
|
||||
|
||||
# used to start dnsmasq here for gravity to use...now that conflicts port 53
|
||||
|
||||
$bashCmd /start.sh
|
||||
# Gotta go fast, no time for gravity
|
||||
if [ -n "$PYTEST" ]; then
|
||||
sed -i 's/^gravity_spinup$/#gravity_spinup # DISABLED FOR PYTEST/g' "$(which gravity.sh)"
|
||||
if [ -n "$PYTEST" ]; then
|
||||
sed -i 's/^gravity_spinup$/#gravity_spinup # DISABLED FOR PYTEST/g' "$(which gravity.sh)"
|
||||
fi
|
||||
gravity.sh
|
||||
|
||||
# Kill dnsmasq because s6 won't like it if it's running when s6 services start
|
||||
kill -9 $(pgrep pihole-FTL) || true
|
||||
gravityDBfile="/etc/pihole/gravity.db"
|
||||
config_file="/etc/pihole/pihole-FTL.conf"
|
||||
# make a point to mention which config file we're checking, as breadcrumb to revisit if/when pihole-FTL.conf is succeeded by TOML
|
||||
echo " Checking if custom gravity.db is set in ${config_file}"
|
||||
if [[ -f "${config_file}" ]]; then
|
||||
gravityDBfile="$(grep --color=never -Po "^GRAVITYDB=\K.*" "${config_file}" 2> /dev/null || echo "/etc/pihole/gravity.db")"
|
||||
fi
|
||||
|
||||
|
||||
if [ -z "$SKIPGRAVITYONBOOT" ] || [ ! -e "${gravityDBfile}" ]; then
|
||||
if [ -n "$SKIPGRAVITYONBOOT" ];then
|
||||
echo " SKIPGRAVITYONBOOT is set, however ${gravityDBfile} does not exist (Likely due to a fresh volume). This is a required file for Pi-hole to operate."
|
||||
echo " Ignoring SKIPGRAVITYONBOOT on this occaision."
|
||||
fi
|
||||
|
||||
echo '@reboot root PATH="$PATH:/usr/sbin:/usr/local/bin/" pihole updateGravity >/var/log/pihole_updateGravity.log || cat /var/log/pihole_updateGravity.log' > /etc/cron.d/gravity-on-boot
|
||||
else
|
||||
echo " Skipping Gravity Database Update."
|
||||
[ ! -e /etc/cron.d/gravity-on-boot ] || rm /etc/cron.d/gravity-on-boot &>/dev/null
|
||||
fi
|
||||
|
||||
pihole -v
|
||||
|
||||
echo " Container tag is: ${PIHOLE_DOCKER_TAG}"
|
||||
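A sketch of the skip-gravity behaviour handled above: with a persisted /etc/pihole volume that already contains gravity.db, the boot-time adlist download can be skipped.

```
docker run -d --name pihole \
  -e SKIPGRAVITYONBOOT=1 \
  -v "$(pwd)/etc-pihole:/etc/pihole" \
  -v "$(pwd)/etc-dnsmasq.d:/etc/dnsmasq.d" \
  pihole/pihole:latest
```
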
8
s6/debian-root/etc/services.d/lighttpd-access-log/finish
Normal file
@@ -0,0 +1,8 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
s6-echo "Stopping lighttpd-access-log"
|
||||
pid=$(ps -C cat -o pid=,args= |grep -oP "([0-9]+).+access\.log" |cut -f1 -d" ")
|
||||
if [[ -n ${pid} ]]; then
|
||||
kill -9 ${pid}
|
||||
fi
|
||||
s6-echo "Stopped lighttpd-access-log"
|
||||
5
s6/debian-root/etc/services.d/lighttpd-access-log/run
Normal file
@@ -0,0 +1,5 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
s6-echo "Starting lighttpd-access-log"
|
||||
|
||||
s6-setuidgid www-data cat /var/log/lighttpd/access.log 2>&1
|
||||
8
s6/debian-root/etc/services.d/lighttpd-error-log/finish
Normal file
@@ -0,0 +1,8 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
s6-echo "Stopping lighttpd-error-log"
|
||||
pid=$(ps -C cat -o pid=,args= |grep -oP "([0-9]+).+error\.log" |cut -f1 -d" ")
|
||||
if [[ -n ${pid} ]]; then
|
||||
kill -9 ${pid}
|
||||
fi
|
||||
s6-echo "Stopped lighttpd-error-log"
|
||||
5
s6/debian-root/etc/services.d/lighttpd-error-log/run
Normal file
@@ -0,0 +1,5 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
s6-echo "Starting lighttpd-error-log"
|
||||
|
||||
s6-setuidgid www-data cat /var/log/lighttpd/error.log 2>&1
|
||||
@@ -1,4 +1,6 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
s6-echo "Stopping lighttpd"
|
||||
service lighttpd-access-log stop
|
||||
service lighttpd-error-log stop
|
||||
killall -9 lighttpd
|
||||
|
||||
@@ -1,4 +1,30 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
s6-echo "Starting lighttpd"
|
||||
|
||||
if [[ 1 -eq ${WEBLOGS_STDOUT:-0} ]]; then
|
||||
#lighttpd cannot use /dev/stdout https://redmine.lighttpd.net/issues/2731
|
||||
for fi in /var/log/lighttpd/access.log /var/log/lighttpd/error.log
|
||||
do
|
||||
if [[ ! -p ${fi} ]]; then
|
||||
rm -f ${fi}
|
||||
mkfifo -m 600 ${fi}
|
||||
fi
|
||||
done
|
||||
chown -R www-data:www-data /var/log/lighttpd
|
||||
service lighttpd-access-log start
|
||||
service lighttpd-error-log start
|
||||
sleep 2
|
||||
else
|
||||
#remove fifo if exists
|
||||
[[ -p /var/log/lighttpd/access.log ]] && rm -Rf /var/log/lighttpd/access.log
|
||||
[[ -p /var/log/lighttpd/error.log ]] && rm -Rf /var/log/lighttpd/error.log
|
||||
# Touch log files to ensure they exist (create if non-existing, preserve if existing)
|
||||
touch /var/log/lighttpd/access.log /var/log/lighttpd/error.log
|
||||
|
||||
# Ensure that permissions are set so that lighttpd can write to the logs
|
||||
chown -R www-data:www-data /var/log/lighttpd
|
||||
chmod 0644 /var/log/lighttpd/access.log /var/log/lighttpd/error.log
|
||||
fi
|
||||
|
||||
lighttpd -D -f /etc/lighttpd/lighttpd.conf
|
||||
|
||||
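A minimal sketch of the WEBLOGS_STDOUT switch handled above: setting it to 1 routes the lighttpd access/error logs through the fifo + cat services, so they end up in the container's own log stream.

```
docker run -d --name pihole -e WEBLOGS_STDOUT=1 pihole/pihole:latest
docker logs -f pihole
```
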
@@ -1,4 +1,4 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
s6-echo "Stopping pihole-FTL"
|
||||
kill -9 $(pgrep pihole-FTL)
|
||||
killall -15 pihole-FTL
|
||||
|
||||
@@ -1,9 +1,28 @@
|
||||
#!/usr/bin/with-contenv bash
|
||||
|
||||
s6-echo "Starting pihole-FTL ($FTL_CMD) as ${DNSMASQ_USER}"
|
||||
# Remove possible leftovers from previous pihole-FTL processes
|
||||
rm -f /dev/shm/FTL-* 2> /dev/null
|
||||
rm /run/pihole/FTL.sock 2> /dev/null
|
||||
|
||||
# Touch files to ensure they exist (create if non-existing, preserve if existing)
|
||||
mkdir -pm 0755 /run/pihole
|
||||
touch /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole-FTL.log /var/log/pihole.log /etc/pihole/dhcp.leases
|
||||
|
||||
# Ensure that permissions are set so that pihole-FTL can edit all necessary files
|
||||
chown pihole:pihole /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole-FTL.log /var/log/pihole.log /etc/pihole/dhcp.leases /run/pihole /etc/pihole
|
||||
chmod 0644 /run/pihole-FTL.pid /run/pihole-FTL.port /var/log/pihole-FTL.log /var/log/pihole.log /etc/pihole/dhcp.leases
|
||||
|
||||
# Ensure that permissions are set so that pihole-FTL can edit the files. We ignore errors as the file may not (yet) exist
|
||||
chmod -f 0644 /etc/pihole/macvendor.db
|
||||
# Chown database files to the user FTL runs as. We ignore errors as the files may not (yet) exist
|
||||
chown -f pihole:pihole /etc/pihole/pihole-FTL.db /etc/pihole/gravity.db /etc/pihole/macvendor.db
|
||||
# Chown database file permissions so that the pihole group (web interface) can edit the file. We ignore errors as the files may not (yet) exist
|
||||
chmod -f 0664 /etc/pihole/pihole-FTL.db
|
||||
|
||||
s6-setuidgid ${DNSMASQ_USER} pihole-FTL $FTL_CMD >/dev/null 2>&1
|
||||
|
||||
# Notes on above:
|
||||
# - DNSMASQ_USER default of root is in Dockerfile & can be overwritten by runtime container env
|
||||
# - DNSMASQ_USER default of pihole is in Dockerfile & can be overwritten by runtime container env
|
||||
# - /var/log/pihole*.log has FTL's output that no-daemon would normally print in FG too
|
||||
# prevent duplicating it in docker logs by sending to dev null
|
||||
|
||||
32
s6/service
@@ -2,29 +2,49 @@
|
||||
# This script patches all service commands into the appropriate s6- commands
|
||||
# pi-hole upstream scripts need a 'service' interface. why not systemd? docker said so.
|
||||
start() {
|
||||
s6-svc -wU -u -T2500 /var/run/s6/services/$service
|
||||
s6-svc -wu -u -T2500 /var/run/s6/services/$service
|
||||
}
|
||||
|
||||
stop() {
|
||||
s6-svc -wD -d -T2500 /var/run/s6/services/$service
|
||||
s6-svc -wD -d -T2500 /var/run/s6/services/$service
|
||||
}
|
||||
|
||||
restart() {
|
||||
local pid
|
||||
|
||||
# Get the PID of the service we are asking to restart
|
||||
pid=$(pgrep $service)
|
||||
|
||||
# Only attempt to stop the service if it is already running
|
||||
if [ -n "$pid" ]; then
|
||||
stop
|
||||
|
||||
# Loop until we are certain that the process has been stopped
|
||||
while test -d /proc/$pid; do
|
||||
sleep 0.2
|
||||
done
|
||||
fi
|
||||
|
||||
# Check it hasn't been started by something else in the meantime
|
||||
pid=$(pgrep $service)
|
||||
|
||||
# Only attempt to start the service if it is not already running
|
||||
if [ -z "$pid" ]; then
|
||||
start
|
||||
#s6-svc -t -wR -T5000 /var/run/s6/services/$service
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
status() {
|
||||
s6-svstat /var/run/s6/services/$service
|
||||
s6-svstat /var/run/s6/services/$service
|
||||
}
|
||||
|
||||
service="$1"
|
||||
command="$2"
|
||||
|
||||
if [[ ! -d "/var/run/s6/services/$service" ]] ; then
|
||||
echo "s6 service not found for $service, exiting..."
|
||||
exit
|
||||
echo "s6 service not found for $service, exiting..."
|
||||
exit
|
||||
fi;
|
||||
|
||||
${command} "${service}"
|
||||
|
||||
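An illustrative use of the shim above from outside the container (service names must match directories under /var/run/s6/services, e.g. pihole-FTL and lighttpd):

```
docker exec pihole service pihole-FTL restart
docker exec pihole service lighttpd status
```
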
162
start.sh
@@ -5,28 +5,59 @@ export ServerIP
|
||||
export ServerIPv6
|
||||
export PYTEST
|
||||
export PHP_ENV_CONFIG
|
||||
export PHP_ERROR_LOG
|
||||
export PHP_ERROR_LOG
|
||||
export HOSTNAME
|
||||
export WEBLOGDIR
|
||||
export DNS1
|
||||
export DNS2
|
||||
export DNSSEC
|
||||
export DNS_BOGUS_PRIV
|
||||
export DNS_FQDN_REQUIRED
|
||||
export INTERFACE
|
||||
export DNSMASQ_LISTENING_BEHAVIOUR="$DNSMASQ_LISTENING"
|
||||
export IPv6
|
||||
export WEB_PORT
|
||||
export REV_SERVER
|
||||
export REV_SERVER_DOMAIN
|
||||
export REV_SERVER_TARGET
|
||||
export REV_SERVER_CIDR
|
||||
export CONDITIONAL_FORWARDING
|
||||
export CONDITIONAL_FORWARDING_IP
|
||||
export CONDITIONAL_FORWARDING_DOMAIN
|
||||
export CONDITIONAL_FORWARDING_REVERSE
|
||||
export TEMPERATUREUNIT
|
||||
export ADMIN_EMAIL
|
||||
export WEBUIBOXEDLAYOUT
|
||||
export QUERY_LOGGING
|
||||
export PIHOLE_DNS_
|
||||
export DHCP_ACTIVE
|
||||
export DHCP_START
|
||||
export DHCP_END
|
||||
export DHCP_ROUTER
|
||||
export DHCP_LEASETIME
|
||||
export PIHOLE_DOMAIN
|
||||
export DHCP_IPv6
|
||||
export DHCP_rapid_commit
|
||||
export WEBTHEME
|
||||
export CUSTOM_CACHE_SIZE
|
||||
|
||||
export adlistFile='/etc/pihole/adlists.list'
|
||||
|
||||
# If user has set QUERY_LOGGING Env Var, copy it out to _OVERRIDE, else it will get reset when we source the next two files
|
||||
# Come back to it at the end of the file
|
||||
[ -n "${QUERY_LOGGING}" ] && QUERY_LOGGING_OVERRIDE="${QUERY_LOGGING}"
|
||||
|
||||
# The below functions are all contained in bash_functions.sh
|
||||
. /bash_functions.sh
|
||||
|
||||
# Ensure we have all functions available to update our configurations
|
||||
. /opt/pihole/webpage.sh
|
||||
|
||||
# PH_TEST prevents the install from actually running (someone should rename that)
|
||||
PH_TEST=true . $PIHOLE_INSTALL
|
||||
PH_TEST=true . "${PIHOLE_INSTALL}"
|
||||
|
||||
echo " ::: Starting docker specific checks & setup for docker pihole/pihole"
|
||||
|
||||
docker_checks
|
||||
|
||||
# TODO:
|
||||
#if [ ! -f /.piholeFirstBoot ] ; then
|
||||
# echo " ::: Not first container startup so not running docker's setup, re-create container to run setup again"
|
||||
@@ -35,14 +66,114 @@ docker_checks
|
||||
#fi
|
||||
|
||||
fix_capabilities
|
||||
load_web_password_secret
|
||||
generate_password
|
||||
validate_env || exit 1
|
||||
prepare_configs
|
||||
change_setting "IPV4_ADDRESS" "$ServerIP"
|
||||
change_setting "IPV6_ADDRESS" "$ServerIPv6"
|
||||
|
||||
[ -n "${PIHOLE_INTERFACE}" ] && change_setting "PIHOLE_INTERFACE" "$PIHOLE_INTERFACE"
|
||||
[ -n "${IPV4_ADDRESS}" ] && change_setting "IPV4_ADDRESS" "$IPV4_ADDRESS"
|
||||
[ -n "${INSTALL_WEB_SERVER}" ] && change_setting "INSTALL_WEB_SERVER" "$INSTALL_WEB_SERVER"
|
||||
[ -n "${INSTALL_WEB_INTERFACE}" ] && change_setting "INSTALL_WEB_INTERFACE" "$INSTALL_WEB_INTERFACE"
|
||||
[ -n "${LIGHTTPD_ENABLED}" ] && change_setting "LIGHTTPD_ENABLED" "$LIGHTTPD_ENABLED"
|
||||
[ -n "${DNS_BOGUS_PRIV}" ] && change_setting "DNS_BOGUS_PRIV" "$DNS_BOGUS_PRIV"
|
||||
[ -n "${ServerIP}" ] && changeFTLsetting "REPLY_ADDR4" "$ServerIP"
|
||||
[ -n "${ServerIPv6}" ] && changeFTLsetting "REPLY_ADDR6" "$ServerIPv6"
|
||||
[ -n "${DNS_FQDN_REQUIRED}" ] && change_setting "DNS_FQDN_REQUIRED" "$DNS_FQDN_REQUIRED"
|
||||
[ -n "${DNSSEC}" ] && change_setting "DNSSEC" "$DNSSEC"
|
||||
[ -n "${REV_SERVER}" ] && change_setting "REV_SERVER" "$REV_SERVER"
|
||||
[ -n "${REV_SERVER_DOMAIN}" ] && change_setting "REV_SERVER_DOMAIN" "$REV_SERVER_DOMAIN"
|
||||
[ -n "${REV_SERVER_TARGET}" ] && change_setting "REV_SERVER_TARGET" "$REV_SERVER_TARGET"
|
||||
[ -n "${REV_SERVER_CIDR}" ] && change_setting "REV_SERVER_CIDR" "$REV_SERVER_CIDR"
|
||||
|
||||
# Get all exported environment variables starting with FTLCONF_ as a prefix and call the changeFTLsetting
|
||||
# function with the environment variable's suffix as the key. This allows applying any pihole-FTL.conf
|
||||
# setting defined here: https://docs.pi-hole.net/ftldns/configfile/
|
||||
declare -px | grep FTLCONF_ | sed -E 's/declare -x FTLCONF_([^=]+)=\"(.+)\"/\1 \2/' | while read -r name value
|
||||
do
|
||||
echo "Applying pihole-FTL.conf setting $name=$value"
|
||||
changeFTLsetting "$name" "$value"
|
||||
done
|
||||
|
||||
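A hedged usage sketch of the FTLCONF_ passthrough added above: any exported FTLCONF_<KEY> variable becomes a pihole-FTL.conf setting. The specific keys and values below are only examples drawn from the documented FTL options and from this diff (REPLY_ADDR4), and the image tag is a placeholder:

```bash
# Each FTLCONF_<KEY> maps 1:1 to a pihole-FTL.conf entry:
#   FTLCONF_MAXDBDAYS=180            -> MAXDBDAYS=180
#   FTLCONF_REPLY_ADDR4=192.168.1.10 -> REPLY_ADDR4=192.168.1.10
docker run -d \
  -e FTLCONF_MAXDBDAYS=180 \
  -e FTLCONF_REPLY_ADDR4=192.168.1.10 \
  pihole/pihole:latest
```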
if [ -z "$REV_SERVER" ];then
|
||||
# If the REV_SERVER* variables are set, then there is no need to add these.
|
||||
# If it is not set, then adding these variables is fine, and they will be converted by the Pi-hole install script
|
||||
[ -n "${CONDITIONAL_FORWARDING}" ] && change_setting "CONDITIONAL_FORWARDING" "$CONDITIONAL_FORWARDING"
|
||||
[ -n "${CONDITIONAL_FORWARDING_IP}" ] && change_setting "CONDITIONAL_FORWARDING_IP" "$CONDITIONAL_FORWARDING_IP"
|
||||
[ -n "${CONDITIONAL_FORWARDING_DOMAIN}" ] && change_setting "CONDITIONAL_FORWARDING_DOMAIN" "$CONDITIONAL_FORWARDING_DOMAIN"
|
||||
[ -n "${CONDITIONAL_FORWARDING_REVERSE}" ] && change_setting "CONDITIONAL_FORWARDING_REVERSE" "$CONDITIONAL_FORWARDING_REVERSE"
|
||||
fi
|
||||
|
||||
if [ -z "${PIHOLE_DNS_}" ]; then
|
||||
# For backward compatibility, if DNS1 and/or DNS2 are set, but PIHOLE_DNS_ is not, convert them to
|
||||
# a semi-colon delimited string and store in PIHOLE_DNS_
|
||||
# They are not used anywhere if PIHOLE_DNS_ is set already
|
||||
[ -n "${DNS1}" ] && echo "Converting DNS1 to PIHOLE_DNS_" && PIHOLE_DNS_="$DNS1"
|
||||
[[ -n "${DNS2}" && "${DNS2}" != "no" ]] && echo "Converting DNS2 to PIHOLE_DNS_" && PIHOLE_DNS_="$PIHOLE_DNS_;$DNS2"
|
||||
fi
|
||||
|
||||
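Because of the back-compat shim above, the two invocation styles below should end up equivalent; this is an illustrative sketch (the upstream IPs and image tag are arbitrary examples):

```bash
# Legacy variables, converted to PIHOLE_DNS_ at startup:
docker run -d -e DNS1=1.1.1.1 -e DNS2=1.0.0.1 pihole/pihole:latest

# Preferred form, a single semicolon-delimited list:
docker run -d -e 'PIHOLE_DNS_=1.1.1.1;1.0.0.1' pihole/pihole:latest
```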
# Parse the PIHOLE_DNS variable, if it exists, and apply upstream servers to Pi-hole config
if [ -n "${PIHOLE_DNS_}" ]; then
  echo "Setting DNS servers based on PIHOLE_DNS_ variable"
  # Remove any PIHOLE_DNS_ entries from setupVars.conf, if they exist
  sed -i '/PIHOLE_DNS_/d' /etc/pihole/setupVars.conf
  # Split into an array (delimited by ;)
  # Loop through and add them one by one to setupVars.conf
  PIHOLE_DNS_ARR=(${PIHOLE_DNS_//;/ })
  count=1
  valid_entries=0
  for i in "${PIHOLE_DNS_ARR[@]}"; do
    if valid_ip "$i" || valid_ip6 "$i" ; then
      change_setting "PIHOLE_DNS_$count" "$i"
      ((count=count+1))
      ((valid_entries=valid_entries+1))
    else
      echo "Invalid IP detected in PIHOLE_DNS_: ${i}"
    fi
  done

  if [ $valid_entries -eq 0 ]; then
    echo "No valid IPs detected in PIHOLE_DNS_. Aborting"
    exit 1
  fi
else
  # Environment variable has not been set, but there may be existing values in an existing setupVars.conf
  # if this is the case, we do not want to overwrite these with the defaults of 8.8.8.8 and 8.8.4.4
  # Pi-hole can run with only one upstream configured, so we will just check for one.
  setupVarsDNS="$(grep 'PIHOLE_DNS_' /etc/pihole/setupVars.conf || true)"

  if [ -z "${setupVarsDNS}" ]; then
    echo "Configuring default DNS servers: 8.8.8.8, 8.8.4.4"
    change_setting "PIHOLE_DNS_1" "8.8.8.8"
    change_setting "PIHOLE_DNS_2" "8.8.4.4"
  else
    echo "Existing DNS servers detected in setupVars.conf. Leaving them alone"
  fi
fi

# Parse the WEBTHEME variable, if it exists, and set the selected theme if it is one of the supported values.
# If an invalid theme name was supplied, set WEBTHEME to use the default-light theme.
if [ -n "${WEBTHEME}" ]; then
  case "${WEBTHEME}" in
    "default-dark" | "default-darker" | "default-light" | "default-auto" | "lcars")
      echo "Setting Web Theme based on WEBTHEME variable, using value ${WEBTHEME}"
      change_setting "WEBTHEME" "${WEBTHEME}"
      ;;
    *)
      echo "Invalid theme name supplied: ${WEBTHEME}, falling back to default-light."
      change_setting "WEBTHEME" "default-light"
      ;;
  esac
fi
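A quick illustration of the WEBTHEME handling above; the accepted names are the ones listed in the case statement, and the image tag is a placeholder:

```bash
# Accepted: default-dark, default-darker, default-light, default-auto, lcars
docker run -d -e WEBTHEME=default-dark pihole/pihole:latest

# Anything else logs a warning and falls back to default-light:
docker run -d -e WEBTHEME=solarized pihole/pihole:latest
```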
[[ -n "${DHCP_ACTIVE}" && ${DHCP_ACTIVE} == "true" ]] && echo "Setting DHCP server" && setup_dhcp
|
||||
|
||||
setup_web_port "$WEB_PORT"
|
||||
setup_web_password "$WEBPASSWORD"
|
||||
setup_dnsmasq "$DNS1" "$DNS2" "$INTERFACE" "$DNSMASQ_LISTENING_BEHAVIOUR"
|
||||
setup_temp_unit "$TEMPERATUREUNIT"
|
||||
setup_ui_layout "$WEBUIBOXEDLAYOUT"
|
||||
setup_admin_email "$ADMIN_EMAIL"
|
||||
setup_dnsmasq "$INTERFACE" "$DNSMASQ_LISTENING_BEHAVIOUR"
|
||||
setup_php_env
|
||||
setup_dnsmasq_hostnames "$ServerIP" "$ServerIPv6" "$HOSTNAME"
|
||||
setup_ipv4_ipv6
|
||||
@@ -52,4 +183,21 @@ test_configs
|
||||
|
||||
[ -f /.piholeFirstBoot ] && rm /.piholeFirstBoot
|
||||
|
||||
# Set QUERY_LOGGING value in setupVars to be that which the user has passed in as an ENV var (if they have)
|
||||
[ -n "${QUERY_LOGGING_OVERRIDE}" ] && change_setting "QUERY_LOGGING" "$QUERY_LOGGING_OVERRIDE"
|
||||
|
||||
# Source setupVars.conf to get the true value of QUERY_LOGGING
|
||||
. ${setupVars}
|
||||
|
||||
if [ ${QUERY_LOGGING} == "false" ]; then
|
||||
echo "::: Disabling Query Logging"
|
||||
pihole logging off
|
||||
else
|
||||
# If it is anything other than false, set it to true
|
||||
change_setting "QUERY_LOGGING" "true"
|
||||
# Set pihole logging on for good measure
|
||||
echo "::: Enabling Query Logging"
|
||||
pihole logging on
|
||||
fi
|
||||
|
||||
echo " ::: Docker start setup complete"
|
||||
|
||||
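A short sketch of the QUERY_LOGGING override handled at the end of start.sh above: only the literal value "false" disables logging, and any other value is normalised to "true" (the image tag is a placeholder):

```bash
# Disable query logging for this container:
docker run -d -e QUERY_LOGGING=false pihole/pihole:latest

# Any non-"false" value is rewritten to "true" and `pihole logging on` runs:
docker run -d -e QUERY_LOGGING=yes pihole/pihole:latest
```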
@@ -1,19 +1,31 @@
import pytest
import testinfra

import os
import pytest
import subprocess
import testinfra

check_output = testinfra.get_backend(
    "local://"
).get_module("Command").check_output
local_host = testinfra.get_host('local://')
check_output = local_host.check_output

__version__ = None
dotdot = os.path.abspath(os.path.join(os.path.abspath(__file__), os.pardir, os.pardir))
with open('{}/VERSION'.format(dotdot), 'r') as v:
    __version__ = v.read().strip()
DEBIAN_VERSION = os.environ.get('DEBIAN_VERSION', 'buster')

@pytest.fixture()
def args_dns():
    return '--dns 127.0.0.1 --dns 1.1.1.1'
def run_and_stream_command_output():
    def run_and_stream_command_output_inner(command, verbose=False):
        print("Running", command)
        build_env = os.environ.copy()
        build_env['PIHOLE_DOCKER_TAG'] = version
        build_result = subprocess.Popen(command.split(), env=build_env, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                                        bufsize=1, universal_newlines=True)
        if verbose:
            while build_result.poll() is None:
                for line in build_result.stdout:
                    print(line, end='')
        build_result.wait()
        if build_result.returncode != 0:
            print(" ::: Error running {}".format(command))
            print(build_result.stderr)
    return run_and_stream_command_output_inner

@pytest.fixture()
def args_volumes():
@@ -21,11 +33,11 @@ def args_volumes():

@pytest.fixture()
def args_env():
    return '-e ServerIP="127.0.0.1" -e ServerIPv6="::1"'
    return '-e ServerIP="127.0.0.1"'

@pytest.fixture()
def args(args_dns, args_volumes, args_env):
    return "{} {} {}".format(args_dns, args_volumes, args_env)
def args(args_volumes, args_env):
    return "{} {}".format(args_volumes, args_env)

@pytest.fixture()
def test_args():
@@ -33,14 +45,14 @@ def test_args():
    return ''

def DockerGeneric(request, _test_args, _args, _image, _cmd, _entrypoint):
    assert 'docker' in check_output('id'), "Are you in the docker group?"
    #assert 'docker' in check_output('id'), "Are you in the docker group?"
    # Always append the PYTEST arg to tell pihole we're testing
    if 'pihole' in _image and 'PYTEST=1' not in _args:
        _args = '{} -e PYTEST=1'.format(_args)
    docker_run = 'docker run -d -t {args} {test_args} {entry} {image} {cmd}'\
        .format(args=_args, test_args=_test_args, entry=_entrypoint, image=_image, cmd=_cmd)
    # Print a human runnable version of the container run command for faster debugging
    print docker_run.replace('-d -t', '--rm -it').replace('tail -f /dev/null', 'bash')
    print(docker_run.replace('-d -t', '--rm -it').replace('tail -f /dev/null', 'bash'))
    docker_id = check_output(docker_run)

    def teardown():
@@ -48,24 +60,9 @@ def DockerGeneric(request, _test_args, _args, _image, _cmd, _entrypoint):
        check_output("docker rm -f {}".format(docker_id))
    request.addfinalizer(teardown)
    docker_container = testinfra.get_backend("docker://" + docker_id)
    docker_container = testinfra.backend.get_backend("docker://" + docker_id, sudo=False)
    docker_container.id = docker_id

    def run_bash(self, command, *args, **kwargs):
        cmd = self.get_command(command, *args)
        if self.user is not None:
            out = self.run_local(
                "docker exec -u %s %s /bin/bash -c %s",
                self.user, self.name, cmd)
        else:
            out = self.run_local(
                "docker exec %s /bin/bash -c %s", self.name, cmd)
        out.command = self.encode(cmd)
        return out

    funcType = type(docker_container.run)
    # override run function to use bash not sh
    docker_container.run = funcType(run_bash, docker_container, testinfra.backend.docker.DockerBackend)
    return docker_container


@@ -78,7 +75,7 @@ def Docker(request, test_args, args, image, cmd, entrypoint):
def DockerPersist(request, persist_test_args, persist_args, persist_image, persist_cmd, persist_entrypoint, Dig):
    ''' Persistent Docker container for multiple tests, instead of stopping container after one test '''
    ''' Uses DUP'd module scoped fixtures because smaller scoped fixtures won't mix with module scope '''
    persistent_container = DockerGeneric(request, persist_test_args, persist_args, persist_image, persist_cmd, persist_entrypoint)
    ''' attach a dig container for lookups '''
    persistent_container.dig = Dig(persistent_container.id)
    return persistent_container
@@ -87,17 +84,21 @@ def DockerPersist(request, persist_test_args, persist_args, persist_image, persi
def entrypoint():
    return ''

@pytest.fixture(params=['amd64', 'armhf', 'aarch64'])
@pytest.fixture(params=['amd64', 'armhf', 'arm64', 'armel', 'i386'])
def arch(request):
    return request.param

@pytest.fixture()
def version():
    return __version__
    return os.environ.get('GIT_TAG', None)

@pytest.fixture()
def tag(version, arch):
    return '{}_{}'.format(version, arch)
def debian_version():
    return DEBIAN_VERSION

@pytest.fixture()
def tag(version, arch, debian_version):
    return '{}-{}-{}'.format(version, arch, debian_version)

@pytest.fixture
def webserver(tag):
@@ -120,7 +121,11 @@ def persist_arch():

@pytest.fixture(scope='module')
def persist_version():
    return __version__
    return version

@pytest.fixture(scope='module')
def persist_debian_version():
    return DEBIAN_VERSION

@pytest.fixture(scope='module')
def persist_args_dns():
@@ -132,11 +137,11 @@ def persist_args_volumes():

@pytest.fixture(scope='module')
def persist_args_env():
    return '-e ServerIP="127.0.0.1" -e ServerIPv6="::1"'
    return '-e ServerIP="127.0.0.1"'

@pytest.fixture(scope='module')
def persist_args(persist_args_dns, persist_args_volumes, persist_args_env):
    return "{} {} {}".format(args_dns, args_volumes, args_env)
def persist_args(persist_args_volumes, persist_args_env):
    return "{} {}".format(persist_args_volumes, persist_args_env)

@pytest.fixture(scope='module')
def persist_test_args():
@@ -144,8 +149,8 @@ def persist_test_args():
    return ''

@pytest.fixture(scope='module')
def persist_tag(persist_version, persist_arch):
    return '{}_{}'.format(persist_version, persist_arch)
def persist_tag(persist_version, persist_arch, persist_debian_version):
    return '{}_{}_{}'.format(persist_version, persist_arch, persist_debian_version)

@pytest.fixture(scope='module')
def persist_webserver(persist_tag):
@@ -176,7 +181,7 @@ def Slow():
    while True:
        try:
            assert check()
        except AssertionError, e:
        except AssertionError as e:
            if time.time() < timeout_at:
                time.sleep(1)
            else:
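For orientation, roughly what the "human runnable" debug command printed by DockerGeneric looks like after the substitutions above; the tag follows the new version-arch-debian_version scheme, and the volume mount and ServerIP value are assumptions based on the fixtures in this diff:

```bash
# Assumed example tag built as {GIT_TAG}-{arch}-{debian_version}:
docker run --rm -it \
  -v /dev/null:/etc/pihole/adlists.list \
  -e ServerIP="127.0.0.1" \
  -e PYTEST=1 \
  pihole:v5.0-amd64-buster \
  bash
```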
@@ -1,3 +1,4 @@
import os
import pytest
import re
@@ -21,27 +22,20 @@ def test_IPv6_not_True_removes_ipv6(Docker, Slow, test_args, expected_ipv6, expe
    # On overlay2(?) docker sometimes writes to disk are slow enough to break some tests...
    expected_ipv6_check = lambda: (\
        IPV6_LINE in Docker.run('grep \'use-ipv6.pl\' {}'.format(WEB_CONFIG)).stdout
    ) == expected_ipv6
    Slow(expected_ipv6_check)


@pytest.mark.parametrize('test_args', ['-e "WEB_PORT=999"'])
def test_overrides_default_WEB_PORT(Docker, Slow, test_args):
    ''' When a --net=host user sets WEB_PORT to avoid Synology's default port 80, the IPv4 and/or IPv6 ports are updated '''
    CONFIG_LINE = 'server.port\s*=\s*999'
    CONFIG_LINE = r'server.port\s*=\s*999'
    WEB_CONFIG = '/etc/lighttpd/lighttpd.conf'

    function = Docker.run('. /bash_functions.sh ; eval `grep setup_web_port /start.sh`')
    assert "Custom WEB_PORT set to 999" in function.stdout
    assert "INFO: Without proper router DNAT forwarding to 127.0.0.1:999, you may not get any blocked websites on ads" in function.stdout
    Slow(lambda: re.search(CONFIG_LINE, Docker.run('cat {}'.format(WEB_CONFIG)).stdout) != None)
    # grep fails to find any of the old address w/o port
    assert Docker.run('grep -rq "://127.0.0.1/" /var/www/html/').rc == 1
    assert Docker.run('grep -rq "://pi.hole/" /var/www/html/').rc == 1
    # Find at least one instance of our changes
    # the upstream repo determines how many and I don't want to keep updating this test
    assert int(Docker.run('grep -rl "://127.0.0.1:999/" /var/www/html/ | wc -l').stdout) >= 1
    assert int(Docker.run('grep -rl "://pi.hole:999/" /var/www/html/ | wc -l').stdout) >= 1
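Outside the test harness, the behaviour exercised above corresponds to a run like the following sketch (host networking plus a non-default web port; the IP and image tag are examples):

```bash
# With --net=host, move the admin interface off port 80 to avoid a conflict
# (e.g. on a NAS that already serves its own UI on 80):
docker run -d --net=host \
  -e WEB_PORT=999 \
  -e ServerIP=192.168.1.10 \
  pihole/pihole:latest
```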
@pytest.mark.parametrize('test_args,expected_error', [
@@ -54,7 +48,41 @@ def test_bad_input_to_WEB_PORT(Docker, test_args, expected_error):
    assert expected_error in function.stdout


@pytest.mark.parametrize('test_args,cache_size', [('-e CUSTOM_CACHE_SIZE="0"', '0'), ('-e CUSTOM_CACHE_SIZE="20000"', '20000')])
def test_overrides_default_CUSTOM_CACHE_SIZE(Docker, Slow, test_args, cache_size):
    ''' Changes the cache_size setting to increase or decrease the cache size for dnsmasq'''
    CONFIG_LINE = r'cache-size\s*=\s*{}'.format(cache_size)
    DNSMASQ_CONFIG = '/etc/dnsmasq.d/01-pihole.conf'

    function = Docker.run('echo ${CUSTOM_CACHE_SIZE};. ./bash_functions.sh; echo ${CUSTOM_CACHE_SIZE}; eval `grep setup_dnsmasq /start.sh`')
    assert "Custom CUSTOM_CACHE_SIZE set to {}".format(cache_size) in function.stdout
    Slow(lambda: re.search(CONFIG_LINE, Docker.run('cat {}'.format(DNSMASQ_CONFIG)).stdout) != None)


@pytest.mark.parametrize('test_args', [
    '-e CUSTOM_CACHE_SIZE="-1"',
    '-e CUSTOM_CACHE_SIZE="1,000"',
])
def test_bad_input_to_CUSTOM_CACHE_SIZE(Docker, Slow, test_args):
    CONFIG_LINE = r'cache-size\s*=\s*10000'
    DNSMASQ_CONFIG = '/etc/dnsmasq.d/01-pihole.conf'

    Docker.run('. ./bash_functions.sh; eval `grep setup_dnsmasq /start.sh`')
    Slow(lambda: re.search(CONFIG_LINE, Docker.run('cat {}'.format(DNSMASQ_CONFIG)).stdout) != None)

@pytest.mark.parametrize('test_args', [
    '-e DNSSEC="true" -e CUSTOM_CACHE_SIZE="0"',
])
def test_dnssec_enabled_with_CUSTOM_CACHE_SIZE(Docker, Slow, test_args):
    CONFIG_LINE = r'cache-size\s*=\s*10000'
    DNSMASQ_CONFIG = '/etc/dnsmasq.d/01-pihole.conf'

    Docker.run('. ./bash_functions.sh; eval `grep setup_dnsmasq /start.sh`')
    Slow(lambda: re.search(CONFIG_LINE, Docker.run('cat {}'.format(DNSMASQ_CONFIG)).stdout) != None)
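The cache-size behaviour covered by these new tests, expressed as run commands (a sketch; the image tag is a placeholder):

```bash
# Valid values are written through to dnsmasq's cache-size setting:
docker run -d -e CUSTOM_CACHE_SIZE=20000 pihole/pihole:latest

# Invalid values ("-1", "1,000") keep the default cache-size of 10000,
# and per the DNSSEC test above, enabling DNSSEC also keeps cache-size at 10000:
docker run -d -e DNSSEC=true -e CUSTOM_CACHE_SIZE=0 pihole/pihole:latest
```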
# DNS Environment Variable behavior in combinations of modified pihole LTE settings
@pytest.mark.skip('broke, needs investigation in v5.0 beta')
@pytest.mark.parametrize('args_env, expected_stdout, dns1, dns2', [
    ('', 'default DNS', '8.8.8.8', '8.8.4.4' ),
    ('-e DNS1="1.2.3.4"', 'custom DNS', '1.2.3.4', '8.8.4.4' ),
@@ -70,11 +98,12 @@ def test_override_default_servers_with_DNS_EnvVars(Docker, Slow, args_env, expec
    function = Docker.run('. /bash_functions.sh ; eval `grep "^setup_dnsmasq " /start.sh`')
    assert expected_stdout in function.stdout
    expected_servers = 'server={}\n'.format(dns1) if dns2 == None else 'server={}\nserver={}\n'.format(dns1, dns2)
    Slow(lambda: expected_servers == Docker.run('grep "^server=" /etc/dnsmasq.d/01-pihole.conf').stdout)
    Slow(lambda: expected_servers == Docker.run('grep "^server=[^/]" /etc/dnsmasq.d/01-pihole.conf').stdout)


@pytest.mark.skipif(os.environ.get('TRAVIS') == 'true',
                    reason="Can't get setupVar setup to work on travis")
#@pytest.mark.skipif(os.environ.get('CI') == 'true',
#                    reason="Can't get setupVar setup to work on travis")
@pytest.mark.skip('broke, needs investigation in v5.0 beta')
@pytest.mark.parametrize('args_env, dns1, dns2, expected_stdout', [

    ('', '9.9.9.1', '9.9.9.2',
@@ -110,20 +139,19 @@ def test_DNS_Envs_are_secondary_to_setupvars(Docker, Slow, args_env, expected_st
    expected_servers = ['server={}'.format(dns1)]
    if dns2:
        expected_servers.append('server={}'.format(dns2))
    Slow(lambda: Docker.run('grep "^server=" /etc/dnsmasq.d/01-pihole.conf').stdout.strip().split('\n') == \
    Slow(lambda: Docker.run('grep "^server=[^/]" /etc/dnsmasq.d/01-pihole.conf').stdout.strip().split('\n') == \
        expected_servers)


@pytest.mark.parametrize('args_env, expected_stdout, expected_config_line', [
    ('', 'binding to default interface: eth0', 'interface=eth0' ),
    ('-e INTERFACE="eth0"', 'binding to default interface: eth0', 'interface=eth0' ),
    ('-e INTERFACE="br0"', 'binding to custom interface: br0', 'interface=br0'),
@pytest.mark.parametrize('args_env, expected_stdout, expected_config_line', [
    ('', 'binding to default interface: eth0', 'PIHOLE_INTERFACE=eth0'),
    ('-e INTERFACE="br0"', 'binding to custom interface: br0', 'PIHOLE_INTERFACE=br0'),
])
def test_DNS_interface_override_defaults(Docker, Slow, args_env, expected_stdout, expected_config_line):
    ''' When INTERFACE environment var is passed in, overwrite dnsmasq interface '''
    function = Docker.run('. /bash_functions.sh ; eval `grep "^setup_dnsmasq " /start.sh`')
    assert expected_stdout in function.stdout
    Slow(lambda: expected_config_line + '\n' == Docker.run('grep "^interface" /etc/dnsmasq.d/01-pihole.conf').stdout)
    Slow(lambda: expected_config_line + '\n' == Docker.run('grep "^PIHOLE_INTERFACE" /etc/pihole/setupVars.conf').stdout)


expected_debian_lines = [
@@ -183,19 +211,3 @@ def test_webPassword_pre_existing_trumps_all_envs(Docker, args_env, test_args):

    assert '::: Pre existing WEBPASSWORD found' in function.stdout
    assert Docker.run('grep -q \'{}\' {}'.format('WEBPASSWORD=volumepass', '/etc/pihole/setupVars.conf')).rc == 0


@pytest.mark.parametrize('args_dns, expected_stdout', [
    # No DNS passed will vary by the host this is run on, bad idea for a test
    #('', 'WARNING Misconfigured DNS in /etc/resolv.conf: Primary DNS should be 127.0.0.1'),
    ('--dns 1.1.1.1', 'WARNING Misconfigured DNS in /etc/resolv.conf: Two DNS servers are recommended, 127.0.0.1 and any backup server\n'
                      'WARNING Misconfigured DNS in /etc/resolv.conf: Primary DNS should be 127.0.0.1 (found 1.1.1.1)'),
    ('--dns 127.0.0.1', 'WARNING Misconfigured DNS in /etc/resolv.conf: Two DNS servers are recommended, 127.0.0.1 and any backup server'),
    ('--dns 1.1.1.1 --dns 127.0.0.1', 'WARNING Misconfigured DNS in /etc/resolv.conf: Primary DNS should be 127.0.0.1 (found 1.1.1.1)'),
    ('--dns 127.0.0.1 --dns 1.1.1.1', 'OK: Checks passed for /etc/resolv.conf DNS servers'),
])
def test_docker_checks_for_resolvconf_misconfiguration(Docker, args_dns, expected_stdout):
    ''' The container checks for misconfigured resolv.conf '''
    function = Docker.run('. /bash_functions.sh ; eval `grep docker_checks /start.sh`')
    print function.stdout
    assert expected_stdout in function.stdout
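The docker_checks expectations in the (removed) test above translate to the container's --dns flags roughly as follows; the image tag is a placeholder:

```bash
# Passes the resolv.conf check: loopback first, any backup server second
docker run -d --dns 127.0.0.1 --dns 1.1.1.1 pihole/pihole:latest

# Warns: primary DNS should be 127.0.0.1 (found 1.1.1.1)
docker run -d --dns 1.1.1.1 --dns 127.0.0.1 pihole/pihole:latest
```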
@@ -1,3 +1,4 @@
import pytest
import time
''' conftest.py provides the defaults through fixtures '''
@@ -44,10 +45,10 @@ def test_indecies_are_present(RunningPiHole):

def validate_curl(http_rc, expected_http_code, page_contents):
    if int(http_rc.rc) != 0 or int(http_rc.stdout) != expected_http_code:
        print 'CURL return code: {}'.format(http_rc.rc)
        print 'CURL stdout: {}'.format(http_rc.stdout)
        print 'CURL stderr:{}'.format(http_rc.stderr)
        print 'CURL file:\n{}\n'.format(page_contents.encode('utf-8'))
        print('CURL return code: {}'.format(http_rc.rc))
        print('CURL stdout: {}'.format(http_rc.stdout))
        print('CURL stderr:{}'.format(http_rc.stderr))
        print('CURL file:\n{}\n'.format(page_contents.encode('utf-8')))


@pytest.mark.parametrize('addr', [ 'localhost' ] )
104 test/test_volume_data.sh (Executable file)
@@ -0,0 +1,104 @@
#!/bin/bash
set -ex
# Trying something different from the python test, this is a big integration test in bash
# Tests multiple volume settings and how they are impacted by the complete startup scripts + restart/re-creation of container
# Maybe a bit easier to read the workflow/debug in bash than python for others?
# This workflow is VERY similar to python's tests, but in bash so not object-oriented/pytest fixture based

# Debug can be added anywhere to check current state mid-test
RED='\033[0;31m'
NC='\033[0m' # No Color
if [ $(id -u) != 0 ] ; then
  sudo=sudo # do not need if root (in docker)
fi
debug() {
  $sudo grep -r . "$VOL_PH"
  $sudo grep -r . "$VOL_DM"
}
# Cleanup at the end, print debug on fail
cleanup() {
  retcode=$?
  { set +x; } 2>/dev/null
  if [ $retcode != 0 ] ; then
    printf "${RED}ERROR / FAILURE${NC} - printing all volume info"
    debug
  fi
  docker rm -f $CONTAINER
  $sudo rm -rf $VOLUMES
  exit $retcode
}
trap "cleanup" INT TERM EXIT


# VOLUME TESTS

# Given...
DEBIAN_VERSION="${DEBIAN_VERSION:-buster}"
IMAGE="${1:-pihole:v5.0-amd64}-${DEBIAN_VERSION}" # Default is latest build test image (generic, non release/branch tag)
VOLUMES="$(mktemp -d)" # A fresh volume directory
VOL_PH="$VOLUMES/pihole"
VOL_DM="$VOLUMES/dnsmasq.d"
tty -s && TTY='-t' || TTY=''

echo "Testing $IMAGE with volumes base path $VOLUMES"

# When
# Running stock+empty volumes (no ports to avoid conflicts)
CONTAINER="$(
  docker run -d \
    -v "$VOL_PH:/etc/pihole" \
    -v "$VOL_DM:/etc/dnsmasq.d" \
    -v "/dev/null:/etc/pihole/adlists.list" \
    --entrypoint='' \
    $IMAGE \
    tail -f /dev/null
)" # container backgrounded for multiple operations over time

EXEC() {
  local container="$1"
  # Must quote for complex commands
  docker exec $TTY $container bash -c "$2"
}
EXEC $CONTAINER /start.sh # run all the startup scripts

# Then defaults are present
grep "PIHOLE_DNS_1=8.8.8.8" "$VOL_PH/setupVars.conf"
grep "PIHOLE_DNS_2=8.8.4.4" "$VOL_PH/setupVars.conf"
grep "IPV4_ADDRESS=0.0.0.0" "$VOL_PH/setupVars.conf"
grep -E "WEBPASSWORD=.+" "$VOL_PH/setupVars.conf"

# Given the settings are manually changed (not good settings, just for testing changes)
EXEC $CONTAINER 'pihole -a setdns 127.1.1.1,127.2.2.2,127.3.3.3,127.4.4.4'
EXEC $CONTAINER '. /opt/pihole/webpage.sh ; change_setting IPV4_ADDRESS 10.0.0.0'
EXEC $CONTAINER 'pihole -a -p login'
assert_new_settings() {
  grep "PIHOLE_DNS_1=127.1.1.1" "$VOL_PH/setupVars.conf"
  grep "PIHOLE_DNS_2=127.2.2.2" "$VOL_PH/setupVars.conf"
  grep "PIHOLE_DNS_3=127.3.3.3" "$VOL_PH/setupVars.conf"
  grep "PIHOLE_DNS_4=127.4.4.4" "$VOL_PH/setupVars.conf"
  grep "IPV4_ADDRESS=10.0.0.0" "$VOL_PH/setupVars.conf"
  grep "WEBPASSWORD=6060d59351e8c2f48140f01b2c3f3b61652f396c53a5300ae239ebfbe7d5ff08" "$VOL_PH/setupVars.conf"
  grep "server=127.1.1.1" $VOL_DM/01-pihole.conf
  grep "server=127.2.2.2" $VOL_DM/01-pihole.conf
}
assert_new_settings

# When Restarting
docker restart $CONTAINER
# Then settings are still the manually changed values
assert_new_settings

# When removing/re-creating the container
docker rm -f $CONTAINER
CONTAINER="$(
  docker run -d \
    -v "$VOL_PH:/etc/pihole" \
    -v "$VOL_DM:/etc/dnsmasq.d" \
    -v "/dev/null:/etc/pihole/adlists.list" \
    --entrypoint='' \
    $IMAGE \
    tail -f /dev/null
)" # container backgrounded for multiple operations over time

# Then settings are still the manually changed values
assert_new_settings
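A sketch of invoking this new integration test directly; the image argument shown is the script's own default, and running it from the repository root is an assumption about the intended working directory:

```bash
# The optional first argument overrides the base image; the script appends
# "-${DEBIAN_VERSION}" (defaulting to buster) to whatever is passed.
DEBIAN_VERSION=buster ./test/test_volume_data.sh pihole:v5.0-amd64
```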
7 test/test_volumes.py (Normal file)
@@ -0,0 +1,7 @@
import pytest

@pytest.mark.skip('broke, needs further investigation.')
def test_volume_shell_script(arch, run_and_stream_command_output):
    # only one arch should be necessary
    if arch == 'amd64':
        run_and_stream_command_output('./test/test_volume_data.sh')
25 tox.ini
@@ -1,13 +1,18 @@
[tox]
envlist = py27
envlist = py38

[testenv]
whitelist_externals = docker
deps = -rrequirements.txt
# 2 parallel max b/c race condition with docker fixture (I think?)
commands = docker run --rm --privileged multiarch/qemu-user-static:register --reset
    ./Dockerfile.py -v --arch amd64
    pytest -vv -n auto -k amd64 ./test/
    ./Dockerfile.py -v --arch armhf --arch aarch64
    pytest -vv -n auto -k armhf ./test/
    pytest -vv -n auto -k aarch64 ./test/
commands = echo "Use ./gh-actions-test.sh instead for now"

# Currently out of commission post-python3 upgrade due to failed monkey patch of testinfra sh -> bash
#[testenv]
#whitelist_externals = docker
#deps = -rrequirements.txt
## 2 parallel max b/c race condition with docker fixture (I think?)
#commands = docker run --rm --privileged multiarch/qemu-user-static:register --reset
# ./Dockerfile.py -v --arch amd64
# pytest -vv -n auto -k amd64 ./test/
# ./Dockerfile.py -v --arch armhf --arch arm64 --arch armel
# pytest -vv -n auto -k arm64 ./test/
# pytest -vv -n auto -k armhf ./test/
# pytest -vv -n auto -k armel ./test/
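Since the tox commands are now reduced to a stub, here is a hedged sketch of running the previous workflow by hand, assembled from the commented lines above; gh-actions-test.sh is the suggested entry point, but its contents are not part of this diff:

```bash
# One-time QEMU binfmt registration for cross-arch builds:
docker run --rm --privileged multiarch/qemu-user-static:register --reset

# Build and test a single architecture:
./Dockerfile.py -v --arch amd64
pytest -vv -n auto -k amd64 ./test/
```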