# Copyright 2024 Wolfgang Hoschek AT mac DOT com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Documentation, definition of input data and ArgumentParser used by the 'bzfs' CLI."""
from __future__ import (
    annotations,
)
import argparse
import dataclasses
import itertools
from typing import (
    Final,
)

from bzfs_main.argparse_actions import (
    CheckPercentRange,
    DatasetPairsAction,
    DeleteDstSnapshotsExceptPlanAction,
    FileOrLiteralAction,
    IncludeSnapshotPlanAction,
    NewSnapshotFilterGroupAction,
    NonEmptyStringAction,
    SafeDirectoryNameAction,
    SafeFileNameAction,
    SSHConfigFileNameAction,
    TimeRangeAndRankRangeAction,
)
from bzfs_main.check_range import (
    CheckRange,
)
from bzfs_main.detect import (
    DISABLE_PRG,
    DUMMY_DATASET,
)
from bzfs_main.period_anchors import (
    PeriodAnchors,
)
from bzfs_main.utils import (
    ENV_VAR_PREFIX,
    PROG_NAME,
    format_dict,
)

# constants:
__version__: Final[str] = "1.14.0.dev0"
PROG_AUTHOR: Final[str] = "Wolfgang Hoschek"
EXCLUDE_DATASET_REGEXES_DEFAULT: Final[str] = r"(?:.*/)?[Tt][Ee]?[Mm][Pp][-_]?[0-9]*"  # skip tmp datasets by default
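# Illustrative examples (assuming the regex is matched against the full relative dataset path): this default
# matches names such as "tmp", "Temp", "TMP-1", "temp_2" and nested paths like "foo/bar/tmp0", but not "template",
# because the characters following "temp" are not digits.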
LOG_DIR_DEFAULT: Final[str] = PROG_NAME + "-logs"
SKIP_ON_ERROR_DEFAULT: Final[str] = "dataset"
CMP_CHOICES_ITEMS: Final[tuple[str, str, str]] = ("src", "dst", "all")
ZFS_RECV_O: Final[str] = "zfs_recv_o"
ZFS_RECV_X: Final[str] = "zfs_recv_x"
ZFS_RECV_GROUPS: Final[dict[str, str]] = {ZFS_RECV_O: "-o", ZFS_RECV_X: "-x", "zfs_set": ""}
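# ZFS_RECV_GROUPS maps each CLI property-group name to the 'zfs receive' flag it translates to: the zfs_recv_o
# group emits '-o name=value' pairs, the zfs_recv_x group emits '-x name' exclusions, and the zfs_set group
# (empty flag) is applied separately, presumably via 'zfs set' after receiving, as the group name suggests.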
ZFS_RECV_O_INCLUDE_REGEX_DEFAULT: Final[str] = "|".join(
    [
        "aclinherit",
        "aclmode",
        "acltype",
        "atime",
        "checksum",
        "compression",
        "copies",
        "logbias",
        "primarycache",
        "recordsize",
        "redundant_metadata",
        "relatime",
        "secondarycache",
        "snapdir",
        "sync",
        "xattr",
    ]
)
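# The join above yields a single alternation regex of the form "aclinherit|aclmode|...|xattr". Illustrative check
# (assuming property names are matched against the full string):
#   import re
#   assert re.fullmatch(ZFS_RECV_O_INCLUDE_REGEX_DEFAULT, "atime")
#   assert not re.fullmatch(ZFS_RECV_O_INCLUDE_REGEX_DEFAULT, "mountpoint")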


def argument_parser() -> argparse.ArgumentParser:
    """Returns the CLI parser used by bzfs."""
    create_src_snapshots_plan_example1: str = str({"test": {"": {"adhoc": 1}}}).replace(" ", "")
    create_src_snapshots_plan_example2: str = str({"prod": {"us-west-1": {"hourly": 36, "daily": 31}}}).replace(" ", "")
    delete_dst_snapshots_except_plan_example1: str = str(
        {
            "prod": {
                "onsite": {
                    "secondly": 40,
                    "minutely": 40,
                    "hourly": 36,
                    "daily": 31,
                    "weekly": 12,
                    "monthly": 18,
                    "yearly": 5,
                }
            }
        }
    ).replace(" ", "")
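    # Note: str(dict) yields a repr like "{'test': {'': {'adhoc': 1}}}"; .replace(" ", "") merely compacts it so the
    # example fits nicely into a single help-text line, e.g. create_src_snapshots_plan_example1 becomes
    # "{'test':{'':{'adhoc':1}}}".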

    # fmt: off
    parser: argparse.ArgumentParser = argparse.ArgumentParser(
        prog=PROG_NAME,
        allow_abbrev=False,
        formatter_class=argparse.RawTextHelpFormatter,
        description=f"""
*{PROG_NAME} is a backup command line tool that reliably replicates ZFS snapshots from a (local or remote)
source ZFS dataset (ZFS filesystem or ZFS volume) and its descendant datasets to a (local or remote)
destination ZFS dataset to make the destination dataset a recursively synchronized copy of the source dataset,
using zfs send/receive/rollback/destroy and ssh as directed. For example, {PROG_NAME} can be used to
incrementally replicate all ZFS snapshots since the most recent common snapshot from source to destination,
in order to help protect against data loss or ransomware.*

When run for the first time, {PROG_NAME} replicates the dataset and all its snapshots from the source to the
destination. On subsequent runs, {PROG_NAME} transfers only the data that has changed since the previous run,
i.e. it incrementally replicates to the destination all intermediate snapshots that have been created on
the source since the last run. Source ZFS snapshots older than the most recent common snapshot found on the
destination are auto-skipped.

Unless {PROG_NAME} is explicitly told to create snapshots on the source, it treats the source as read-only,
thus the source remains unmodified. With the --dryrun flag, {PROG_NAME} also treats the destination as read-only.
In normal operation, {PROG_NAME} treats the destination as append-only. Optional CLI flags are available to
delete destination snapshots and destination datasets as directed, for example to make the destination
identical to the source if the two have somehow diverged in unforeseen ways. This easily enables
(re)synchronizing the backup from the production state, as well as restoring the production state from
backup.

In the spirit of rsync, {PROG_NAME} supports a variety of powerful include/exclude filters that can be combined to
select which datasets, snapshots and properties to create, replicate, delete or compare.

Typically, a `cron` job on the source host runs `{PROG_NAME}` periodically to create new snapshots and prune outdated
snapshots on the source, whereas another `cron` job on the destination host runs `{PROG_NAME}` periodically to prune
outdated destination snapshots. Yet another `cron` job runs `{PROG_NAME}` periodically to replicate the recently created
snapshots from the source to the destination. The frequency of these periodic activities is typically every N milliseconds,
every second, minute, hour, day, week, month and/or year (or multiples thereof).
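
For example, illustrative crontab entries for such a division of labor (host names, dataset names and schedules are
hypothetical) might look like:

```# on the source host: create the snapshots that are due, once per hour
0 * * * * {PROG_NAME} tank1/foo/bar dummy --recursive --skip-replication --create-src-snapshots --create-src-snapshots-plan "{create_src_snapshots_plan_example2}"```

```# on the destination host: pull the recently created snapshots, once per hour
5 * * * * {PROG_NAME} root@src.example.com:tank1/foo/bar tank2/boo/bar --recursive```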

All {PROG_NAME} functions including snapshot creation, replication, deletion, monitoring, comparison, etc. happily work
with any snapshots in any format, even created or managed by third party ZFS snapshot management tools, including manual
zfs snapshot/destroy. All functions can also be used independently. That is, if you wish you can use {PROG_NAME} just
for creating snapshots, or just for replicating, or just for deleting/pruning, or just for monitoring, or just for
comparing snapshot lists.

The source 'pushes to' the destination whereas the destination 'pulls from' the source. {PROG_NAME} is installed
and executed on the 'initiator' host which can be either the host that contains the source dataset (push mode),
or the destination dataset (pull mode), or both datasets (local mode, no network required, no ssh required),
or any third-party (even non-ZFS OSX) host as long as that host is able to SSH (via standard 'ssh' OpenSSH CLI) into
both the source and destination host (pull-push mode). In pull-push mode the source 'zfs send's the data stream
to the initiator which immediately pipes the stream (without storing anything locally) to the destination
host that 'zfs receive's it. Pull-push mode means that {PROG_NAME} need not be installed or executed on either
source or destination host. Only the underlying 'zfs' CLI must be installed on both source and destination host.
{PROG_NAME} can run as root or non-root user, in the latter case via a) sudo or b) when granted corresponding
ZFS permissions by administrators via 'zfs allow' delegation mechanism.

{PROG_NAME} is written in Python and continuously runs a wide set of unit tests and integration tests to ensure
coverage and compatibility with old and new versions of ZFS on Linux and FreeBSD, on all Python
versions ≥ 3.9 (including latest stable which is currently python-3.14).

{PROG_NAME} is a stand-alone program with zero required dependencies, akin to a
stand-alone shell script or binary executable. It is designed to be able to run in restricted barebones server
environments. No external Python packages are required; indeed no Python package management at all is required.
You can just symlink the program wherever you like, for example into /usr/local/bin or similar, and simply run it like
any stand-alone shell script or binary executable.

{PROG_NAME} automatically replicates the snapshots of multiple datasets in parallel for best performance.
Similarly, it quickly deletes (or monitors or compares) snapshots of multiple datasets in parallel. Atomic snapshots can be
created as frequently as every N milliseconds.

Optionally, {PROG_NAME} applies bandwidth rate-limiting and progress monitoring (via 'pv' CLI) during 'zfs
send/receive' data transfers. When run across the network, {PROG_NAME} also transparently inserts lightweight
data compression (via 'zstd -1' CLI) and efficient data buffering (via 'mbuffer' CLI) into the pipeline
between network endpoints during 'zfs send/receive' network transfers. If one of these utilities is not
installed this is auto-detected, and the operation continues reliably without the corresponding auxiliary
feature.

# Periodic Jobs with bzfs_jobrunner

The software also ships with the [bzfs_jobrunner](README_bzfs_jobrunner.md) companion program, which is a convenience
wrapper around `{PROG_NAME}` that simplifies efficient periodic ZFS snapshot creation, replication, pruning, and monitoring,
across a fleet of N source hosts and M destination hosts, using a single shared fleet-wide
[jobconfig](bzfs_tests/bzfs_job_example.py) script. For example, this simplifies the deployment of an efficient
geo-replicated backup service where each of the M destination hosts is located in a separate geographic region and pulls
replicas from (the same set of) N source hosts. It also simplifies low latency replication from a primary to a secondary or
to M read replicas, or backup to removable drives, etc.

# Quickstart

* Create adhoc atomic snapshots without a schedule:

```$ {PROG_NAME} tank1/foo/bar dummy --recursive --skip-replication --create-src-snapshots
--create-src-snapshots-plan "{create_src_snapshots_plan_example1}"```

```$ zfs list -t snapshot tank1/foo/bar

tank1/foo/bar@test_2024-11-06_08:30:05_adhoc```

* Create periodic atomic snapshots on a schedule, every hour and every day, by launching this from a periodic `cron` job:

```$ {PROG_NAME} tank1/foo/bar dummy --recursive --skip-replication --create-src-snapshots
--create-src-snapshots-plan "{create_src_snapshots_plan_example2}"```

```$ zfs list -t snapshot tank1/foo/bar

tank1/foo/bar@prod_us-west-1_2024-11-06_08:30:05_daily

tank1/foo/bar@prod_us-west-1_2024-11-06_08:30:05_hourly```

Note: A periodic snapshot is created if it is due per the schedule indicated by its suffix (e.g. `_daily` or `_hourly`
or `_minutely` or `_2secondly` or `_100millisecondly`), or if the --create-src-snapshots-even-if-not-due flag is specified,
or if the most recent scheduled snapshot is somehow missing. In the latter case {PROG_NAME} immediately creates a snapshot
(named with the current time, not backdated to the missed time), and then resumes the original schedule. If the suffix is
`_adhoc` or not a known period then a snapshot is considered non-periodic and is thus created immediately regardless of the
creation time of any existing snapshot.

* Replication example in local mode (no network, no ssh), to replicate ZFS dataset tank1/foo/bar to tank2/boo/bar:

```$ {PROG_NAME} tank1/foo/bar tank2/boo/bar```

```$ zfs list -t snapshot tank1/foo/bar

tank1/foo/bar@prod_us-west-1_2024-11-06_08:30:05_daily

tank1/foo/bar@prod_us-west-1_2024-11-06_08:30:05_hourly```

```$ zfs list -t snapshot tank2/boo/bar

tank2/boo/bar@prod_us-west-1_2024-11-06_08:30:05_daily

tank2/boo/bar@prod_us-west-1_2024-11-06_08:30:05_hourly```

* Same example in pull mode:

```$ {PROG_NAME} root@host1.example.com:tank1/foo/bar tank2/boo/bar```

* Same example in push mode:

```$ {PROG_NAME} tank1/foo/bar root@host2.example.com:tank2/boo/bar```

* Same example in pull-push mode:

```$ {PROG_NAME} root@host1:tank1/foo/bar root@host2:tank2/boo/bar```

* Example in local mode (no network, no ssh) to recursively replicate ZFS dataset tank1/foo/bar and its descendant
datasets to tank2/boo/bar:

```$ {PROG_NAME} tank1/foo/bar tank2/boo/bar --recursive```

```$ zfs list -t snapshot -r tank1/foo/bar

tank1/foo/bar@prod_us-west-1_2024-11-06_08:30:05_daily

tank1/foo/bar@prod_us-west-1_2024-11-06_08:30:05_hourly

tank1/foo/bar/baz@prod_us-west-1_2024-11-06_08:40:00_daily

tank1/foo/bar/baz@prod_us-west-1_2024-11-06_08:40:00_hourly```

```$ zfs list -t snapshot -r tank2/boo/bar

tank2/boo/bar@prod_us-west-1_2024-11-06_08:30:05_daily

tank2/boo/bar@prod_us-west-1_2024-11-06_08:30:05_hourly

tank2/boo/bar/baz@prod_us-west-1_2024-11-06_08:40:00_daily

tank2/boo/bar/baz@prod_us-west-1_2024-11-06_08:40:00_hourly```

* Example that makes destination identical to source even if the two have drastically diverged:

```$ {PROG_NAME} tank1/foo/bar tank2/boo/bar --recursive --force --delete-dst-datasets --delete-dst-snapshots```

* Replicate all daily snapshots created during the last 7 days, and at the same time ensure that the latest 7 daily
snapshots (per dataset) are replicated regardless of creation time:

```$ {PROG_NAME} tank1/foo/bar tank2/boo/bar --recursive --include-snapshot-regex '.*_daily'
--include-snapshot-times-and-ranks '7 days ago..anytime' 'latest 7'```

Note: The example above compares the specified times against the standard ZFS 'creation' time property of the snapshots
(which is a UTC Unix time in integer seconds), rather than against a timestamp that may be part of the snapshot name.

* Delete all daily snapshots older than 7 days, but ensure that the latest 7 daily snapshots (per dataset) are retained
regardless of creation time:

```$ {PROG_NAME} {DUMMY_DATASET} tank2/boo/bar --dryrun --recursive --skip-replication --delete-dst-snapshots
--include-snapshot-regex '.*_daily' --include-snapshot-times-and-ranks notime 'all except latest 7'
--include-snapshot-times-and-ranks 'anytime..7 days ago'```

Note: This also prints how many GB of disk space in total would be freed if the command were to be run for real without
the --dryrun flag.

* Delete all daily snapshots older than 7 days, but ensure that the latest 7 daily snapshots (per dataset) are retained
regardless of creation time. Additionally, only delete a snapshot if no corresponding snapshot or bookmark exists in
the source dataset (same as above except replace the 'dummy' source with 'tank1/foo/bar'):

```$ {PROG_NAME} tank1/foo/bar tank2/boo/bar --dryrun --recursive --skip-replication --delete-dst-snapshots
--include-snapshot-regex '.*_daily' --include-snapshot-times-and-ranks notime 'all except latest 7'
--include-snapshot-times-and-ranks 'anytime..7 days ago'```

* Delete all daily snapshots older than 7 days, but ensure that the latest 7 daily snapshots (per dataset) are retained
regardless of creation time. Additionally, only delete a snapshot if no corresponding snapshot exists in the source
dataset (same as above except append 'no-crosscheck'):

```$ {PROG_NAME} tank1/foo/bar tank2/boo/bar --dryrun --recursive --skip-replication --delete-dst-snapshots
--include-snapshot-regex '.*_daily' --include-snapshot-times-and-ranks notime 'all except latest 7'
--include-snapshot-times-and-ranks 'anytime..7 days ago' --delete-dst-snapshots-no-crosscheck```

* Delete all daily bookmarks older than 90 days, but retain the latest 200 daily bookmarks (per dataset) regardless
of creation time:

```$ {PROG_NAME} {DUMMY_DATASET} tank1/foo/bar --dryrun --recursive --skip-replication --delete-dst-snapshots=bookmarks
--include-snapshot-regex '.*_daily' --include-snapshot-times-and-ranks notime 'all except latest 200'
--include-snapshot-times-and-ranks 'anytime..90 days ago'```

* Delete all tmp datasets within tank2/boo/bar:

```$ {PROG_NAME} {DUMMY_DATASET} tank2/boo/bar --dryrun --recursive --skip-replication --delete-dst-datasets
--include-dataset-regex '(.*/)?tmp.*' --exclude-dataset-regex '!.*'```

* Retain all secondly snapshots that were created less than 40 seconds ago, and ensure that the latest 40
secondly snapshots (per dataset) are retained regardless of creation time. Same for 40 minutely snapshots, 36 hourly
snapshots, 31 daily snapshots, 12 weekly snapshots, 18 monthly snapshots, and 5 yearly snapshots:

```$ {PROG_NAME} {DUMMY_DATASET} tank2/boo/bar --dryrun --recursive --skip-replication --delete-dst-snapshots
--delete-dst-snapshots-except
--include-snapshot-regex '.*_secondly' --include-snapshot-times-and-ranks '40 seconds ago..anytime' 'latest 40'
--new-snapshot-filter-group
--include-snapshot-regex '.*_minutely' --include-snapshot-times-and-ranks '40 minutes ago..anytime' 'latest 40'
--new-snapshot-filter-group
--include-snapshot-regex '.*_hourly' --include-snapshot-times-and-ranks '36 hours ago..anytime' 'latest 36'
--new-snapshot-filter-group
--include-snapshot-regex '.*_daily' --include-snapshot-times-and-ranks '31 days ago..anytime' 'latest 31'
--new-snapshot-filter-group
--include-snapshot-regex '.*_weekly' --include-snapshot-times-and-ranks '12 weeks ago..anytime' 'latest 12'
--new-snapshot-filter-group
--include-snapshot-regex '.*_monthly' --include-snapshot-times-and-ranks '18 months ago..anytime' 'latest 18'
--new-snapshot-filter-group
--include-snapshot-regex '.*_yearly' --include-snapshot-times-and-ranks '5 years ago..anytime' 'latest 5'```

For convenience, the lengthy command line above can be expressed in a more concise way, like so:

```$ {PROG_NAME} {DUMMY_DATASET} tank2/boo/bar --dryrun --recursive --skip-replication --delete-dst-snapshots
--delete-dst-snapshots-except-plan "{delete_dst_snapshots_except_plan_example1}"```

* Compare source and destination dataset trees recursively, for example to check if all recently taken snapshots have
been successfully replicated by a periodic job. List snapshots only contained in src (tagged with 'src'),
only contained in dst (tagged with 'dst'), and contained in both src and dst (tagged with 'all'), restricted to hourly
and daily snapshots taken within the last 7 days, excluding the last 4 hours (to allow for some slack/stragglers),
excluding temporary datasets:

```$ {PROG_NAME} tank1/foo/bar tank2/boo/bar --skip-replication --compare-snapshot-lists=src+dst+all --recursive
--include-snapshot-regex '.*_(hourly|daily)' --include-snapshot-times-and-ranks '7 days ago..4 hours ago'
--exclude-dataset-regex '(.*/)?tmp.*'```

If the resulting TSV output file contains zero lines starting with the prefix 'src' and zero lines starting with the
prefix 'dst' then no source snapshots are missing on the destination, and no destination snapshots are missing
on the source, indicating that the periodic replication and pruning jobs perform as expected. The TSV output is sorted
by dataset, and by ZFS creation time within each dataset - the first and last line prefixed with 'all' contain the
metadata of the oldest and latest common snapshot, respectively. The --compare-snapshot-lists option also directly
logs various summary stats, such as the metadata of the latest common snapshot, latest snapshots and oldest snapshots,
as well as the time diff between the latest common snapshot and latest snapshot only in src (and only in dst), as well
as how many src snapshots and how many GB of data are missing on dst, etc.

* Example with further options:

```$ {PROG_NAME} tank1/foo/bar root@host2.example.com:tank2/boo/bar --recursive
--exclude-snapshot-regex '.*_(secondly|minutely)' --exclude-snapshot-regex 'test_.*'
--include-snapshot-times-and-ranks '7 days ago..anytime' 'latest 7' --exclude-dataset /tank1/foo/bar/temporary
--exclude-dataset /tank1/foo/bar/baz/trash --exclude-dataset-regex '(.*/)?private'
--exclude-dataset-regex '(.*/)?[Tt][Ee]?[Mm][Pp][-_]?[0-9]*'```
""")  # noqa: S608

    parser.add_argument(
        "--no-argument-file", action="store_true",
        # help="Disable support for reading the names of datasets and snapshots from a file.\n\n")
        help=argparse.SUPPRESS)
    parser.add_argument(
        "root_dataset_pairs", nargs="+", action=DatasetPairsAction, metavar="SRC_DATASET DST_DATASET",
        help="SRC_DATASET: "
             "Source ZFS dataset (and its descendants) that will be replicated. Can be a ZFS filesystem or ZFS volume. "
             "Format is [[user@]host:]dataset. The host name can also be an IPv4 address (or an IPv6 address where "
             "each ':' colon character must be replaced with a '|' pipe character for disambiguation). If the "
             "host name is '-', the dataset will be on the local host, and the corresponding SSH leg will be omitted. "
             "The same is true if the host is omitted and the dataset does not contain a ':' colon at the same time. "
             "Local dataset examples: `tank1/foo/bar`, `tank1`, `-:tank1/foo/bar:baz:boo` "
             "Remote dataset examples: `host:tank1/foo/bar`, `host.example.com:tank1/foo/bar`, "
             "`root@host:tank`, `root@host.example.com:tank1/foo/bar`, `user@127.0.0.1:tank1/foo/bar:baz:boo`, "
             "`user@||1:tank1/foo/bar:baz:boo`. "
             "The first component of the ZFS dataset name is the ZFS pool name, here `tank1`. "
             "If the option starts with a `+` prefix then dataset names are read from the UTF-8 text file given "
             "after the `+` prefix, with each line in the file containing a SRC_DATASET and a DST_DATASET, "
             "separated by a tab character. The basename must contain the substring 'bzfs_argument_file'. "
             "Example: `+root_dataset_names_bzfs_argument_file.txt`, "
             "`+/path/to/root_dataset_names_bzfs_argument_file.txt`\n\n"
             "DST_DATASET: "
             "Destination ZFS dataset for replication and deletion. Has same naming format as SRC_DATASET. During "
             "replication, destination datasets that do not yet exist are created as necessary, along with their "
             "parents and ancestors.\n\n"
             f"*Performance Note:* {PROG_NAME} automatically replicates multiple datasets in parallel. It replicates "
             "snapshots in parallel across datasets and serially within a dataset. All child datasets of a dataset "
             "may be processed in parallel. For consistency, processing of a dataset only starts after processing of "
             "all its ancestor datasets has completed. Further, when a thread is ready to start processing another "
             "dataset, it chooses the next dataset wrt. lexicographical sort order from the datasets that are "
             "currently available for start of processing. Initially, only the roots of the selected dataset subtrees "
             "are available for start of processing. The degree of parallelism is configurable with the --threads "
             "option (see below).\n\n")
    parser.add_argument(
        "--recursive", "-r", action="store_true",
        help="During snapshot creation, replication, deletion and comparison, also consider descendant datasets, i.e. "
             "datasets within the dataset tree, including children, and children of children, etc.\n\n")
    parser.add_argument(
        "--include-dataset", action=FileOrLiteralAction, nargs="+", default=[], metavar="DATASET",
        help="During snapshot creation, replication, deletion and comparison, select any ZFS dataset (and its descendants) "
             "that is contained within SRC_DATASET (DST_DATASET in case of deletion) if its dataset name is one of the "
             "given include dataset names but none of the exclude dataset names. If a dataset is excluded its descendants "
             "are automatically excluded too, and this decision is never reconsidered even for the descendants because "
             "exclude takes precedence over include.\n\n"
             "A dataset name is absolute if the specified dataset is prefixed by `/`, e.g. `/tank/baz/tmp`. "
             "Otherwise the dataset name is relative wrt. source and destination, e.g. `baz/tmp` if the source "
             "is `tank`.\n\n"
             "This option is automatically translated to an --include-dataset-regex (see below) and can be "
             "specified multiple times.\n\n"
             "If the option starts with a `+` prefix then dataset names are read from the newline-separated "
             "UTF-8 text file given after the `+` prefix, one dataset per line inside of the text file. The basename "
             "must contain the substring 'bzfs_argument_file'.\n\n"
             "Examples: `/tank/baz/tmp` (absolute), `baz/tmp` (relative), "
             "`+dataset_names_bzfs_argument_file.txt`, `+/path/to/dataset_names_bzfs_argument_file.txt`\n\n")
    parser.add_argument(
        "--exclude-dataset", action=FileOrLiteralAction, nargs="+", default=[], metavar="DATASET",
        help="Same syntax as --include-dataset (see above) except that the option is automatically translated to an "
             "--exclude-dataset-regex (see below).\n\n")
    parser.add_argument(
        "--include-dataset-regex", action=FileOrLiteralAction, nargs="+", default=[], metavar="REGEX",
        help="During snapshot creation, replication, deletion and comparison, select any ZFS dataset (and its "
             "descendants) that is contained within SRC_DATASET (DST_DATASET in case of deletion) if its relative dataset "
             "path (e.g. `baz/tmp`) wrt. SRC_DATASET (DST_DATASET in case of deletion) matches at least one of the given "
             "include regular expressions but none of the exclude regular expressions. "
             "If a dataset is excluded its descendants are automatically excluded too, and this decision is never "
             "reconsidered even for the descendants because exclude takes precedence over include.\n\n"
             "This option can be specified multiple times. "
             "A leading `!` character indicates logical negation, i.e. the regex matches if the regex with the "
             "leading `!` character removed does not match.\n\n"
             "If the option starts with a `+` prefix then regexes are read from the newline-separated "
             "UTF-8 text file given after the `+` prefix, one regex per line inside of the text file. The basename "
             "must contain the substring 'bzfs_argument_file'.\n\n"
             "Default: `.*` (include all datasets).\n\n"
             "Examples: `baz/tmp`, `(.*/)?doc[^/]*/(private|confidential).*`, `!public`, "
             "`+dataset_regexes_bzfs_argument_file.txt`, `+/path/to/dataset_regexes_bzfs_argument_file.txt`\n\n")
    parser.add_argument(
        "--exclude-dataset-regex", action=FileOrLiteralAction, nargs="+", default=[], metavar="REGEX",
        help="Same syntax as --include-dataset-regex (see above) except that the default is "
             f"`{EXCLUDE_DATASET_REGEXES_DEFAULT}` (exclude tmp datasets). Example: `!.*` (exclude no dataset)\n\n")
    parser.add_argument(
        "--exclude-dataset-property", default=None, action=NonEmptyStringAction, metavar="STRING",
        help="The name of a ZFS dataset user property (optional). If this option is specified, the effective value "
             "(potentially inherited) of that user property is read via 'zfs list' for each selected source dataset "
             "to determine whether the dataset will be included or excluded, as follows:\n\n"
             "a) Value is 'true' or '-' or empty string or the property is missing: Include the dataset.\n\n"
             "b) Value is 'false': Exclude the dataset and its descendants.\n\n"
             "c) Value is a comma-separated list of host names (no spaces, for example: "
             "'store001,store002'): Include the dataset if the host name of "
             f"the host executing {PROG_NAME} is contained in the list, otherwise exclude the dataset and its "
             "descendants.\n\n"
             "If a dataset is excluded its descendants are automatically excluded too, and the property values of the "
             "descendants are ignored because exclude takes precedence over include.\n\n"
             "Examples: 'syncoid:sync', 'com.example.eng.project.x:backup'\n\n"
             "*Note:* The use of --exclude-dataset-property is discouraged for most use cases. It is more flexible, "
             "more powerful, *and* more efficient to instead use a combination of --include/exclude-dataset-regex "
             "and/or --include/exclude-dataset to achieve the same or better outcome.\n\n")
    parser.add_argument(
        "--include-snapshot-regex", action=FileOrLiteralAction, nargs="+", default=[], metavar="REGEX",
        help="During replication, deletion and comparison, select any source ZFS snapshot that has a name (i.e. the part "
             "after the '@') that matches at least one of the given include regular expressions but none of the "
             "exclude regular expressions. If a snapshot is excluded this decision is never reconsidered because "
             "exclude takes precedence over include.\n\n"
             "This option can be specified multiple times. "
             "A leading `!` character indicates logical negation, i.e. the regex matches if the regex with the "
             "leading `!` character removed does not match.\n\n"
             "Default: `.*` (include all snapshots). "
             "Examples: `test_.*`, `!prod_.*`, `.*_(hourly|frequent)`, `!.*_(weekly|daily)`\n\n"
             "*Note:* All --include/exclude-snapshot-* CLI option groups are combined into a mini filter pipeline. "
             "A filter pipeline is executed in the order given on the command line, left to right. For example if "
             "--include-snapshot-times-and-ranks (see below) is specified on the command line before "
             "--include/exclude-snapshot-regex, then --include-snapshot-times-and-ranks will be applied before "
             "--include/exclude-snapshot-regex. The pipeline results would not always be the same if the order were "
             "reversed. Order matters.\n\n"
             "*Note:* During replication, bookmarks are always retained aka selected in order to help find common "
             "snapshots between source and destination.\n\n")
    parser.add_argument(
        "--exclude-snapshot-regex", action=FileOrLiteralAction, nargs="+", default=[], metavar="REGEX",
        help="Same syntax as --include-snapshot-regex (see above) except that the default is to exclude no "
             "snapshots.\n\n")
    parser.add_argument(
        "--include-snapshot-times-and-ranks", action=TimeRangeAndRankRangeAction, nargs="+", default=[],
        metavar=("TIMERANGE", "RANKRANGE"),
        help="This option takes as input parameters a time range filter and an optional rank range filter. It "
             "separately computes the results for each filter and selects the UNION of both results. "
             "To instead use a pure rank range filter (no UNION), or a pure time range filter (no UNION), simply "
             "use 'notime' aka '0..0' to indicate an empty time range, or omit the rank range, respectively. "
             "This option can be specified multiple times.\n\n"
             "<b>*Replication Example (UNION):* </b>\n\n"
             "Specify to replicate all daily snapshots created during the last 7 days, "
             "and at the same time ensure that the latest 7 daily snapshots (per dataset) are replicated regardless "
             "of creation time, like so: "
             "`--include-snapshot-regex '.*_daily' --include-snapshot-times-and-ranks '7 days ago..anytime' 'latest 7'`\n\n"
             "<b>*Deletion Example (no UNION):* </b>\n\n"
             "Specify to delete all daily snapshots older than 7 days, but ensure that the "
             "latest 7 daily snapshots (per dataset) are retained regardless of creation time, like so: "
             "`--include-snapshot-regex '.*_daily' --include-snapshot-times-and-ranks notime 'all except latest 7' "
             "--include-snapshot-times-and-ranks 'anytime..7 days ago'`"
             "\n\n"
             "This helps to safely cope with irregular scenarios where no snapshots were created or received within "
             "the last 7 days, or where more than 7 daily snapshots were created within the last 7 days. It can also "
             "help to avoid accidental pruning of the last snapshot that source and destination have in common.\n\n"
             ""
             "<b>*TIMERANGE:* </b>\n\n"
             "The ZFS 'creation' time of a snapshot (and bookmark) must fall into this time range in order for the "
             "snapshot to be included. The time range consists of a 'start' time, followed by a '..' separator, "
             "followed by an 'end' time. For example '2024-01-01..2024-04-01', or 'anytime..anytime' aka `*..*` aka all "
             "times, or 'notime' aka '0..0' aka empty time range. Only snapshots (and bookmarks) in the half-open time "
             "range [start, end) are included; other snapshots (and bookmarks) are excluded. If a snapshot is excluded "
             "this decision is never reconsidered because exclude takes precedence over include. Each of the two specified "
             "times can take any of the following forms:\n\n"
             "* a) `anytime` aka `*` wildcard; represents negative or positive infinity.\n\n"
             "* b) a non-negative integer representing a UTC Unix time in seconds. Example: 1728109805\n\n"
             "* c) an ISO 8601 datetime string with or without timezone. Examples: '2024-10-05', "
             "'2024-10-05T14:48:55', '2024-10-05T14:48:55+02', '2024-10-05T14:48:55-04:30'. If the datetime string "
             "does not contain time zone info then it is assumed to be in the local time zone. Timezone string support "
             "requires Python ≥ 3.11.\n\n"
             "* d) a duration that indicates how long ago from the current time, using the following syntax: "
             "a non-negative integer, followed by an optional space, followed by a duration unit that is "
             "*one* of 'seconds', 'secs', 'minutes', 'mins', 'hours', 'days', 'weeks', 'months', 'years', "
             "followed by an optional space, followed by the word 'ago'. "
             "Examples: '0secs ago', '40 mins ago', '36hours ago', '90days ago', '12weeksago'.\n\n"
             "* Note: This option compares the specified time against the standard ZFS 'creation' time property of the "
             "snapshot (which is a UTC Unix time in integer seconds), rather than against a timestamp that may be "
             "part of the snapshot name. You can list the ZFS creation time of snapshots and bookmarks as follows: "
             "`zfs list -t snapshot,bookmark -o name,creation -s creation -d 1 $SRC_DATASET` (optionally add "
             "the -p flag to display UTC Unix time in integer seconds).\n\n"
             "*Note:* During replication, bookmarks are always retained aka selected in order to help find common "
             "snapshots between source and destination.\n\n"
             ""
             "<b>*RANKRANGE:* </b>\n\n"
             "Specifies to include the N (or N%%) oldest snapshots or latest snapshots, and exclude all other "
             "snapshots (default: include no snapshots). Snapshots are sorted by creation time (actually, by the "
             "'createtxg' ZFS property, which serves the same purpose but is more precise). The rank position of a "
             "snapshot is the zero-based integer position of the snapshot within that sorted list. A rank consists of the "
             "optional words 'all except' (followed by an optional space), followed by the word 'oldest' or 'latest', "
             "followed by a non-negative integer, followed by an optional '%%' percent sign. A rank range consists of a "
             "lower rank, followed by a '..' separator, followed by a higher rank. "
             "If the optional lower rank is missing it is assumed to be 0. Examples:\n\n"
             "* 'oldest 10%%' aka 'oldest 0..oldest 10%%' (include the oldest 10%% of all snapshots)\n\n"
             "* 'latest 10%%' aka 'latest 0..latest 10%%' (include the latest 10%% of all snapshots)\n\n"
             "* 'all except latest 10%%' aka 'oldest 90%%' aka 'oldest 0..oldest 90%%' (include all snapshots except the "
             "latest 10%% of all snapshots)\n\n"
             "* 'oldest 90' aka 'oldest 0..oldest 90' (include the oldest 90 snapshots)\n\n"
             "* 'latest 90' aka 'latest 0..latest 90' (include the latest 90 snapshots)\n\n"
             "* 'all except oldest 90' aka 'oldest 90..oldest 100%%' (include all snapshots except the oldest 90 snapshots)"
             "\n\n"
             "* 'all except latest 90' aka 'latest 90..latest 100%%' (include all snapshots except the latest 90 snapshots)"
             "\n\n"
             "* 'latest 1' aka 'latest 0..latest 1' (include the latest snapshot)\n\n"
             "* 'all except latest 1' aka 'latest 1..latest 100%%' (include all snapshots except the latest snapshot)\n\n"
             "* 'oldest 2' aka 'oldest 0..oldest 2' (include the oldest 2 snapshots)\n\n"
             "* 'all except oldest 2' aka 'oldest 2..oldest 100%%' (include all snapshots except the oldest 2 snapshots)\n\n"
             "* 'oldest 100%%' aka 'oldest 0..oldest 100%%' (include all snapshots)\n\n"
             "* 'oldest 0%%' aka 'oldest 0..oldest 0%%' (include no snapshots)\n\n"
             "* 'oldest 0' aka 'oldest 0..oldest 0' (include no snapshots)\n\n"
             "*Note:* If multiple RANKRANGEs are specified within a single --include-snapshot-times-and-ranks option, each "
575 "subsequent rank range operates on the output of the preceding rank rage.\n\n"
576 "*Note:* Percentage calculations are not based on the number of snapshots "
577 "contained in the dataset on disk, but rather based on the number of snapshots arriving at the filter. "
578 "For example, if only two daily snapshots arrive at the filter because a prior filter excludes hourly "
579 "snapshots, then 'latest 10' will only include these two daily snapshots, and 'latest 50%%' will only "
580 "include one of these two daily snapshots.\n\n"
581 "*Note:* During replication, bookmarks are always retained aka selected in order to help find common "
582 "snapshots between source and destination. Bookmarks do not count towards N or N%% wrt. rank.\n\n"
583 "*Note:* If a snapshot is excluded this decision is never reconsidered because exclude takes precedence "
584 "over include.\n\n")

    src_snapshot_plan_example = {
        "prod": {
            "onsite": {"secondly": 40, "minutely": 40, "hourly": 36, "daily": 31, "weekly": 12, "monthly": 18, "yearly": 5},
            "us-west-1": {"secondly": 0, "minutely": 0, "hourly": 36, "daily": 31, "weekly": 12, "monthly": 18, "yearly": 5},
            "eu-west-1": {"secondly": 0, "minutely": 0, "hourly": 36, "daily": 31, "weekly": 12, "monthly": 18, "yearly": 5},
        },
        "test": {
            "offsite": {"12hourly": 42, "weekly": 12},
            "onsite": {"100millisecondly": 42},
        },
    }
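    # The plan dict nests organization -> target -> period -> integer count. For example, the 'prod' organization
    # takes 'onsite' snapshots at all periods, while 'us-west-1' and 'eu-west-1' disable the 'secondly' and
    # 'minutely' periods via a zero count. The same structure is shared by --create-src-snapshots-plan,
    # --include-snapshot-plan and --delete-dst-snapshots-except-plan, where the count is the number of the most
    # recent snapshots to retain (see the respective help texts below).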
    parser.add_argument(
        "--include-snapshot-plan", action=IncludeSnapshotPlanAction, default=None, metavar="DICT_STRING",
        help="Replication periods to be used if replicating snapshots within the selected destination datasets. "
             "Has the same format as --create-src-snapshots-plan and --delete-dst-snapshots-except-plan (see below). "
             "Snapshots that do not match a period will not be replicated. To avoid unexpected surprises, make sure to "
             "carefully specify ALL snapshot names and periods that shall be replicated, in combination with --dryrun.\n\n"
             f"Example: `{format_dict(src_snapshot_plan_example)}`. This example will, for the organization 'prod' and the "
             "intended logical target 'onsite', replicate secondly snapshots that were created less than 40 seconds ago, "
             "yet replicate the latest 40 secondly snapshots regardless of creation time. Analogously for the latest 40 "
             "minutely snapshots, the latest 36 hourly snapshots, etc. "
             "Note: A zero within a period (e.g. 'hourly': 0) indicates that no snapshots shall be replicated for the given "
             "period.\n\n"
             "Note: --include-snapshot-plan is a convenience option that auto-generates a series of the following other "
             "options: --new-snapshot-filter-group, --include-snapshot-regex, --include-snapshot-times-and-ranks\n\n")
    parser.add_argument(
        "--new-snapshot-filter-group", action=NewSnapshotFilterGroupAction, nargs=0,
        help="Starts a new snapshot filter group containing separate --{include|exclude}-snapshot-* filter options. The "
             "program separately computes the results for each filter group and selects the UNION of all results. "
             "This option can be specified multiple times and serves as a separator between groups. Example:\n\n"
             "Delete all minutely snapshots older than 40 minutes, but ensure that the latest 40 minutely snapshots (per "
             "dataset) are retained regardless of creation time. Additionally, delete all hourly snapshots older than 36 "
             "hours, but ensure that the latest 36 hourly snapshots (per dataset) are retained regardless of creation time. "
             "Additionally, delete all daily snapshots older than 31 days, but ensure that the latest 31 daily snapshots "
             "(per dataset) are retained regardless of creation time: "
             f"`{PROG_NAME} {DUMMY_DATASET} tank2/boo/bar --dryrun --recursive --skip-replication --delete-dst-snapshots "
             "--include-snapshot-regex '.*_minutely' --include-snapshot-times-and-ranks notime 'all except latest 40' "
             "--include-snapshot-times-and-ranks 'anytime..40 minutes ago' "
             "--new-snapshot-filter-group "
             "--include-snapshot-regex '.*_hourly' --include-snapshot-times-and-ranks notime 'all except latest 36' "
             "--include-snapshot-times-and-ranks 'anytime..36 hours ago' "
             "--new-snapshot-filter-group "
             "--include-snapshot-regex '.*_daily' --include-snapshot-times-and-ranks notime 'all except latest 31' "
             "--include-snapshot-times-and-ranks 'anytime..31 days ago'`\n\n")
    parser.add_argument(
        "--create-src-snapshots", action="store_true",
        help="Do nothing if the --create-src-snapshots flag is missing. Otherwise, before the replication step (see below), "
             "atomically create new snapshots of the source datasets selected via --{include|exclude}-dataset* policy. "
             "The names of the snapshots can be configured via --create-src-snapshots-* suboptions (see below). "
             "To create snapshots only, without any other processing such as replication, etc., consider using this flag "
             "together with the --skip-replication flag.\n\n"
             "A periodic snapshot is created if it is due per the schedule indicated by --create-src-snapshots-plan "
             "(for example '_daily' or '_hourly' or '_10minutely' or '_2secondly' or '_100millisecondly'), or if the "
             "--create-src-snapshots-even-if-not-due flag is specified, or if the most recent scheduled snapshot "
             f"is somehow missing. In the latter case {PROG_NAME} immediately creates a snapshot (tagged with the current "
             "time, not backdated to the missed time), and then resumes the original schedule.\n\n"
             "If the snapshot suffix is '_adhoc' or not a known period then a snapshot is considered "
             "non-periodic and is thus created immediately regardless of the creation time of any existing snapshot.\n\n"
             "The implementation attempts to fit as many datasets as possible into a single (atomic) 'zfs snapshot' command "
             "line, using lexicographical sort order, and using 'zfs snapshot -r' to the extent that this is compatible "
             "with the actual results of the schedule and the actual results of the --{include|exclude}-dataset* pruning "
             "policy. The snapshots of all datasets that fit "
             "within the same single 'zfs snapshot' CLI invocation will be taken within the same ZFS transaction group, and "
             "correspondingly have identical 'createtxg' ZFS property (but not necessarily identical 'creation' ZFS time "
             "property as ZFS actually provides no such guarantee), and thus be consistent. Dataset names that can't fit "
             "into a single command line are spread over multiple command line invocations, respecting the limits that the "
             "operating system places on the maximum length of a single command line, per `getconf ARG_MAX`.\n\n"
             f"Note: All {PROG_NAME} functions including snapshot creation, replication, deletion, monitoring, comparison, "
             "etc. happily work with any snapshots in any format, even created or managed by third party ZFS snapshot "
             "management tools, including manual zfs snapshot/destroy.\n\n")
    parser.add_argument(
        "--create-src-snapshots-plan", default=None, type=str, metavar="DICT_STRING",
        help="Creation periods that specify a schedule for when new snapshots shall be created on src within the selected "
             "datasets. Has the same format as --delete-dst-snapshots-except-plan.\n\n"
             f"Example: `{format_dict(src_snapshot_plan_example)}`. This example will, for the organization 'prod' and "
             "the intended logical target 'onsite', create 'secondly' snapshots every second, 'minutely' snapshots every "
             "minute, hourly snapshots every hour, and so on. "
             "It will also create snapshots for the targets 'us-west-1' and 'eu-west-1' within the 'prod' organization. "
             "In addition, it will create snapshots every 12 hours and every week for the 'test' organization, "
             "and name them as being intended for the 'offsite' replication target. Analogously for snapshots that are "
             "taken every 100 milliseconds within the 'test' organization.\n\n"
             "The example creates ZFS snapshots with names like "
             "`prod_onsite_<timestamp>_secondly`, `prod_onsite_<timestamp>_minutely`, "
             "`prod_us-west-1_<timestamp>_hourly`, `prod_us-west-1_<timestamp>_daily`, "
             "`prod_eu-west-1_<timestamp>_hourly`, `prod_eu-west-1_<timestamp>_daily`, "
             "`test_offsite_<timestamp>_12hourly`, `test_offsite_<timestamp>_weekly`, and so on.\n\n"
             "Note: A period name that is missing indicates that no snapshots shall be created for the given period.\n\n"
             "The period name can contain an optional positive integer immediately preceding the time period unit, for "
             "example `_2secondly` or `_10minutely` or `_100millisecondly` to indicate that snapshots are taken every 2 "
             "seconds, or every 10 minutes, or every 100 milliseconds, respectively.\n\n")

    def argparser_escape(text: str) -> str:
        return text.replace("%", "%%")
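    # For example, argparser_escape("%Y-%m-%d") returns "%%Y-%%m-%%d"; argparse help strings treat '%' as the start
    # of a printf-style specifier (such as %(default)s), so literal percent signs must be doubled to survive help
    # rendering.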

    parser.add_argument(
        "--create-src-snapshots-timeformat", default="%Y-%m-%d_%H:%M:%S", metavar="STRFTIME_SPEC",
        help="Default is `%(default)s`. For the strftime format, see "
             "https://docs.python.org/3.11/library/datetime.html#strftime-strptime-behavior. "
             f"Examples: `{argparser_escape('%Y-%m-%d_%H:%M:%S.%f')}` (adds microsecond resolution), "
             f"`{argparser_escape('%Y-%m-%d_%H:%M:%S%z')}` (adds timezone offset), "
             f"`{argparser_escape('%Y-%m-%dT%H-%M-%S')}` (no colons).\n\n"
             "The name of the snapshot created on the src is `$org_$target_strftime(--create-src-snapshots-time*)_$period`. "
             "Example: `tank/foo@prod_us-west-1_2024-09-03_12:26:15_daily`\n\n")
    parser.add_argument(
        "--create-src-snapshots-timezone", default="", type=str, metavar="TZ_SPEC",
        help=f"Default is the local timezone of the system running {PROG_NAME}. When creating a new snapshot on the source, "
             "fetch the current time in the specified timezone, and feed that time, and the value of "
             "--create-src-snapshots-timeformat, into the standard strftime() function to generate the timestamp portion "
             "of the snapshot name. The TZ_SPEC input parameter is of the form 'UTC' or '+HHMM' or '-HHMM' for fixed UTC "
             "offsets, or an IANA TZ identifier for auto-adjustment to daylight savings time, or the empty string to use "
             "the local timezone, for example '', 'UTC', '+0000', '+0530', '-0400', 'America/Los_Angeles', 'Europe/Vienna'. "
             "For a list of valid IANA TZ identifiers see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones#List"
             "\n\nTo change the timezone not only for snapshot name creation, but in all respects for the entire program, "
             "use the standard 'TZ' Unix environment variable, like so: `export TZ=UTC`.\n\n")
    parser.add_argument(
        "--create-src-snapshots-even-if-not-due", action="store_true",
        help="Take snapshots immediately regardless of the creation time of any existing snapshot, even if snapshots "
             "are periodic and not actually due per the schedule.\n\n")
    parser.add_argument(
        "--create-src-snapshots-enable-snapshots-changed-cache", action="store_true",
        help=argparse.SUPPRESS)  # deprecated; was replaced by --cache-snapshots
    parser.add_argument(
        "--zfs-send-program-opts", type=str, default="--raw --compressed", metavar="STRING",
        help="Parameters to fine-tune 'zfs send' behaviour (optional); will be passed into 'zfs send' CLI. "
             "The value is split on runs of one or more whitespace characters. "
             "Default is '%(default)s'. To run `zfs send` without options, specify the empty "
             "string: `--zfs-send-program-opts=''`. "
             "See https://openzfs.github.io/openzfs-docs/man/master/8/zfs-send.8.html "
             "and https://github.com/openzfs/zfs/issues/13024\n\n")
    parser.add_argument(
        "--zfs-recv-program-opts", type=str, default="-u", metavar="STRING",
        help="Parameters to fine-tune 'zfs receive' behaviour (optional); will be passed into 'zfs receive' CLI. "
             "The value is split on runs of one or more whitespace characters. "
             "Default is '%(default)s'. To run `zfs receive` without options, specify the empty "
             "string: `--zfs-recv-program-opts=''`. "
             "Example: '-u -o canmount=noauto -o readonly=on -x keylocation -x keyformat -x encryption'. "
             "See https://openzfs.github.io/openzfs-docs/man/master/8/zfs-receive.8.html "
             "and https://openzfs.github.io/openzfs-docs/man/master/7/zfsprops.7.html\n\n")
    parser.add_argument(
        "--zfs-recv-program-opt", action="append", default=[], metavar="STRING",
        help="Parameter to fine-tune 'zfs receive' behaviour (optional); will be passed into 'zfs receive' CLI. "
             "The value can contain spaces and is not split. This option can be specified multiple times. Example: `"
             "--zfs-recv-program-opt=-o "
             "--zfs-recv-program-opt='org.zfsbootmenu:commandline=ro debug zswap.enabled=1'`\n\n")
    parser.add_argument(
        "--preserve-properties", nargs="+", default=[], metavar="STRING",
        help="On replication, preserve the current value of ZFS properties with the given names on the destination "
             "datasets. The destination ignores the property value it 'zfs receive's from the source if the property name "
             "matches one of the given blacklist values. This prevents a compromised or untrusted source from overwriting "
             "security-critical properties on the destination. The default is to preserve none, i.e. an empty blacklist.\n\n"
             "Example blacklist that protects against dangerous overwrites: "
             "mountpoint overlay sharenfs sharesmb exec setuid devices encryption keyformat keylocation volsize\n\n"
             "See https://openzfs.github.io/openzfs-docs/man/master/7/zfsprops.7.html and "
             "https://openzfs.github.io/openzfs-docs/man/master/8/zfs-receive.8.html#x\n\n"
             "Note: --preserve-properties uses the 'zfs recv -x' option and thus requires either OpenZFS ≥ 2.2.0 "
             "(see https://github.com/openzfs/zfs/commit/b0269cd8ced242e66afc4fa856d62be29bb5a4ff), or that "
             "'zfs send --props' is not used.\n\n")
    parser.add_argument(
        "--force-rollback-to-latest-snapshot", action="store_true",
        help="Before replication, roll back the destination dataset to its most recent destination snapshot (if there "
             "is one), via 'zfs rollback', just in case the destination dataset was modified since its most recent "
             "snapshot. This is much less invasive than the other --force* options (see below).\n\n")
    parser.add_argument(
        "--force-rollback-to-latest-common-snapshot", action="store_true",
        help="Before replication, delete destination ZFS snapshots that are more recent than the most recent common "
             "snapshot ('conflicting snapshots'), via 'zfs rollback'. Do not roll back if no common snapshot exists.\n\n")
    parser.add_argument(
        "--force", action="store_true",
        help="Same as --force-rollback-to-latest-common-snapshot (see above), except that additionally, if no common "
             "snapshot exists, then delete all destination snapshots before starting replication, and proceed "
             "without aborting. Without the --force* flags, the destination dataset is treated as append-only, hence "
             "no destination snapshot that already exists is deleted, and instead the operation is aborted with an "
             "error when encountering a conflicting snapshot.\n\n"
             "Analogy: --force-rollback-to-latest-snapshot is a tiny hammer, whereas "
             "--force-rollback-to-latest-common-snapshot is a medium-sized hammer, --force is a large hammer, and "
             "--force-destroy-dependents is a very large hammer. "
             "Consider using the smallest hammer that can fix the problem. No hammer is ever used by default.\n\n")
    parser.add_argument(
        "--force-destroy-dependents", action="store_true",
        help="On destination, --force and --force-rollback-to-latest-common-snapshot and --delete-* will add the "
             "'-R' flag to their use of 'zfs rollback' and 'zfs destroy', causing them to delete dependents such as "
             "clones and bookmarks. This can be very destructive and is rarely advisable.\n\n")
    parser.add_argument(
        "--force-hard", action="store_true",  # deprecated; was renamed to --force-destroy-dependents
        help=argparse.SUPPRESS)
    parser.add_argument(
        "--force-unmount", action="store_true",
        help="On destination, --force and --force-rollback-to-latest-common-snapshot will add the '-f' flag to their "
             "use of 'zfs rollback' and 'zfs destroy'.\n\n")
    parser.add_argument(
        "--force-once", "--f1", action="store_true",
        help="Use the --force option or --force-rollback-to-latest-common-snapshot option at most once to resolve a "
             "conflict, then abort with an error on any subsequent conflict. This helps to interactively resolve "
             "conflicts, one conflict at a time.\n\n")
    parser.add_argument(
        "--skip-parent", action="store_true",
        help="During replication and deletion, skip processing of the SRC_DATASET and DST_DATASET and only process "
             "their descendant datasets, i.e. children, and children of children, etc (with --recursive). No dataset "
             "is processed unless --recursive is also specified. "
             f"Analogy: `{PROG_NAME} --recursive --skip-parent src dst` is akin to Unix `cp -r src/* dst/` whereas "
             f"`{PROG_NAME} --recursive --skip-parent --skip-replication --delete-dst-datasets dummy dst` is akin to "
             "Unix `rm -r dst/*`\n\n")
    parser.add_argument(
        "--skip-missing-snapshots", choices=["fail", "dataset", "continue"], default="dataset", nargs="?",
        help="During replication, handle source datasets that select no snapshots (and no relevant bookmarks) "
             "as follows:\n\n"
             "a) 'fail': Abort with an error.\n\n"
             "b) 'dataset' (default): Skip the source dataset with a warning. Skip descendant datasets if "
             "--recursive and destination dataset does not exist. Otherwise skip to the next dataset.\n\n"
             "c) 'continue': Skip nothing. If destination snapshots exist, delete them (with --force) or abort "
             "with an error (without --force). If there is no such abort, continue processing with the next dataset. "
             "Eventually create empty destination dataset and ancestors if they do not yet exist and source dataset "
             "has at least one descendant that selects at least one snapshot.\n\n")
    parser.add_argument(
        "--retries", type=int, min=0, default=2, action=CheckRange, metavar="INT",
        help="The maximum number of times a retryable replication or deletion step shall be retried if it fails, for "
             "example because of network hiccups (default: %(default)s, min: %(min)s). "
             "Also consider this option if a periodic pruning script may simultaneously delete a dataset or "
             f"snapshot or bookmark while {PROG_NAME} is running and attempting to access it.\n\n")
    parser.add_argument(
        "--retry-min-sleep-secs", type=float, min=0, default=0, action=CheckRange, metavar="FLOAT",
        help="The minimum duration to sleep between retries (default: %(default)s).\n\n")
    parser.add_argument(
        "--retry-initial-max-sleep-secs", type=float, min=0, default=0.125, action=CheckRange, metavar="FLOAT",
        help="The initial maximum duration to sleep between retries (default: %(default)s).\n\n")
    parser.add_argument(
        "--retry-max-sleep-secs", type=float, min=0, default=5 * 60, action=CheckRange, metavar="FLOAT",
        help="The maximum duration to sleep between retries initially starts with --retry-initial-max-sleep-secs "
             "(see above), and doubles on each retry, up to the final maximum of --retry-max-sleep-secs "
             "(default: %(default)s). On each retry a random sleep time in the [--retry-min-sleep-secs, current max] range "
             "is picked. In a nutshell: retry-min-sleep-secs ≤ retry-initial-max-sleep-secs ≤ retry-max-sleep-secs. "
             "The timer resets after each operation.\n\n")
    parser.add_argument(
        "--retry-max-elapsed-secs", type=float, min=0, default=60 * 60, action=CheckRange, metavar="FLOAT",
        help="A single operation (e.g. 'zfs send/receive' of the current dataset, or deletion of a list of snapshots "
             "within the current dataset) will not be retried (or not retried anymore) once this much time has elapsed "
             "since the initial start of the operation, including retries (default: %(default)s). "
             "The timer resets after each operation completes or retries exhaust, such that subsequently failing "
             "operations can again be retried.\n\n")
825 parser.add_argument(
826 "--skip-on-error", choices=["fail", "tree", "dataset"], default=SKIP_ON_ERROR_DEFAULT,
827 help="During replication and deletion, if an error is not retryable, or --retries has been exhausted, "
828 "or --skip-missing-snapshots raises an error, proceed as follows:\n\n"
829 "a) 'fail': Abort the program with an error. This mode is ideal for testing, clear "
830 "error reporting, and situations where consistency trumps availability.\n\n"
831 "b) 'tree': Log the error, skip the dataset tree rooted at the dataset for which the error "
832 "occurred, and continue processing the next (sibling) dataset tree. "
833 "Example: Assume datasets tank/user1/foo and tank/user2/bar and an error occurs while processing "
834 "tank/user1. In this case processing skips tank/user1/foo and proceeds with tank/user2.\n\n"
835 "c) 'dataset' (default): Same as 'tree' except if the destination dataset already exists, skip to "
836 "the next dataset instead.\n\n"
837 "Example: Assume datasets tank/user1/foo and tank/user2/bar and an error occurs while "
838 "processing tank/user1. In this case processing skips tank/user1 and proceeds with tank/user1/foo "
839 "if the destination already contains tank/user1. Otherwise processing continues with tank/user2. "
840 "This mode is for production use cases that require timely forward progress even in the presence of "
841 "partial failures. For example, assume the job is to backup the home directories or virtual machines "
842 "of thousands of users across an organization. Even if replication of some of the datasets for some "
843 "users fails due too conflicts, busy datasets, etc, the replication job will continue for the "
844 "remaining datasets and the remaining users.\n\n")
845 parser.add_argument(
846 "--skip-replication", action="store_true",
847 help="Skip replication step (see above) and proceed to the optional --delete-dst-datasets step "
848 "immediately (see below).\n\n")
849 parser.add_argument(
850 "--delete-dst-datasets", action="store_true",
851 help="Do nothing if the --delete-dst-datasets option is missing. Otherwise, after successful replication "
852 "step, if any, delete existing destination datasets that are selected via --{include|exclude}-dataset* "
853 "policy yet do not exist within SRC_DATASET (which can be an empty dataset, such as the hardcoded virtual "
854 f"dataset named '{DUMMY_DATASET}'!). Do not recurse without --recursive. With --recursive, never delete "
855 "non-selected dataset subtrees or their ancestors.\n\n"
856 "For example, if the destination contains datasets h1,h2,h3,d1 whereas source only contains h3, "
857 "and the include/exclude policy selects h1,h2,h3,d1, then delete datasets h1,h2,d1 on "
858 "the destination to make it 'the same'. On the other hand, if the include/exclude policy "
859 "only selects h1,h2,h3 then only delete datasets h1,h2 on the destination to make it 'the same'.\n\n"
860 "Example to delete all tmp datasets within tank2/boo/bar: "
861 f"`{PROG_NAME} {DUMMY_DATASET} tank2/boo/bar --dryrun --skip-replication --recursive "
862 "--delete-dst-datasets --include-dataset-regex '(.*/)?tmp.*' --exclude-dataset-regex '!.*'`\n\n")
863 parser.add_argument(
864 "--delete-dst-snapshots", choices=["snapshots", "bookmarks"], default=None, const="snapshots", nargs="?",
865 help="Do nothing if the --delete-dst-snapshots option is missing. Otherwise, after successful "
866 "replication, and successful --delete-dst-datasets step, if any, delete existing destination snapshots "
867 "whose GUID does not exist within the source dataset (which can be an empty dummy dataset!) if the "
868 "destination snapshots are selected by the --include/exclude-snapshot-* policy, and the destination "
869 "dataset is selected via --{include|exclude}-dataset* policy. Does not recurse without --recursive.\n\n"
870 "For example, if the destination dataset contains snapshots h1,h2,h3,d1 (h=hourly, d=daily) whereas "
871 "the source dataset only contains snapshot h3, and the include/exclude policy selects "
872 "h1,h2,h3,d1, then delete snapshots h1,h2,d1 on the destination dataset to make it 'the same'. "
873 "On the other hand, if the include/exclude policy only selects snapshots h1,h2,h3 then only "
874 "delete snapshots h1,h2 on the destination dataset to make it 'the same'.\n\n"
875 "*Note:* To delete snapshots regardless, consider using --delete-dst-snapshots in combination with a "
876 f"source that is an empty dataset, such as the hardcoded virtual dataset named '{DUMMY_DATASET}', like so:"
877 f" `{PROG_NAME} {DUMMY_DATASET} tank2/boo/bar --dryrun --skip-replication --delete-dst-snapshots "
878 "--include-snapshot-regex '.*_daily' --recursive`\n\n"
879 "*Note:* Use --delete-dst-snapshots=bookmarks to delete bookmarks instead of snapshots, in which "
880 "case no snapshots are selected and the --{include|exclude}-snapshot-* filter options treat bookmarks as "
881 "snapshots wrt. selecting.\n\n"
882 "*Performance Note:* --delete-dst-snapshots operates on multiple datasets in parallel (and serially "
883 f"within a dataset), using the same dataset order as {PROG_NAME} replication. "
884 "The degree of parallelism is configurable with the --threads option (see below).\n\n")
885 parser.add_argument(
886 "--delete-dst-snapshots-no-crosscheck", action="store_true",
887 help="This flag indicates that --delete-dst-snapshots=snapshots shall check the source dataset only for "
888 "a snapshot with the same GUID, and ignore whether a bookmark with the same GUID is present in the "
889 "source dataset. Similarly, it also indicates that --delete-dst-snapshots=bookmarks shall check the "
890 "source dataset only for a bookmark with the same GUID, and ignore whether a snapshot with the same GUID "
891 "is present in the source dataset.\n\n")
892 parser.add_argument(
893 "--delete-dst-snapshots-except", action="store_true",
894 help="This flag indicates that the --include/exclude-snapshot-* options shall have inverted semantics for the "
895 "--delete-dst-snapshots option, thus deleting all snapshots except for the selected snapshots (within the "
896 "specified datasets), instead of deleting all selected snapshots (within the specified datasets). In other "
897 "words, this flag enables to specify which snapshots to retain instead of which snapshots to delete.\n\n"
898 "*Synchronization vs. Backup*: When a real (non-dummy) source dataset is specified in combination with "
899 "--delete-dst-snapshots-except, then any destination snapshot retained by the rules above is actually only "
900 "retained if it also exists in the source dataset - __all other destination snapshots are deleted__. This is "
901 "great for synchronization use cases but should __NEVER BE USED FOR LONG-TERM ARCHIVAL__. Long-term archival "
902 "use cases should instead specify the `dummy` source dataset as they require an independent retention policy "
903 "that is not tied to the current contents of the source dataset.\n\n")
904 parser.add_argument(
905 "--delete-dst-snapshots-except-plan", action=DeleteDstSnapshotsExceptPlanAction, default=None, metavar="DICT_STRING",
906 help="Retention periods to be used if pruning snapshots or bookmarks within the selected destination datasets via "
907 "--delete-dst-snapshots. Has the same format as --create-src-snapshots-plan. "
908 "Snapshots (--delete-dst-snapshots=snapshots) or bookmarks (with --delete-dst-snapshots=bookmarks) that "
909 "do not match a period will be deleted. To avoid unexpected surprises, make sure to carefully specify ALL "
910 "snapshot names and periods that shall be retained, in combination with --dryrun.\n\n"
911 f"Example: `{format_dict(src_snapshot_plan_example)}`. This example will, for the organization 'prod' and "
912 "the intended logical target 'onsite', retain secondly snapshots that were created less than 40 seconds ago, "
913 "yet retain the latest 40 secondly snapshots regardless of creation time. Analog for the latest 40 minutely "
914 "snapshots, latest 36 hourly snapshots, etc. "
915 "It will also retain snapshots for the targets 'us-west-1' and 'eu-west-1' within the 'prod' organization. "
916 "In addition, within the 'test' organization, it will retain snapshots that are created every 12 hours and "
917 "every week as specified, and name them as being intended for the 'offsite' replication target. Analog for "
918 "snapshots that are taken every 100 milliseconds within the 'test' organization. "
919 "All other snapshots within the selected datasets will be deleted - you've been warned!\n\n"
920 "The example scans the selected ZFS datasets for snapshots with names like "
921 "`prod_onsite_<timestamp>_secondly`, `prod_onsite_<timestamp>_minutely`, "
922 "`prod_us-west-1_<timestamp>_hourly`, `prod_us-west-1_<timestamp>_daily`, "
923 "`prod_eu-west-1_<timestamp>_hourly`, `prod_eu-west-1_<timestamp>_daily`, "
924 "`test_offsite_<timestamp>_12hourly`, `test_offsite_<timestamp>_weekly`, and so on, and deletes all snapshots "
925 "that do not match a retention rule.\n\n"
926 "Note: A zero within a period (e.g. 'hourly': 0) indicates that no snapshots shall be retained for the given "
927 "period.\n\n"
928 "Note: --delete-dst-snapshots-except-plan is a convenience option that auto-generates a series of the "
929 "following other options: --delete-dst-snapshots-except, "
930 "--new-snapshot-filter-group, --include-snapshot-regex, --include-snapshot-times-and-ranks\n\n")
931 parser.add_argument(
932 "--delete-empty-dst-datasets", choices=["snapshots", "snapshots+bookmarks"], default=None,
933 const="snapshots+bookmarks", nargs="?",
934 help="Do nothing if the --delete-empty-dst-datasets option is missing or --recursive is missing. Otherwise, "
935 "after successful replication "
936 "step and successful --delete-dst-datasets and successful --delete-dst-snapshots steps, if any, "
937 "delete any selected destination dataset that has no snapshot and no bookmark if all descendants of "
938 "that destination dataset are also selected and do not have a snapshot or bookmark either "
939 "(again, only if the existing destination dataset is selected via --{include|exclude}-dataset* policy). "
940 "Never delete non-selected dataset subtrees or their ancestors.\n\n"
941 "For example, if the destination contains datasets h1,d1, and the include/exclude policy "
942 "selects h1,d1, then check if h1,d1 can be deleted. "
943 "On the other hand, if the include/exclude policy only selects h1 then only check if h1 can be deleted.\n\n"
944 "*Note:* Use --delete-empty-dst-datasets=snapshots to delete snapshot-less datasets even if they still "
945 "contain bookmarks.\n\n")
946 monitor_snapshot_plan_example = {
947 "prod": {
948 "onsite": {
949 "100millisecondly": {"latest": {"warning": "300 milliseconds", "critical": "2 seconds"}},
950 "secondly": {"latest": {"warning": "2 seconds", "critical": "14 seconds"}},
951 "minutely": {"latest": {"warning": "30 seconds", "critical": "300 seconds"}},
952 "hourly": {"latest": {"warning": "30 minutes", "critical": "300 minutes"}},
953 "daily": {"latest": {"warning": "4 hours", "critical": "8 hours"}},
954 "weekly": {"latest": {"warning": "2 days", "critical": "8 days"}},
955 "monthly": {"latest": {"warning": "2 days", "critical": "8 days"}},
956 "yearly": {"latest": {"warning": "5 days", "critical": "14 days"}},
957 "10minutely": {"latest": {"warning": "0 minutes", "critical": "0 minutes"}},
958 },
959 "": {
960 "daily": {"latest": {"warning": "4 hours", "critical": "8 hours"}},
961 },
962 },
963 }
964 parser.add_argument(
965 "--monitor-snapshots", default="{}", type=str, metavar="DICT_STRING",
966 help="Do nothing if the --monitor-snapshots flag is missing. Otherwise, after all other steps, "
967 "alert the user if the ZFS 'creation' time property of the latest snapshot for any specified snapshot name "
968 "pattern within the selected datasets is too old wrt. the specified age limit. The purpose is to check if "
969 "snapshots are successfully taken on schedule, successfully replicated on schedule, and successfully pruned on "
970 "schedule. Process exit code is 0, 1, 2 on OK, WARNING, CRITICAL, respectively. "
971 f"Example DICT_STRING: `{format_dict(monitor_snapshot_plan_example)}`. "
972 "This example alerts the user if the latest src or dst snapshot named `prod_onsite_<timestamp>_hourly` is more "
973 "than 30 minutes late (i.e. more than 30+60=90 minutes old) [warning] or more than 300 minutes late (i.e. more "
974 "than 300+60=360 minutes old) [critical]. "
975 "Analog for the latest snapshot named `prod_<timestamp>_daily`, and so on.\n\n"
976 "Note: A duration that is missing or zero (e.g. '0 minutes') indicates that no snapshots shall be checked for "
977 "the given snapshot name pattern.\n\n")
978 parser.add_argument(
979 "--monitor-snapshots-dont-warn", action="store_true",
980 help="Log a message for monitoring warnings but nonetheless exit with zero exit code.\n\n")
981 parser.add_argument(
982 "--monitor-snapshots-dont-crit", action="store_true",
983 help="Log a message for monitoring criticals but nonetheless exit with zero exit code.\n\n")
984 parser.add_argument(
985 "--monitor-snapshots-no-latest-check", action="store_true",
986 # help="Disable monitoring check of latest snapshot.\n\n")
987 help=argparse.SUPPRESS)
988 parser.add_argument(
989 "--monitor-snapshots-no-oldest-check", action="store_true",
990 # help="Disable monitoring check of oldest snapshot.\n\n")
991 help=argparse.SUPPRESS)
992 cmp_choices_dflt: str = "+".join(CMP_CHOICES_ITEMS)
993 cmp_choices: list[str] = []
994 for i in range(len(CMP_CHOICES_ITEMS)):
995 cmp_choices += ["+".join(c) for c in itertools.combinations(CMP_CHOICES_ITEMS, i + 1)]
996 parser.add_argument(
997 "--compare-snapshot-lists", choices=cmp_choices, default="", const=cmp_choices_dflt, nargs="?",
998 help="Do nothing if the --compare-snapshot-lists option is missing. Otherwise, after successful replication "
999 "step and successful --delete-dst-datasets, --delete-dst-snapshots steps and --delete-empty-dst-datasets "
1000 "steps, if any, proceed as follows:\n\n"
1001 "Compare source and destination dataset trees recursively wrt. snapshots, for example to check if all "
1002 "recently taken snapshots have been successfully replicated by a periodic job.\n\n"
1003 "Example: List snapshots only contained in source (tagged with 'src'), only contained in destination "
1004 "(tagged with 'dst'), and contained in both source and destination (tagged with 'all'), restricted to "
1005 "hourly and daily snapshots taken within the last 7 days, excluding the last 4 hours (to allow for some "
1006 "slack/stragglers), excluding temporary datasets: "
1007 f"`{PROG_NAME} tank1/foo/bar tank2/boo/bar --skip-replication "
1008 "--compare-snapshot-lists=src+dst+all --recursive --include-snapshot-regex '.*_(hourly|daily)' "
1009 "--include-snapshot-times-and-ranks '7 days ago..4 hours ago' --exclude-dataset-regex 'tmp.*'`\n\n"
1010 "This outputs a TSV file containing the following columns:\n\n"
1011 "`location creation_iso createtxg rel_name guid root_dataset rel_dataset name creation written`\n\n"
1012 "Example output row:\n\n"
1013 "`src 2024-11-06_08:30:05 17435050 /foo@test_2024-11-06_08:30:05_daily 2406491805272097867 tank1/src "
1014 "/foo tank1/src/foo@test_2024-10-06_08:30:04_daily 1730878205 24576`\n\n"
1015 "If the TSV output file contains zero lines starting with the prefix 'src' and zero lines starting with "
1016 "the prefix 'dst' then no source snapshots are missing on the destination, and no destination "
1017 "snapshots are missing on the source, indicating that the periodic replication and pruning jobs perform "
1018 "as expected. The TSV output is sorted by rel_dataset, and by ZFS creation time within each rel_dataset "
1019 "- the first and last line prefixed with 'all' contains the metadata of the oldest and latest common "
1020 "snapshot, respectively. Third party tools can use this info for post-processing, for example using "
1021 "custom scripts using 'csplit' or duckdb analytics queries.\n\n"
1022 "The --compare-snapshot-lists option also directly logs various summary stats, such as the metadata of "
1023 "the latest common snapshot, latest snapshots and oldest snapshots, as well as the time diff between the "
1024 "latest common snapshot and latest snapshot only in src (and only in dst), as well as how many src "
1025 "snapshots and how many GB of data are missing on dst, etc.\n\n"
1026 "*Note*: Consider omitting the 'all' flag to reduce noise and instead focus on missing snapshots only, "
1027 "like so: --compare-snapshot-lists=src+dst \n\n"
1028 "*Note*: The source can also be an empty dataset, such as the hardcoded virtual dataset named "
1029 f"'{DUMMY_DATASET}'.\n\n"
1030 "*Note*: --compare-snapshot-lists is typically *much* faster than standard 'zfs list -t snapshot' CLI "
1031 "usage because the former issues requests with a higher degree of parallelism than the latter. The "
1032 "degree is configurable with the --threads option (see below).\n\n")
1033 parser.add_argument(
1034 "--cache-snapshots", choices=["true", "false"], default="false", const="true", nargs="?",
1035 help="Default is '%(default)s'. If 'true', maintain a persistent local cache of recent snapshot creation times, "
1036 "recent successful replication times, and recent monitoring times, and compare them to a quick "
1037 "'zfs list -t filesystem,volume -p -o snapshots_changed' to help determine if a new snapshot shall be created "
1038 "on the src, and if there are any changes that need to be replicated or monitored. Enabling the cache "
1039 "improves performance if --create-src-snapshots and/or replication and/or --monitor-snapshots is invoked "
1040 "frequently (e.g. every minute via cron) over a large number of datasets, with each dataset containing a large "
1041 "number of snapshots, yet it is seldom for a new src snapshot to actually be created, or there are seldom any "
1042 "changes to replicate or monitor (e.g. a snapshot is only created every day and/or deleted every day).\n\n"
1043 "*Note:* This flag only has an effect on OpenZFS ≥ 2.2.\n\n"
1044 "*Note:* This flag is only relevant for snapshot creation on the src if --create-src-snapshots-even-if-not-due "
1045 "is not specified.\n\n")
1046 parser.add_argument(
1047 "--dryrun", "-n", choices=["recv", "send"], default=None, const="send", nargs="?",
1048 help="Do a dry run (aka 'no-op') to print what operations would happen if the command were to be executed "
1049 "for real (optional). This option treats both the ZFS source and destination as read-only. "
1050 "Accepts an optional argument for fine tuning that is handled as follows:\n\n"
1051 "a) 'recv': Send snapshot data via 'zfs send' to the destination host and receive it there via "
1052 "'zfs receive -n', which discards the received data there.\n\n"
1053 "b) 'send': Do not execute 'zfs send' and do not execute 'zfs receive'. This is a less 'realistic' form "
1054 "of dry run, but much faster, especially for large snapshots and slow networks/disks, as no snapshot is "
1055 "actually transferred between source and destination. This is the default when specifying --dryrun.\n\n"
1056 "Examples: --dryrun, --dryrun=send, --dryrun=recv\n\n")
1057 parser.add_argument(
1058 "--verbose", "-v", action="count", default=0,
1059 help="Print verbose information. This option can be specified multiple times to increase the level of "
1060 "verbosity. To print what ZFS/SSH operation exactly is happening (or would happen), add the `-v -v -v` "
1061 "flag, maybe along with --dryrun. All ZFS and SSH commands (even with --dryrun) are logged such that "
1062 "they can be inspected, copy-and-pasted into a terminal shell and run manually to help anticipate or "
1063 "diagnose issues. ERROR, WARN, INFO, DEBUG, TRACE output lines are identified by [E], [W], [I], [D], [T] "
1064 "prefixes, respectively.\n\n")
1065 parser.add_argument(
1066 "--quiet", "-q", action="store_true",
1067 help="Suppress non-error, info, debug, and trace output.\n\n")
1068 parser.add_argument(
1069 "--no-privilege-elevation", "-p", action="store_true",
1070 help="Do not attempt to run state changing ZFS operations 'zfs create/rollback/destroy/send/receive/snapshot' as "
1071 "root (via 'sudo -u root' elevation granted by administrators appending the following to /etc/sudoers: "
1072 "`<NON_ROOT_USER_NAME> ALL=NOPASSWD:/path/to/zfs`\n\n"
1073 "Instead, the --no-privilege-elevation flag is for non-root users that have been granted corresponding "
1074 "ZFS permissions by administrators via 'zfs allow' delegation mechanism, like so: "
1075 "sudo zfs allow -u $SRC_NON_ROOT_USER_NAME snapshot,destroy,send,bookmark,hold $SRC_DATASET; "
1076 "sudo zfs allow -u $DST_NON_ROOT_USER_NAME mount,create,receive,rollback,destroy $DST_DATASET_OR_POOL.\n\n"
1077 "If you do not plan to use the --force* flags and --delete-* CLI options then ZFS permissions "
1078 "'rollback,destroy' can be omitted, arriving at the absolutely minimal set of required destination "
1079 "permissions: `mount,create,receive`.\n\n"
1080 "For extra security $SRC_NON_ROOT_USER_NAME should be different than $DST_NON_ROOT_USER_NAME, i.e. the "
1081 "sending Unix user on the source and the receiving Unix user at the destination should be separate Unix "
1082 "user accounts with separate private keys even if both accounts reside on the same machine, per the "
1083 "principle of least privilege.\n\n"
1084 "Also see https://openzfs.github.io/openzfs-docs/man/master/8/zfs-allow.8.html#EXAMPLES and "
1085 "https://tinyurl.com/9h97kh8n and "
1086 "https://youtu.be/o_jr13Z9f1k?si=7shzmIQJpzNJV6cq\n\n")
1087 parser.add_argument(
1088 "--no-stream", action="store_true",
1089 help="During replication, only replicate the most recent selected source snapshot of a dataset (using -i "
1090 "incrementals instead of -I incrementals), hence skip all intermediate source snapshots that may exist "
1091 "between that and the most recent common snapshot. If there is no common snapshot also skip all other "
1092 "source snapshots for the dataset, except for the most recent selected source snapshot. This option helps "
1093 "the destination to 'catch up' with the source ASAP, consuming a minimum of disk space, at the expense "
1094 "of reducing reliable options for rolling back to intermediate snapshots in the future.\n\n")
1095 parser.add_argument(
1096 "--no-resume-recv", action="store_true",
1097 help="Replication of snapshots via 'zfs send/receive' can be interrupted by intermittent network hiccups, "
1098 "reboots, hardware issues, etc. Interrupted 'zfs send/receive' operations are retried if the --retries "
1099 f"and --retry-* options enable it (see above). In normal operation {PROG_NAME} automatically retries "
1100 "such that only the portion of the snapshot is transmitted that has not yet been fully received on the "
1101 "destination. For example, this helps to progressively transfer a large individual snapshot over a "
1102 "wireless network in a timely manner despite frequent intermittent network hiccups. This optimization is "
1103 "called 'resume receive' and uses the 'zfs receive -s' and 'zfs send -t' feature.\n\n"
1104 "The --no-resume-recv option disables this optimization such that a retry now retransmits the entire "
1105 "snapshot from scratch, which could slow down or even prohibit progress in case of frequent network "
1106 f"hiccups. {PROG_NAME} automatically falls back to using the --no-resume-recv option if it is "
1107 "auto-detected that the ZFS pool does not reliably support the 'resume receive' optimization.\n\n"
1108 "*Note:* Snapshots that have already been fully transferred as part of the current 'zfs send/receive' "
1109 "operation need not be retransmitted regardless of the --no-resume-recv flag. For example, assume "
1110 "a single 'zfs send/receive' operation is transferring incremental snapshots 1 through 10 via "
1111 "'zfs send -I', but the operation fails while transferring snapshot 10, then snapshots 1 through 9 "
1112 "need not be retransmitted regardless of the --no-resume-recv flag, as these snapshots have already "
1113 "been successfully received at the destination either way.\n\n")
1114 parser.add_argument(
1115 "--create-bookmarks", choices=["all", "hourly", "minutely", "secondly", "none"], default="all",
1116 help=f"For increased safety, {PROG_NAME} replication behaves as follows wrt. ZFS bookmark creation, if it is "
1117 "autodetected that the source ZFS pool support bookmarks:\n\n"
1118 "* `all` (default): Whenever it has successfully completed a 'zfs send' operation, "
1119 f"{PROG_NAME} creates a ZFS bookmark of each source snapshot that was sent during that 'zfs send' operation, "
1120 "and attaches it to the source dataset. This increases safety at the expense of some performance.\n\n"
1121 "* `hourly`: Whenever it has successfully completed replication of the most recent source snapshot, "
1122 f"{PROG_NAME} creates a ZFS bookmark of that snapshot, and attaches it to the source dataset. In addition, "
1123 f"whenever it has successfully completed a 'zfs send' operation, {PROG_NAME} creates a ZFS bookmark of each "
1124 f"hourly, daily, weekly, monthly and yearly source snapshot that was sent during that 'zfs send' operation, "
1125 "and attaches it to the source dataset.\n\n"
1126 "* `minutely` and `secondly`: Same as `hourly` except that it also creates ZFS bookmarks for minutely and "
1127 "secondly snapshots, respectively.\n\n"
1128 "* `none`: No bookmark is created.\n\n"
1129 "Bookmarks exist so an incremental stream can continue to be sent from the source dataset without having "
1130 "to keep the already replicated snapshot around on the source dataset until the next upcoming snapshot "
1131 "has been successfully replicated. This way you can send the snapshot from the source dataset to another "
1132 "host, then bookmark the snapshot on the source dataset, then delete the snapshot from the source "
1133 "dataset to save disk space, and then still incrementally send the next upcoming snapshot from the "
1134 "source dataset to the other host by referring to the bookmark.\n\n"
1135 "The --create-bookmarks=none option disables this safety feature but is discouraged, because bookmarks "
1136 "are tiny and relatively cheap and help to ensure that ZFS replication can continue even if source and "
1137 "destination dataset somehow have no common snapshot anymore. "
1138 "For example, if a pruning script has accidentally deleted too many (or even all) snapshots on the "
1139 "source dataset in an effort to reclaim disk space, replication can still proceed because it can use "
1140 "the info in the bookmark (the bookmark must still exist in the source dataset) instead of the info in "
1141 "the metadata of the (now missing) source snapshot.\n\n"
1142 "A ZFS bookmark is a tiny bit of metadata extracted from a ZFS snapshot by the 'zfs bookmark' CLI, and "
1143 "attached to a dataset, much like a ZFS snapshot. Note that a ZFS bookmark does not contain user data; "
1144 "instead a ZFS bookmark is essentially a tiny pointer in the form of the GUID of the snapshot and 64-bit "
1145 "transaction group number of the snapshot and creation time of the snapshot, which is sufficient to tell "
1146 "the destination ZFS pool how to find the destination snapshot corresponding to the source bookmark "
1147 "and (potentially already deleted) source snapshot. A bookmark can be fed into 'zfs send' as the "
1148 "source of an incremental send. Note that while a bookmark allows for its snapshot "
1149 "to be deleted on the source after successful replication, it still requires that its snapshot is not "
1150 "somehow deleted prematurely on the destination dataset, so be mindful of that. "
1151 f"By convention, a bookmark created by {PROG_NAME} has the same name as its corresponding "
1152 "snapshot, the only difference being the leading '#' separator instead of the leading '@' separator. "
1153 "Also see https://www.youtube.com/watch?v=LaNgoAZeTww&t=316s.\n\n"
1154 "You can list bookmarks, like so: "
1155 "`zfs list -t bookmark -o name,guid,createtxg,creation -d 1 $SRC_DATASET`, and you can (and should) "
1156 "periodically prune obsolete bookmarks just like snapshots, like so: "
1157 "`zfs destroy $SRC_DATASET#$BOOKMARK`. Typically, bookmarks should be pruned less aggressively "
1158 "than snapshots, and destination snapshots should be pruned less aggressively than source snapshots. "
1159 "As an example starting point, here is a command that deletes all bookmarks older than "
1160 "90 days, but retains the latest 200 bookmarks (per dataset) regardless of creation time: "
1161 f"`{PROG_NAME} {DUMMY_DATASET} tank2/boo/bar --dryrun --recursive --skip-replication "
1162 "--delete-dst-snapshots=bookmarks --include-snapshot-times-and-ranks notime 'all except latest 200' "
1163 "--include-snapshot-times-and-ranks 'anytime..90 days ago'`\n\n")
1164 parser.add_argument(
1165 "--no-create-bookmark", action="store_true",
1166 help=argparse.SUPPRESS) # deprecated; was replaced by --create-bookmarks=none
1167 parser.add_argument(
1168 "--no-use-bookmark", action="store_true",
1169 help=f"For increased safety, in normal replication operation {PROG_NAME} replication also looks for bookmarks "
1170 "(in addition to snapshots) on the source dataset in order to find the most recent common snapshot wrt. the "
1171 "destination dataset, if it is auto-detected that the source ZFS pool support bookmarks. "
1172 "The --no-use-bookmark option disables this safety feature but is discouraged, because bookmarks help "
1173 "to ensure that ZFS replication can continue even if source and destination dataset somehow have no "
1174 "common snapshot anymore.\n\n"
1175 f"Note that it does not matter whether a bookmark was created by {PROG_NAME} or a third party script, "
1176 "as only the GUID of the bookmark and the GUID of the snapshot is considered for comparison, and ZFS "
1177 "guarantees that any bookmark of a given snapshot automatically has the same GUID, transaction group "
1178 "number and creation time as the snapshot. Also note that you can create, delete and prune bookmarks "
1179 f"any way you like, as {PROG_NAME} (without --no-use-bookmark) will happily work with whatever "
1180 "bookmarks currently exist, if any.\n\n")
1182 ssh_cipher_default = "^aes256-gcm@openssh.com"
1183 # ^aes256-gcm@openssh.com cipher: for speed with confidentiality and integrity
1184 # measure cipher perf like so: count=5000; for i in $(seq 1 3); do echo "iteration $i:"; for cipher in $(ssh -Q cipher); do dd if=/dev/zero bs=1M count=$count 2> /dev/null | ssh -c $cipher -p 40999 127.0.0.1 "(time -p cat) > /dev/null" 2>&1 | grep real | awk -v count=$count -v cipher=$cipher '{print cipher ": " count / $2 " MB/s"}'; done; done
1185 # see https://gbe0.com/posts/linux/server/benchmark-ssh-ciphers/
1186 # and https://crypto.stackexchange.com/questions/43287/what-are-the-differences-between-these-aes-ciphers
1187 parser.add_argument(
1188 "--ssh-cipher", type=str, default=ssh_cipher_default, metavar="STRING",
1189 help="SSH cipher specification for encrypting the session (optional); will be passed into ssh -c CLI. "
1190 "--ssh-cipher is a comma-separated list of ciphers listed in order of preference. See the 'Ciphers' "
1191 "keyword in ssh_config(5) for more information: "
1192 "https://manpages.ubuntu.com/manpages/man5/ssh_config.5.html. Default: `%(default)s`\n\n")
1194 locations = ["src", "dst"]
1195 for loc in locations:
1196 parser.add_argument(
1197 f"--ssh-{loc}-user", type=str, metavar="STRING",
1198 help=f"Remote SSH username on {loc} host to connect to (optional). Overrides username given in "
1199 f"{loc.upper()}_DATASET.\n\n")
1200 for loc in locations:
1201 parser.add_argument(
1202 f"--ssh-{loc}-host", type=str, metavar="STRING",
1203 help=f"Remote SSH hostname of {loc} host to connect to (optional). Can also be an IPv4 or IPv6 address. "
1204 f"Overrides hostname given in {loc.upper()}_DATASET.\n\n")
1205 for loc in locations:
1206 parser.add_argument(
1207 f"--ssh-{loc}-port", type=int, min=1, max=65535, action=CheckRange, metavar="INT",
1208 help=f"Remote SSH port on {loc} host to connect to (optional).\n\n")
1209 for loc in locations:
1210 parser.add_argument(
1211 f"--ssh-{loc}-config-file", type=str, action=SSHConfigFileNameAction, metavar="FILE",
1212 help=f"Path to SSH ssh_config(5) file to connect to {loc} (optional); will be passed into ssh -F CLI. "
1213 "The basename must contain the substring 'bzfs_ssh_config'.\n\n")
1214 control_persist_secs_dflt: int = 90
1215 parser.add_argument(
1216 "--ssh-exit-on-shutdown", action="store_true",
1217 # help="On process shutdown, ask the SSH ControlMaster to exit immediately via 'ssh -O exit'. By default, masters "
1218 # f"persist for {control_persist_secs_dflt} idle seconds and are reused across {PROG_NAME} processes to improve "
1219 # f"startup latency when safe. A master is never used simultaneously by multiple {PROG_NAME} processes.")
1220 help=argparse.SUPPRESS)
1221 parser.add_argument(
1222 "--ssh-control-persist-secs", type=int, min=1, default=control_persist_secs_dflt, action=CheckRange, metavar="INT",
1223 help=argparse.SUPPRESS)
1224 parser.add_argument(
1225 "--timeout", default=None, metavar="DURATION",
1226 # help="Exit the program (or current task with non-zero --daemon-lifetime) with an error after this much time has "
1227 # "elapsed. Default is to never timeout. Examples: '600 seconds', '90 minutes', '10years'\n\n")
1228 help=argparse.SUPPRESS)
1229 threads_default = 100 # percent
1230 parser.add_argument(
1231 "--threads", min=1, max=1600, default=(threads_default, True), action=CheckPercentRange, metavar="INT[%]",
1232 help="The maximum number of threads to use for parallel operations; can be given as a positive integer, "
1233 f"optionally followed by the %% percent character (min: %(min)s, default: {threads_default}%%). Percentages "
1234 "are relative to the number of CPU cores on the machine. Example: 200%% uses twice as many threads as "
1235 "there are cores on the machine; 75%% uses num_threads = num_cores * 0.75. Currently this option only "
1236 "applies to dataset and snapshot replication, --create-src-snapshots, --delete-dst-snapshots, "
1237 "--delete-empty-dst-datasets, --monitor-snapshots and --compare-snapshot-lists. The ideal value for this "
1238 "parameter depends on the use case and its performance requirements, as well as the number of available CPU "
1239 "cores and the parallelism offered by SSDs vs. HDDs, ZFS topology and configuration, as well as the network "
1240 "bandwidth and other workloads simultaneously running on the system. The current default is geared towards a "
1241 "high degree of parallelism, and as such may perform poorly on HDDs. Examples: 1, 4, 75%%, 150%%\n\n")
1242 parser.add_argument(
1243 "--max-concurrent-ssh-sessions-per-tcp-connection", type=int, min=1, default=8, action=CheckRange, metavar="INT",
1244 help=f"For best throughput, {PROG_NAME} uses multiple SSH TCP connections in parallel, as indicated by "
1245 "--threads (see above). For best startup latency, each such parallel TCP connection can carry a "
1246 "maximum of S concurrent SSH sessions, where "
1247 "S=--max-concurrent-ssh-sessions-per-tcp-connection (default: %(default)s, min: %(min)s). "
1248 "Concurrent SSH sessions are mostly used for metadata operations such as listing ZFS datasets and their "
1249 "snapshots. This client-side max sessions parameter must not be higher than the server-side "
1250 "sshd_config(5) MaxSessions parameter (which defaults to 10, see "
1251 "https://manpages.ubuntu.com/manpages/man5/sshd_config.5.html).\n\n"
1252 f"*Note:* For better throughput, {PROG_NAME} uses one dedicated TCP connection per ZFS "
1253 "send/receive operation such that the dedicated connection is never used by any other "
1254 "concurrent SSH session, effectively ignoring the value of the "
1255 "--max-concurrent-ssh-sessions-per-tcp-connection parameter in the ZFS send/receive case.\n\n")
1256 parser.add_argument(
1257 "--bwlimit", default=None, action=NonEmptyStringAction, metavar="STRING",
1258 help="Sets 'pv' bandwidth rate limit for zfs send/receive data transfer (optional). Example: `100m` to cap "
1259 "throughput at 100 MB/sec. Default is unlimited. Also see "
1260 "https://manpages.ubuntu.com/manpages/man1/pv.1.html\n\n")
1261 parser.add_argument(
1262 "--daemon-lifetime", default="0 seconds", metavar="DURATION",
1263 # help="Exit the daemon after this much time has elapsed. Default is '0 seconds', i.e. no daemon mode. "
1264 # "Examples: '600 seconds', '86400 seconds', '1000years'\n\n")
1265 help=argparse.SUPPRESS)
1266 parser.add_argument(
1267 "--daemon-frequency", default="minutely", metavar="STRING",
1268 # help="Run a daemon iteration every N time units. Default is '%(default)s'. "
1269 # "Examples: '100 millisecondly', '10secondly, 'minutely' to request the daemon to run every 100 milliseconds, "
1270 # "or every 10 seconds, or every minute, respectively. Only has an effect if --daemon-lifetime is nonzero.\n\n")
1271 help=argparse.SUPPRESS)
1272 parser.add_argument(
1273 "--daemon-remote-conf-cache-ttl", default="300 seconds", metavar="DURATION",
1274 # help="The Time-To-Live for the remote host configuration cache, which stores available programs and "
1275 # f"ZFS features. After this duration, {prog_name} will re-detect the remote environment. Set to '0 seconds' "
1276 # "to re-detect on every daemon iteration. Default: %(default)s.\n\n")
1277 help=argparse.SUPPRESS)
1278 parser.add_argument(
1279 "--no-estimate-send-size", action="store_true",
1280 help="Skip 'zfs send -n -v'. This can improve performance if replicating small snapshots at high frequency.\n\n")
1282 def hlp(program: str) -> str:
1283 return f"The name of the '{program}' executable (optional). Default is '{program}'. "
1285 msg: str = f"Use '{DISABLE_PRG}' to disable the use of this program.\n\n"
1286 parser.add_argument(
1287 "--compression-program", default="zstd", choices=["zstd", "lz4", "pzstd", "pigz", "gzip", DISABLE_PRG],
1288 help=hlp("zstd") + msg.rstrip() + " The use is auto-disabled if data is transferred locally instead of via the "
1289 "network. This option is about transparent compression-on-the-wire, not about "
1290 "compression-at-rest.\n\n")
1291 parser.add_argument(
1292 "--compression-program-opts", default="-1", metavar="STRING",
1293 help="The options to be passed to the compression program on the compression step (optional). "
1294 "Default is '%(default)s' (fastest).\n\n")
1295 parser.add_argument(
1296 "--mbuffer-program", default="mbuffer", choices=["mbuffer", DISABLE_PRG],
1297 help=hlp("mbuffer") + msg.rstrip() + " The use is auto-disabled if data is transferred locally "
1298 "instead of via the network. This tool is used to smooth out the rate "
1299 "of data flow and prevent bottlenecks caused by network latency or "
1300 "speed fluctuation.\n\n")
1301 parser.add_argument(
1302 "--mbuffer-program-opts", default="-q -m 128M", metavar="STRING",
1303 help="Options to be passed to 'mbuffer' program (optional). Default: '%(default)s'.\n\n")
1304 parser.add_argument(
1305 "--ps-program", default="ps", choices=["ps", DISABLE_PRG],
1306 help=hlp("ps") + msg)
1307 parser.add_argument(
1308 "--pv-program", default="pv", choices=["pv", DISABLE_PRG],
1309 help=hlp("pv") + msg.rstrip() + " This is used for bandwidth rate-limiting and progress monitoring.\n\n")
1310 parser.add_argument(
1311 "--pv-program-opts", metavar="STRING",
1312 default="--progress --timer --eta --fineta --rate --average-rate --bytes --interval=1 --width=120 --buffer-size=2M",
1313 help="The options to be passed to the 'pv' program (optional). Default: '%(default)s'.\n\n")
1314 parser.add_argument(
1315 "--shell-program", default="sh", choices=["sh", DISABLE_PRG],
1316 help=hlp("sh") + msg)
1317 parser.add_argument(
1318 "--ssh-program", default="ssh", choices=["ssh", "hpnssh", DISABLE_PRG],
1319 help=hlp("ssh") + msg)
1320 parser.add_argument(
1321 "--sudo-program", default="sudo", choices=["sudo", DISABLE_PRG],
1322 help=hlp("sudo") + msg)
1323 parser.add_argument(
1324 "--zpool-program", default="zpool", choices=["zpool", DISABLE_PRG],
1325 help=hlp("zpool") + msg)
1326 parser.add_argument(
1327 "--log-dir", type=str, action=SafeDirectoryNameAction, metavar="DIR",
1328 help=f"Path to the log output directory on local host (optional). Default: $HOME/{LOG_DIR_DEFAULT}. The logger "
1329 "that is used by default writes log files there, in addition to the console. The basename of --log-dir must "
1330 f"contain the substring '{LOG_DIR_DEFAULT}' as this helps prevent accidents. The current.dir symlink "
1331 "always points to the subdirectory containing the most recent log file. The current.log symlink "
1332 "always points to the most recent log file. The current.pv symlink always points to the most recent "
1333 "data transfer monitoring log. Run `tail --follow=name --max-unchanged-stats=1` on both symlinks to "
1334 "follow what's currently going on. Parallel replication generates a separate .pv file per thread. To "
1335 "monitor these, run something like "
1336 "`while true; do clear; for f in $(realpath $HOME/bzfs-logs/current/current.pv)*; "
1337 "do tac -s $(printf '\\r') $f | tr '\\r' '\\n' | grep -m1 -v '^$'; done; sleep 1; done`\n\n")
1338 h_fix = ("The path name of the log file on local host is "
1339 "`${--log-dir}/${--log-file-prefix}<timestamp>${--log-file-infix}${--log-file-suffix}-<random>.log`. "
1340 "Example: `--log-file-prefix=zrun_us-west-1_ --log-file-suffix=_daily` will generate log "
1341 "file names such as `zrun_us-west-1_2024-09-03_12:26:15_daily-bl4i1fth.log`\n\n")
1342 parser.add_argument(
1343 "--log-file-prefix", default="zrun_", action=SafeFileNameAction, metavar="STRING",
1344 help="Default is %(default)s. " + h_fix)
1345 parser.add_argument(
1346 "--log-file-infix", default="", action=SafeFileNameAction, metavar="STRING",
1347 help="Default is the empty string. " + h_fix)
1348 parser.add_argument(
1349 "--log-file-suffix", default="", action=SafeFileNameAction, metavar="STRING",
1350 help="Default is the empty string. " + h_fix)
1351 parser.add_argument(
1352 "--log-subdir", choices=["daily", "hourly", "minutely"], default="daily",
1353 help="Make a new subdirectory in --log-dir every day, hour or minute; write log files there. "
1354 "Default is '%(default)s'.")
1355 parser.add_argument(
1356 "--log-syslog-address", default=None, action=NonEmptyStringAction, metavar="STRING",
1357 help="Host:port of the syslog machine to send messages to (e.g. 'foo.example.com:514' or '127.0.0.1:514'), or "
1358 "the file system path to the syslog socket file on localhost (e.g. '/dev/log'). The default is no "
1359 "address, i.e. do not log anything to syslog by default. See "
1360 "https://docs.python.org/3/library/logging.handlers.html#sysloghandler\n\n")
1361 parser.add_argument(
1362 "--log-syslog-socktype", choices=["UDP", "TCP"], default="UDP",
1363 help="The socket type to use to connect if no local socket file system path is used. Default is '%(default)s'.\n\n")
1364 parser.add_argument(
1365 "--log-syslog-facility", type=int, min=0, max=7, default=1, action=CheckRange, metavar="INT",
1366 help="The local facility aka category that identifies msg sources in syslog "
1367 "(default: %(default)s, min=%(min)s, max=%(max)s).\n\n")
1368 parser.add_argument(
1369 "--log-syslog-prefix", default=PROG_NAME, action=NonEmptyStringAction, metavar="STRING",
1370 help=f"The name to prepend to each message that is sent to syslog; identifies {PROG_NAME} messages as opposed "
1371 "to messages from other sources. Default is '%(default)s'.\n\n")
1372 parser.add_argument(
1373 "--log-syslog-level", choices=["CRITICAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"],
1374 default="ERROR",
1375 help="Only send messages with equal or higher priority than this log level to syslog. Default is '%(default)s'.\n\n")
1376 parser.add_argument(
1377 "--include-envvar-regex", action=FileOrLiteralAction, nargs="+", default=[], metavar="REGEX",
1378 help="On program startup, unset all Unix environment variables for which the full environment variable "
1379 "name matches at least one of the excludes but none of the includes. If an environment variable is "
1380 "included this decision is never reconsidered because include takes precedence over exclude. "
1381 "The purpose is to tighten security and help guard against accidental inheritance or malicious "
1382 "injection of environment variable values that may have unintended effects.\n\n"
1383 "This option can be specified multiple times. "
1384 "A leading `!` character indicates logical negation, i.e. the regex matches if the regex with the "
1385 "leading `!` character removed does not match. "
1386 "The default is to include no environment variables, i.e. to make no exceptions to --exclude-envvar-regex. "
1387 "Example that retains at least these two env vars: "
1388 "`--include-envvar-regex PATH "
1389 f"--include-envvar-regex {ENV_VAR_PREFIX}min_pipe_transfer_size`. "
1390 "Example that retains all environment variables without tightened security: `'.*'`\n\n")
1391 parser.add_argument(
1392 "--exclude-envvar-regex", action=FileOrLiteralAction, nargs="+", default=[], metavar="REGEX",
1393 help="Same syntax as --include-envvar-regex (see above) except that the default is to exclude no "
1394 f"environment variables. Example: `{ENV_VAR_PREFIX}.*`\n\n")
1396 for period, label in {"yearly": "years", "monthly": "months", "weekly": "weeks", "daily": "days", "hourly": "hours",
1397 "minutely": "minutes", "secondly": "seconds", "millisecondly": "milliseconds"}.items():
1398 anchor_group = parser.add_argument_group(
1399 f"{period.title()} period anchors", "Use these options to customize when snapshots that happen "
1400 f"every N {label} are scheduled to be created on the source by the --create-src-snapshots option.")
1401 for f in [f for f in dataclasses.fields(PeriodAnchors) if f.name.startswith(period + "_")]:
1402 min_ = f.metadata.get("min")
1403 max_ = f.metadata.get("max")
1404 anchor_group.add_argument(
1405 "--" + f.name, type=int, min=min_, max=max_, default=f.default, action=CheckRange, metavar="INT",
1406 help=f"{f.metadata.get('help')} ({min_} ≤ x ≤ {max_}, default: %(default)s).\n\n")
1408 for option_name, flag in ZFS_RECV_GROUPS.items():
1409 grup: str = option_name.replace("_", "-") # one of zfs_recv_o, zfs_recv_x
1410 flag = "'" + flag + "'" # one of -o or -x
1412 def h(text: str, option_name: str = option_name) -> str:
1413 return argparse.SUPPRESS if option_name not in (ZFS_RECV_O, ZFS_RECV_X) else text
1415 argument_group = parser.add_argument_group(
1416 grup,
1417 description=h(f"The following group of parameters specifies additional zfs receive {flag} options that "
1418 "can be used to configure copying of ZFS dataset properties from the source dataset to "
1419 "its corresponding destination dataset. The 'zfs-recv-o' group of parameters is applied "
1420 "before the 'zfs-recv-x' group."))
1421 target_choices = ["full", "incremental", "full+incremental"]
1422 target_choices_default = "full+incremental" if option_name == ZFS_RECV_X else "full"
1423 qq = "'"
1424 argument_group.add_argument(
1425 f"--{grup}-targets", choices=target_choices, default=target_choices_default,
1426 help=h(f"The zfs send phase or phases during which the extra {flag} options are passed to 'zfs receive'. "
1427 "This can be one of the following choices: "
1428 f"{', '.join([f'{qq}{x}{qq}' for x in target_choices])}. "
1429 "Default is '%(default)s'. "
1430 "A 'full' send is sometimes also known as an 'initial' send.\n\n"))
1431 msg = "Thus, -x opts do not benefit from source != 'local' (which is the default already)." \
1432 if flag == "'-x'" else ""
1433 argument_group.add_argument(
1434 f"--{grup}-sources", action=NonEmptyStringAction, default="local", metavar="STRING",
1435 help=h("The ZFS sources to provide to the 'zfs get -s' CLI in order to fetch the ZFS dataset properties "
1436 f"that will be fed into the --{grup}-include/exclude-regex filter (see below). The sources are in "
1437 "the form of a comma-separated list (no spaces) containing one or more of the following choices: "
1438 "'local', 'default', 'inherited', 'temporary', 'received', 'none', with the default being '%(default)s'. "
1439 f"Uses 'zfs get -p -s ${grup}-sources all $SRC_DATASET' to fetch the "
1440 "properties to copy - https://openzfs.github.io/openzfs-docs/man/master/8/zfs-get.8.html. P.S: Note "
1441 "that the existing 'zfs send --props' option does not filter and that --props only reads properties "
1442 f"from the 'local' ZFS property source (https://github.com/openzfs/zfs/issues/13024). {msg}\n\n"))
1443 if option_name == ZFS_RECV_O:
1444 group_include_regex_default_help: str = f"The default regex is '{ZFS_RECV_O_INCLUDE_REGEX_DEFAULT}'."
1445 else:
1446 group_include_regex_default_help = ("The default is to include no properties, thus by default no extra "
1447 f"{flag} option is appended. ")
1448 argument_group.add_argument(
1449 f"--{grup}-include-regex", action=FileOrLiteralAction, default=None, const=[], nargs="*", metavar="REGEX",
1450 help=h(f"Take the output properties of --{grup}-sources (see above) and filter them such that we only "
1451 "retain the properties whose name matches at least one of the --include regexes but none of the "
1452 "--exclude regexes. If a property is excluded this decision is never reconsidered because exclude "
1453 f"takes precedence over include. Append each retained property to the list of {flag} options in "
1454 "--zfs-recv-program-opt(s), unless another '-o' or '-x' option with the same name already exists "
1455 "therein. In other words, --zfs-recv-program-opt(s) takes precedence.\n\n"
1456 f"Zero or more regexes can be specified. Specify zero regexes to append no extra {flag} option. "
1457 "A leading `!` character indicates logical negation, i.e. the regex matches if the regex with the "
1458 "leading `!` character removed does not match. "
1459 "If the option starts with a `+` prefix then regexes are read from the newline-separated "
1460 "UTF-8 text file given after the `+` prefix, one regex per line inside of the text file. The basename "
1461 "must contain the substring 'bzfs_argument_file'.\n\n"
1462 f"{group_include_regex_default_help} "
1463 f"Example: `--{grup}-include-regex compression recordsize`. "
1464 "More examples: `.*` (include all properties), `foo bar myapp:.*` (include three regexes) "
1465 f"`+{grup}_regexes_bzfs_argument_file.txt`, `+/path/to/{grup}_regexes_bzfs_argument_file.txt`\n\n"
1466 "See https://openzfs.github.io/openzfs-docs/man/master/7/zfsprops.7.html\n\n"))
1467 argument_group.add_argument(
1468 f"--{grup}-exclude-regex", action=FileOrLiteralAction, nargs="+", default=[], metavar="REGEX",
1469 help=h(f"Same syntax as --{grup}-include-regex (see above), and the default is to exclude no properties. "
1470 f"Example: --{grup}-exclude-regex encryptionroot keystatus origin volblocksize volsize\n\n"))
1471 parser.add_argument(
1472 "--version", action="version", version=f"{PROG_NAME}-{__version__}, by {PROG_AUTHOR}",
1473 help="Display version information and exit.\n\n")
1474 parser.add_argument(
1475 "--help, -h", action="help", # trick to ensure both --help and -h are shown in the help msg
1476 help="Show this help message and exit.\n\n")
1477 return parser
1478 # fmt: on