Coverage for bzfs_main/parallel_batch_cmd.py: 100% (37 statements)
# Copyright 2024 Wolfgang Hoschek AT mac DOT com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
15"""Helpers for running CLI commands in (sequential or parallel) batches, without exceeding operating system limits.
17The batch size aka max_batch_items splits one CLI command into one or more CLI commands. The resulting commands are executed
18sequentially (via functions *_batched()), or in parallel across max_workers threads (via functions *_parallel()).
20The degree of parallelism (max_workers) is specified by the job (via --threads).
21Batch size is a trade-off between resource consumption, latency, bandwidth and throughput.
23Example:
24--------
26- max_batch_items=1 (seq or par):
27```
28zfs list -t snapshot d1
29zfs list -t snapshot d2
30zfs list -t snapshot d3
31zfs list -t snapshot d4
32```
34- max_batch_items=2 (seq or par):
35```
36zfs list -t snapshot d1 d2
37zfs list -t snapshot d3 d4
39- max_batch_items=N (seq or par):
40```
41zfs list -t snapshot d1 d2 d3 d4
42```
43"""
from __future__ import (
    annotations,
)
import sys
from collections.abc import (
    Iterable,
    Iterator,
)
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    TypeVar,
)

from bzfs_main.connection import (
    SHARED,
    ConnectionPool,
    try_ssh_command,
)
from bzfs_main.parallel_iterator import (
    batch_cmd_iterator,
    get_max_command_line_bytes,
    parallel_iterator,
)
from bzfs_main.utils import (
    LOG_TRACE,
    drain,
)

if TYPE_CHECKING:  # pragma: no cover - for type hints only
    from bzfs_main.bzfs import (
        Job,
    )
    from bzfs_main.configuration import (
        Remote,
    )

T = TypeVar("T")


def run_ssh_cmd_batched(
    job: Job,
    r: Remote,
    cmd: list[str],
    cmd_args: Iterable[str],
    fn: Callable[[list[str]], Any],
    max_batch_items: int = 2**29,
    sep: str = " ",
) -> None:
    """Runs ssh command for each sequential batch of args, without creating a cmdline that's too big for the OS to handle."""
    drain(itr_ssh_cmd_batched(job, r, cmd, cmd_args, fn, max_batch_items=max_batch_items, sep=sep))


def itr_ssh_cmd_batched(
    job: Job,
    r: Remote,
    cmd: list[str],
    cmd_args: Iterable[str],
    fn: Callable[[list[str]], T],
    max_batch_items: int = 2**29,
    sep: str = " ",
) -> Iterator[T]:
    """Runs fn(cmd_args) in sequential batches with cmd, without creating a cmdline that's too big for the OS to handle."""
    max_bytes: int = _max_batch_bytes(job, r, cmd, sep)
    return batch_cmd_iterator(cmd_args=cmd_args, fn=fn, max_batch_items=max_batch_items, max_batch_bytes=max_bytes, sep=sep)
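
# Hedged sketch (same `job`/`remote` assumptions as above; `parse` is a
# hypothetical consumer). Unlike run_ssh_cmd_batched(), the iterator variant
# streams each batch's result back to the caller for incremental processing:
#
#   cmd = ["zfs", "list", "-t", "snapshot"]
#   for lines in itr_ssh_cmd_batched(
#       job, remote, cmd, datasets,
#       fn=lambda batch: (try_ssh_command(job, remote, LOG_TRACE, cmd=cmd + batch) or "").splitlines(),
#   ):
#       parse(lines)  # one list of output lines per executed mini batch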


def run_ssh_cmd_parallel(
    job: Job,
    r: Remote,
    cmd_args_list: Iterable[tuple[list[str], Iterable[str]]],
    fn: Callable[[list[str], list[str]], Any],
    max_batch_items: int = 2**29,
) -> None:
    """Runs multiple ssh commands in parallel, batching each set of args."""
    drain(itr_ssh_cmd_parallel(job, r, cmd_args_list, fn=fn, max_batch_items=max_batch_items, ordered=False))
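
# Hedged sketch of the cmd_args_list shape (dataset names are made up; `run` is
# a hypothetical executor): each (cmd, args) tuple becomes its own batched
# command stream, and all streams share up to max_workers threads:
#
#   run_ssh_cmd_parallel(
#       job, remote,
#       [(["zfs", "list", "-t", "snapshot"], ["d1", "d2"]),
#        (["zfs", "list", "-t", "bookmark"], ["d3", "d4"])],
#       fn=lambda cmd, batch: run(cmd + batch),
#   )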


def itr_ssh_cmd_parallel(
    job: Job,
    r: Remote,
    cmd_args_list: Iterable[tuple[list[str], Iterable[str]]],
    fn: Callable[[list[str], list[str]], T],
    max_batch_items: int = 2**29,
    ordered: bool = True,
) -> Iterator[T]:
    """Streams results from multiple parallel (batched) SSH commands.

    When ordered=True, preserves the order of the batches as provided by cmd_args_list (i.e. yields results in the same
    order as the input), not in completion order. When ordered=False, yields results as they complete, for minimum latency.
    """
    return parallel_iterator(
        iterator_builder=lambda executr: (
            itr_ssh_cmd_batched(
                job, r, cmd, cmd_args, lambda batch, cmd=cmd: executr.submit(fn, cmd, batch), max_batch_items=max_batch_items  # type: ignore[misc]
            )
            for cmd, cmd_args in cmd_args_list
        ),
        max_workers=job.max_workers[r.location],
        ordered=ordered,
        termination_event=job.termination_event,
    )
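
# Hedged sketch of the `ordered` flag (same assumptions as above): ordered=True
# yields d1's batch results before d2's even if d2's command completes first,
# which matters when downstream consumers require input order; ordered=False
# trades that determinism for lower latency:
#
#   cmd = ["zfs", "list", "-t", "snapshot"]
#   for lines in itr_ssh_cmd_parallel(
#       job, remote, [(cmd, ["d1", "d2", "d3", "d4"])],
#       fn=lambda cmd, batch: (try_ssh_command(job, remote, LOG_TRACE, cmd=cmd + batch) or "").splitlines(),
#       ordered=True,  # ordered=False would yield in completion order instead
#   ):
#       ...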


def zfs_list_snapshots_in_parallel(
    job: Job, r: Remote, cmd: list[str], datasets: list[str], ordered: bool = True
) -> Iterator[list[str]]:
    """Runs 'zfs list -t snapshot' on multiple datasets at the same time.

    Implemented with a time and space efficient streaming algorithm; easily scales to millions of datasets and any
    number of snapshots. Attempts to use at least 4 datasets per remote cmd mini batch to reflect increased
    communication latency.
    """
    max_workers: int = job.max_workers[r.location]
    max_batch_items: int = min(
        job.max_datasets_per_minibatch_on_list_snaps[r.location],
        max(
            len(datasets) // (max_workers * 8),
            4 if r.ssh_user_host else 1,
        ),
    )
    return itr_ssh_cmd_parallel(
        job,
        r,
        [(cmd, datasets)],
        fn=lambda cmd, batch: (try_ssh_command(job, r, LOG_TRACE, cmd=cmd + batch) or "").splitlines(),
        max_batch_items=max_batch_items,
        ordered=ordered,
    )
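
# Hedged worked example of the mini batch sizing above (numbers are made up):
# with len(datasets)=1000 and max_workers=8, the load-based term is
# 1000 // (8 * 8) = 15; over ssh (r.ssh_user_host set) the floor is 4, so
# max(15, 4) = 15 datasets per mini batch, further capped by
# max_datasets_per_minibatch_on_list_snaps. With only 16 datasets the term is
# 16 // 64 = 0, so the floor kicks in: 4 per mini batch remotely, 1 locally.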


def _max_batch_bytes(job: Job, r: Remote, cmd: list[str], sep: str) -> int:
    """Avoids creating a cmdline that's too big for the OS to handle.

    The calculation subtracts 'header_bytes', which accounts for the full SSH invocation (including control
    socket/options) plus the fixed subcommand prefix, so that the remaining budget is reserved exclusively for the
    batched arguments.
    """
    assert isinstance(sep, str)
    max_bytes: int = min(_get_max_command_line_bytes(job, "local"), _get_max_command_line_bytes(job, r.location))
    # Max size of a single argument is 128KB on Linux - https://lists.gnu.org/archive/html/bug-bash/2020-09/msg00095.html
    max_bytes = max_bytes if sep == " " else min(max_bytes, 128 * 1024 - 1)  # e.g. 'zfs destroy foo@s1,s2,...,sN'
    conn_pool: ConnectionPool = job.params.connection_pools[r.location].pool(SHARED)
    with conn_pool.connection() as conn:
        cmd = conn.ssh_cmd + cmd
    header_bytes: int = len(" ".join(cmd).encode(sys.getfilesystemencoding()))
    return max_bytes - header_bytes
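
# Hedged worked example (numbers are made up): if both sides allow 256 KiB
# command lines and sep="," (the 'zfs destroy foo@s1,s2,...,sN' case), the
# budget first drops to min(256 * 1024, 128 * 1024 - 1) = 131071 bytes, because
# the batch is ultimately joined into a single argument; if the ssh invocation
# plus the fixed subcommand prefix encodes to 200 bytes, the batched args may
# then occupy at most 131071 - 200 = 130871 bytes.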


def _get_max_command_line_bytes(job: Job, location: str, os_name: str | None = None) -> int:
    """Remote flavor of os.sysconf("SC_ARG_MAX") - size(os.environb) - safety margin."""
    os_name = os_name if os_name else job.params.available_programs[location].get("os")
    os_name = os_name if os_name else "n/a"
    max_bytes = get_max_command_line_bytes(os_name)
    if job.max_command_line_bytes is not None:
        return job.max_command_line_bytes  # for testing only
    else:
        return max_bytes
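
# Hedged illustration (platform-dependent; the numbers are not from this module):
# on Linux, os.sysconf("SC_ARG_MAX") commonly reports 2 MiB (2097152 bytes), so
# after subtracting the environment size and a safety margin the usable budget is
# somewhat smaller; setting job.max_command_line_bytes bypasses this calculation
# entirely, which is intended for testing only.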