Skip to content

Forks Plugin

A pytest plugin to configure the forks in the test session. It parametrizes tests based on the user-provided fork range and the tests' specified validity markers.

Pytest plugin to enable fork range configuration for the test session.

pytest_addoption(parser)

Add command-line options to pytest.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
def pytest_addoption(parser: pytest.Parser) -> None:
    """Add command-line options to pytest.

    Registers the fork-selection options (``--forks``, ``--fork``,
    ``--from`` and ``--until``) under a dedicated "Forks" option group.
    """
    fork_group = parser.getgroup(
        "Forks", "Specify the fork range to generate fixtures for"
    )
    # --forks is a boolean flag; the remaining options each store a fork
    # name string and default to the empty string (i.e. unset).
    fork_group.addoption(
        "--forks",
        action="store_true",
        dest="show_fork_help",
        default=False,
        help="Display forks supported by the test framework and exit.",
    )
    string_options = (
        (
            "--fork",
            "single_fork",
            "Only fill tests for the specified fork.",
        ),
        (
            "--from",
            "forks_from",
            "Fill tests from and including the specified fork.",
        ),
        (
            "--until",
            "forks_until",
            "Fill tests until and including the specified fork.",
        ),
    )
    for flag, dest, help_text in string_options:
        fork_group.addoption(
            flag,
            action="store",
            dest=dest,
            default="",
            help=help_text,
        )

ForkCovariantParameter dataclass

Value list for a fork covariant parameter in a given fork.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
83
84
85
86
87
88
@dataclass(kw_only=True)
class ForkCovariantParameter:
    """Value list for a fork covariant parameter in a given fork."""

    # Argument names covered by this parameter (one or more; a marker may
    # parametrize several names at once).
    names: List[str]
    # One pytest.param(...) entry per value (or value tuple) for `names`.
    values: List[ParameterSet]

ForkParametrizer

A parametrizer for a test case that is parametrized by the fork.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
class ForkParametrizer:
    """A parametrizer for a test case that is parametrized by the fork."""

    # Fork that every parameter combination produced by this object targets.
    fork: Fork | TransitionFork
    # NOTE(review): no `@dataclass` decorator is visible on this class, so
    # this `field(...)` call just leaves a dataclasses.Field object as the
    # class attribute; `__init__` below always rebinds the instance
    # attribute, so the default is never used — confirm against the full
    # module whether a decorator was dropped.
    fork_covariant_parameters: List[ForkCovariantParameter] = field(
        default_factory=list
    )

    def __init__(
        self,
        fork: Fork | TransitionFork,
        marks: List[pytest.MarkDecorator | pytest.Mark] | None = None,
        fork_covariant_parameters: List[ForkCovariantParameter] | None = None,
    ):
        """
        Initialize a new fork parametrizer object for a given fork.

        Args:
          fork: The fork for which the test cases will be parametrized.
          marks: A list of pytest marks to apply to all the test cases
                 parametrized by the fork.
          fork_covariant_parameters: A list of fork covariant parameters
                                     for the test case, for unit testing
                                     purposes only.

        """
        if marks is None:
            marks = []
        # The fork itself is always the first covariant parameter
        # ("parametrized_fork"); the per-fork marks are attached to it so
        # they propagate to every combination built in `argvalues`.
        self.fork_covariant_parameters = [
            ForkCovariantParameter(
                names=["parametrized_fork"],
                values=[
                    pytest.param(
                        fork,
                        marks=marks,
                    )
                ],
            )
        ]
        if fork_covariant_parameters is not None:
            self.fork_covariant_parameters.extend(fork_covariant_parameters)
        self.fork = fork

    @property
    def argnames(self) -> List[str]:
        """Return the parameter names for the test case."""
        argnames = []
        for p in self.fork_covariant_parameters:
            argnames.extend(p.names)
        return argnames

    @property
    def argvalues(self) -> List[ParameterSet]:
        """Return the parameter values for the test case."""
        # Cartesian product across all covariant parameters: each resulting
        # combination becomes one parametrized test case.
        parameter_set_combinations = itertools.product(
            # Add the values for each parameter, all of them are lists of at
            # least one element.
            *[p.values for p in self.fork_covariant_parameters],
        )

        parameter_set_list: List[ParameterSet] = []
        for parameter_set_combination in parameter_set_combinations:
            params: List[Any] = []
            marks: List[pytest.Mark | pytest.MarkDecorator] = []
            test_id: str | None = None
            for p in parameter_set_combination:
                assert isinstance(p, ParameterSet)
                params.extend(p.values)
                if p.marks:
                    marks.extend(p.marks)
                # Compose the test id from every sub-parameter id; the fork
                # name prefixes the first id encountered, subsequent ids are
                # appended with '-' separators.
                if p.id:
                    if test_id is None:
                        test_id = f"fork_{self.fork.name()}-{p.id}"
                    else:
                        test_id = f"{test_id}-{p.id}"
            parameter_set_list.append(
                pytest.param(*params, marks=marks, id=test_id)
            )

        return parameter_set_list

__init__(fork, marks=None, fork_covariant_parameters=None)

Initialize a new fork parametrizer object for a given fork.

Parameters:

Name Type Description Default
fork Fork | TransitionFork

The fork for which the test cases will be parametrized.

required
marks List[MarkDecorator | Mark] | None

A list of pytest marks to apply to all the test cases parametrized by the fork.

None
fork_covariant_parameters List[ForkCovariantParameter] | None

A list of fork covariant parameters for the test case, for unit testing purposes only.

None
Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
def __init__(
    self,
    fork: Fork | TransitionFork,
    marks: List[pytest.MarkDecorator | pytest.Mark] | None = None,
    fork_covariant_parameters: List[ForkCovariantParameter] | None = None,
):
    """
    Initialize a new fork parametrizer object for a given fork.

    Args:
      fork: The fork for which the test cases will be parametrized.
      marks: A list of pytest marks to apply to all the test cases
             parametrized by the fork.
      fork_covariant_parameters: A list of fork covariant parameters
                                 for the test case, for unit testing
                                 purposes only.

    """
    if marks is None:
        marks = []
    self.fork_covariant_parameters = [
        ForkCovariantParameter(
            names=["parametrized_fork"],
            values=[
                pytest.param(
                    fork,
                    marks=marks,
                )
            ],
        )
    ]
    if fork_covariant_parameters is not None:
        self.fork_covariant_parameters.extend(fork_covariant_parameters)
    self.fork = fork

argnames property

Return the parameter names for the test case.

argvalues property

Return the parameter values for the test case.

CovariantDescriptor

A descriptor for a parameter that is covariant with the fork: the parametrized values change depending on the fork.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
class CovariantDescriptor:
    """
    A descriptor for a parameter that is covariant with the fork: the
    parametrized values change depending on the fork.
    """

    # NOTE(review): class-level mutable default ([]) — harmless here because
    # `__init__` always rebinds `self.argnames`, but instances would
    # otherwise share the list.
    argnames: List[str] = []
    # Callable mapping a fork to the values for the parameter(s).
    fn: Callable[[Fork | TransitionFork], List[Any] | Iterable[Any]] | None = (
        None
    )

    # Optional predicate used to filter value combinations.
    selector: Callable[..., bool] | None = None
    # Marks applied to each produced value; may also be a callable that
    # computes the marks from the parameter values (see process_value).
    marks: (
        None
        | pytest.Mark
        | pytest.MarkDecorator
        | List[pytest.Mark | pytest.MarkDecorator]
    ) = None

    def __init__(
        self,
        argnames: List[str] | str,
        fn: Callable[[Fork | TransitionFork], List[Any] | Iterable[Any]]
        | None = None,
        *,
        selector: Callable[..., bool] | None = None,
        marks: None
        | pytest.Mark
        | pytest.MarkDecorator
        | List[pytest.Mark | pytest.MarkDecorator] = None,
    ):
        """
        Initialize a new covariant descriptor.

        Args:
          argnames: The names of the parameters that are covariant with the
                    fork.
          fn: A function that takes the fork as the single parameter and
              returns the values for the parameter for each fork.
          selector: A function that filters the values for the parameter.
          marks: A list of pytest marks to apply to the test cases
                 parametrized by the parameter.

        """
        # Accept either a list of names or a single comma-separated string.
        self.argnames = (
            [argname.strip() for argname in argnames.split(",")]
            if isinstance(argnames, str)
            else argnames
        )
        self.fn = fn
        self.selector = selector
        self.marks = marks

    def process_value(
        self,
        parameters_values: Any | List[Any] | Tuple[Any] | ParameterSet,
    ) -> ParameterSet | None:
        """
        Process a value for a covariant parameter.

        The `selector` is applied to parameters_values in order to filter them.
        """
        # Pre-built ParameterSet values are passed through untouched: the
        # selector and marks are NOT applied to them.
        if isinstance(parameters_values, ParameterSet):
            return parameters_values

        if len(self.argnames) == 1:
            # Wrap values that are meant for a single parameter in a list
            parameters_values = [parameters_values]
        marks = self.marks
        # Call the selector with only as many leading values as its
        # signature declares (co_argcount), so a selector may inspect just a
        # prefix of the parametrized values.
        if self.selector is None or self.selector(
            *parameters_values[: self.selector.__code__.co_argcount]
        ):
            # `marks` may itself be a function of the values; resolve it to
            # a concrete mark (or list/None) before building the param.
            if isinstance(marks, FunctionType):
                marks = marks(*parameters_values[: marks.__code__.co_argcount])
            assert not isinstance(marks, FunctionType), (
                "marks must be a list or None"
            )
            if marks is None:
                marks = []
            elif not isinstance(marks, list):
                marks = [marks]

            return pytest.param(*parameters_values, marks=marks)

        # Value rejected by the selector.
        return None

    def process_values(self, values: Iterable[Any]) -> List[ParameterSet]:
        """
        Filter the values for the covariant parameter.

        I.e. if the marker has an argument, the argument is interpreted as a
        lambda function that filters the values.
        """
        processed_values: List[ParameterSet] = []
        for value in values:
            processed_value = self.process_value(value)
            if processed_value is not None:
                processed_values.append(processed_value)
        return processed_values

    def add_values(self, fork_parametrizer: ForkParametrizer) -> None:
        """Add the values for the covariant parameter to the parametrizer."""
        if self.fn is None:
            return
        fork = fork_parametrizer.fork
        values = self.fn(fork)
        values = self.process_values(values)
        # At least one value must survive the selector filtering.
        # NOTE: `assert` is stripped when Python runs with -O.
        assert len(values) > 0
        fork_parametrizer.fork_covariant_parameters.append(
            ForkCovariantParameter(names=self.argnames, values=values)
        )

__init__(argnames, fn=None, *, selector=None, marks=None)

Initialize a new covariant descriptor.

Parameters:

Name Type Description Default
argnames List[str] | str

The names of the parameters that are covariant with the fork.

required
fn Callable[[Fork | TransitionFork], List[Any] | Iterable[Any]] | None

A function that takes the fork as the single parameter and returns the values for the parameter for each fork.

None
selector Callable[..., bool] | None

A function that filters the values for the parameter.

None
marks None | Mark | MarkDecorator | List[Mark | MarkDecorator]

A list of pytest marks to apply to the test cases parametrized by the parameter.

None
Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
def __init__(
    self,
    argnames: List[str] | str,
    fn: Callable[[Fork | TransitionFork], List[Any] | Iterable[Any]]
    | None = None,
    *,
    selector: Callable[..., bool] | None = None,
    marks: None
    | pytest.Mark
    | pytest.MarkDecorator
    | List[pytest.Mark | pytest.MarkDecorator] = None,
):
    """
    Initialize a new covariant descriptor.

    Args:
      argnames: The names of the parameters that are covariant with the
                fork.
      fn: A function that takes the fork as the single parameter and
          returns the values for the parameter for each fork.
      selector: A function that filters the values for the parameter.
      marks: A list of pytest marks to apply to the test cases
             parametrized by the parameter.

    """
    self.argnames = (
        [argname.strip() for argname in argnames.split(",")]
        if isinstance(argnames, str)
        else argnames
    )
    self.fn = fn
    self.selector = selector
    self.marks = marks

process_value(parameters_values)

Process a value for a covariant parameter.

The selector is applied to parameters_values in order to filter them.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
def process_value(
    self,
    parameters_values: Any | List[Any] | Tuple[Any] | ParameterSet,
) -> ParameterSet | None:
    """
    Process a value for a covariant parameter.

    The `selector` is applied to parameters_values in order to filter them.
    """
    if isinstance(parameters_values, ParameterSet):
        return parameters_values

    if len(self.argnames) == 1:
        # Wrap values that are meant for a single parameter in a list
        parameters_values = [parameters_values]
    marks = self.marks
    if self.selector is None or self.selector(
        *parameters_values[: self.selector.__code__.co_argcount]
    ):
        if isinstance(marks, FunctionType):
            marks = marks(*parameters_values[: marks.__code__.co_argcount])
        assert not isinstance(marks, FunctionType), (
            "marks must be a list or None"
        )
        if marks is None:
            marks = []
        elif not isinstance(marks, list):
            marks = [marks]

        return pytest.param(*parameters_values, marks=marks)

    return None

process_values(values)

Filter the values for the covariant parameter.

I.e. if the marker has an argument, the argument is interpreted as a lambda function that filters the values.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
259
260
261
262
263
264
265
266
267
268
269
270
271
def process_values(self, values: Iterable[Any]) -> List[ParameterSet]:
    """
    Filter the values for the covariant parameter.

    I.e. if the marker has an argument, the argument is interpreted as a
    lambda function that filters the values.
    """
    processed_values: List[ParameterSet] = []
    for value in values:
        processed_value = self.process_value(value)
        if processed_value is not None:
            processed_values.append(processed_value)
    return processed_values

add_values(fork_parametrizer)

Add the values for the covariant parameter to the parametrizer.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
273
274
275
276
277
278
279
280
281
282
283
def add_values(self, fork_parametrizer: ForkParametrizer) -> None:
    """Add the values for the covariant parameter to the parametrizer."""
    if self.fn is None:
        return
    fork = fork_parametrizer.fork
    values = self.fn(fork)
    values = self.process_values(values)
    assert len(values) > 0
    fork_parametrizer.fork_covariant_parameters.append(
        ForkCovariantParameter(names=self.argnames, values=values)
    )

CovariantDecorator

Bases: CovariantDescriptor

A marker used to parametrize a function by a covariant parameter with the values returned by a fork method.

The decorator must be subclassed with the appropriate class variables before initialization.

Attributes:

Name Type Description
marker_name str

Name of the marker.

description str

Description of the marker.

fork_attribute_name str

Name of the method to call on the fork to get the values.

marker_parameter_names List[str]

Names of the parameters to be parametrized in the test function.

indirect bool

Whether the parameters should be passed through fixtures (indirect parametrization).

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
class CovariantDecorator(CovariantDescriptor):
    """
    A marker used to parametrize a function by a covariant parameter with the
    values returned by a fork method.

    The decorator must be subclassed with the appropriate class variables
    before initialization.

    Attributes:
      marker_name: Name of the marker.
      description: Description of the marker.
      fork_attribute_name: Name of the method to call on the fork to
                           get the values.
      marker_parameter_names: Names of the parameters to be parametrized
                              in the test function.
      indirect: Whether the parameters should be passed through fixtures
                (indirect parametrization).

    """

    marker_name: ClassVar[str]
    description: ClassVar[str]
    fork_attribute_name: ClassVar[str]
    marker_parameter_names: ClassVar[List[str]]
    indirect: ClassVar[bool]

    def __init__(self, metafunc: Metafunc):
        """
        Initialize the covariant decorator.

        The decorator must already be subclassed with the appropriate class
        variables before initialization.

        Args:
            metafunc: The metafunc object that pytest uses when generating
                tests.

        """
        self.metafunc = metafunc

        # NOTE(review): `iter_markers` returns an iterator and is not
        # documented to return None, so this guard is presumably defensive
        # dead code — confirm against the pytest version in use.
        m = metafunc.definition.iter_markers(self.marker_name)
        if m is None:
            return
        marker_list = list(m)
        assert len(marker_list) <= 1, (
            f"Multiple markers {self.marker_name} found"
        )
        # Test is not decorated with this marker: leave the descriptor
        # inert (self.fn stays unset at the class default of None).
        if len(marker_list) == 0:
            return
        marker = marker_list[0]

        assert marker is not None
        assert len(marker.args) == 0, "Only keyword arguments are supported"

        kwargs = dict(marker.kwargs)

        # Default selector accepts everything (its single argument is
        # ignored).
        selector = kwargs.pop("selector", lambda _: True)
        assert isinstance(selector, FunctionType), (
            "selector must be a function"
        )

        marks = kwargs.pop("marks", None)

        # Reject any unrecognized keyword arguments on the marker.
        if len(kwargs) > 0:
            raise ValueError(
                f"Unknown arguments to {self.marker_name}: {kwargs}"
            )

        def fn(fork: Fork | TransitionFork) -> List[Any]:
            # Values are obtained by calling the configured fork method;
            # only plain (non-transition) forks are supported.
            if fork.is_transition_fork:
                raise ValueError(f"Transition forks are not supported: {fork}")
            return getattr(fork, self.fork_attribute_name)()

        super().__init__(
            argnames=self.marker_parameter_names,
            fn=fn,
            selector=selector,
            marks=marks,
        )

__init__(metafunc)

Initialize the covariant decorator.

The decorator must already be subclassed with the appropriate class variables before initialization.

Parameters:

Name Type Description Default
metafunc Metafunc

The metafunc object that pytest uses when generating tests.

required
Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
def __init__(self, metafunc: Metafunc):
    """
    Initialize the covariant decorator.

    The decorator must already be subclassed with the appropriate class
    variables before initialization.

    Args:
        metafunc: The metafunc object that pytest uses when generating
            tests.

    """
    self.metafunc = metafunc

    m = metafunc.definition.iter_markers(self.marker_name)
    if m is None:
        return
    marker_list = list(m)
    assert len(marker_list) <= 1, (
        f"Multiple markers {self.marker_name} found"
    )
    if len(marker_list) == 0:
        return
    marker = marker_list[0]

    assert marker is not None
    assert len(marker.args) == 0, "Only keyword arguments are supported"

    kwargs = dict(marker.kwargs)

    selector = kwargs.pop("selector", lambda _: True)
    assert isinstance(selector, FunctionType), (
        "selector must be a function"
    )

    marks = kwargs.pop("marks", None)

    if len(kwargs) > 0:
        raise ValueError(
            f"Unknown arguments to {self.marker_name}: {kwargs}"
        )

    def fn(fork: Fork | TransitionFork) -> List[Any]:
        if fork.is_transition_fork:
            raise ValueError(f"Transition forks are not supported: {fork}")
        return getattr(fork, self.fork_attribute_name)()

    super().__init__(
        argnames=self.marker_parameter_names,
        fn=fn,
        selector=selector,
        marks=marks,
    )

covariant_decorator(*, marker_name, description, fork_attribute_name, argnames, indirect=False)

Generate a new covariant decorator subclass.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
def covariant_decorator(
    *,
    marker_name: str,
    description: str,
    fork_attribute_name: str,
    argnames: List[str],
    indirect: bool = False,
) -> Type[CovariantDecorator]:
    """Generate a new covariant decorator subclass.

    The returned type is a `CovariantDecorator` subclass whose required
    class variables are populated from the keyword arguments; the subclass
    is named after the marker it registers.
    """
    namespace = {
        "marker_name": marker_name,
        "description": description,
        "fork_attribute_name": fork_attribute_name,
        "marker_parameter_names": argnames,
        "indirect": indirect,
    }
    return type(marker_name, (CovariantDecorator,), namespace)

pytest_configure(config)

Register the plugin's custom markers and process command-line options.

Custom marker registration: https://docs.pytest.org/en/7.1.x/how-to/writing_plugins.html#registering-custom-markers

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
def pytest_configure(config: pytest.Config) -> None:
    """
    Register the plugin's custom markers and process command-line options.

    Custom marker registration:
    https://docs.pytest.org/en/7.1.x/how-to/
    writing_plugins.html#registering-custom-markers
    """
    # Register the fork validity markers so pytest does not warn about
    # unknown marks when tests use them.
    config.addinivalue_line(
        "markers",
        (
            "valid_at_transition_to(fork, subsequent_forks: bool = False, "
            "until: str | None = None): specifies a test case is only valid "
            "at the specified fork transition boundaries"
        ),
    )
    config.addinivalue_line(
        "markers",
        "valid_from(fork): specifies from which fork a test case is valid",
    )
    config.addinivalue_line(
        "markers",
        (
            "valid_until(fork): specifies until which fork a test "
            "case is valid (inclusive)"
        ),
    )
    config.addinivalue_line(
        "markers",
        (
            "valid_before(fork_or_eip): specifies the fork or EIP "
            "before which a test case is valid (exclusive)"
        ),
    )
    config.addinivalue_line(
        "markers",
        "valid_at(fork): specifies at which fork a test case is valid",
    )
    config.addinivalue_line(
        "markers",
        (
            "parametrize_by_fork(names, values_fn): parametrize a test case "
            "by fork using the specified names and values returned by the "
            "function values_fn(fork)"
        ),
    )
    config.addinivalue_line(
        "markers",
        (
            "filter_combinations(predicate, *, reason): deselect "
            "parametrized test cases whose parameter combination does not "
            "satisfy predicate. The predicate receives parameter values as "
            "keyword arguments and must return True to keep the "
            "combination. *reason* is a short human-readable explanation "
            "shown in verbose collection output."
        ),
    )
    # Each covariant decorator also registers its own marker.
    for d in fork_covariant_decorators:
        config.addinivalue_line("markers", f"{d.marker_name}: {d.description}")

    available_forks_help = textwrap.dedent(
        f"""\
        Available forks:
        {", ".join(fork.name() for fork in ALL_FORKS)}
        """
    )
    available_forks_help += textwrap.dedent(
        f"""\
        Available transition forks:
        {", ".join([fork.name() for fork in get_transition_forks()])}
        """
    )

    def get_fork_option(
        config: pytest.Config, option_name: str, parameter_name: str
    ) -> Set[Fork]:
        """Post-process get option to allow for external fork conditions."""
        config_str = config.getoption(option_name)
        try:
            return ForkSetAdapter.validate_python(config_str)
        except InvalidForkError:
            print(
                f"Error: Unsupported fork provided to {parameter_name}:",
                config_str,
                "\n",
                file=sys.stderr,
            )
            print(available_forks_help, file=sys.stderr)
            # pytest.exit raises an exception, so this branch never falls
            # through; the function always returns a fork set or exits.
            pytest.exit(
                "Invalid command-line options.",
                returncode=pytest.ExitCode.USAGE_ERROR,
            )

    single_fork = get_fork_option(config, "single_fork", "--fork")
    forks_from = get_fork_option(config, "forks_from", "--from")
    forks_until = get_fork_option(config, "forks_until", "--until")
    show_fork_help = config.getoption("show_fork_help")

    dev_forks_help = textwrap.dedent(
        "To run tests for a fork under active development, it must be "
        "specified explicitly via --until=FORK.\n"
        "Tests are only run for deployed mainnet forks by default, i.e., "
        f"until {get_deployed_forks()[-1].name()}.\n"
    )
    if show_fork_help:
        print(available_forks_help)
        print(dev_forks_help)
        pytest.exit("After displaying help.", returncode=0)

    # --fork is shorthand for an exact fork; mixing it with a range is
    # ambiguous and rejected.
    if single_fork and (forks_from or forks_until):
        print(
            "Error: --fork cannot be used in combination "
            "with --from or --until",
            file=sys.stderr,
        )
        pytest.exit(
            "Invalid command-line options.",
            returncode=pytest.ExitCode.USAGE_ERROR,
        )

    # Presumably set by another plugin/command before this hook runs;
    # defaults to including transition forks.
    transition_forks = not getattr(config, "skip_transition_forks", False)

    selected_fork_set = get_selected_fork_set(
        single_fork=single_fork,
        forks_from=forks_from,
        forks_until=forks_until,
        transition_forks=transition_forks,
    )
    # `single_fork_mode` is another dynamically-set config attribute; when
    # active the session requires exactly one fork to be selected.
    if (
        getattr(config, "single_fork_mode", False)
        and len(selected_fork_set) != 1
    ):
        fork_count = len(selected_fork_set)
        pytest.exit(
            f"""
            Expected exactly one fork to be specified, got {fork_count}
            ({selected_fork_set}).
            Make sure to specify exactly one fork using the --fork
            command line argument.
            """,
            returncode=pytest.ExitCode.USAGE_ERROR,
        )
    # Stash the selection on the config for other hooks/fixtures (dynamic
    # attribute, hence the type: ignore).
    config.selected_fork_set = selected_fork_set  # type: ignore

    if not selected_fork_set:
        print(
            f"Error: --from {','.join(fork.name() for fork in forks_from)} "
            f"--until {','.join(fork.name() for fork in forks_until)} "
            "creates an empty fork range.",
            file=sys.stderr,
        )
        pytest.exit(
            "Command-line options produce empty fork range.",
            returncode=pytest.ExitCode.USAGE_ERROR,
        )

    # Exclude forks the configured transition tool (t8n) cannot handle; a
    # fork is unsupported if either side of its transition is unsupported.
    config.unsupported_forks: Set[Fork | TransitionFork] = set()  # type: ignore
    t8n: TransitionTool | None = getattr(config, "t8n", None)
    if t8n:
        config.unsupported_forks = frozenset(  # type: ignore
            fork
            for fork in selected_fork_set
            if not t8n.is_fork_supported(fork.transitions_from())
            or not t8n.is_fork_supported(fork.transitions_to())
        )
        logger.debug(
            f"List of unsupported forks: {list(config.unsupported_forks)}"  # type: ignore
        )

pytest_report_header(config, start_path)

Pytest hook called to obtain the report header.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
@pytest.hookimpl(trylast=True)
def pytest_report_header(config: pytest.Config, start_path: Any) -> List[str]:
    """Pytest hook called to obtain the report header."""
    del start_path

    # ANSI styling for the header lines.
    # NOTE(review): "\033[39;49m" resets foreground/background colors only,
    # not the bold attribute — confirm whether a full reset ("\033[0m") was
    # intended.
    bold = "\033[1m"
    warning = "\033[93m"
    reset = "\033[39;49m"
    selected_fork_set: Set[Fork | TransitionFork] = config.selected_fork_set  # type: ignore[attr-defined]
    fork_names = ", ".join(f.name() for f in sorted(selected_fork_set))
    header = [f"{bold}Generating fixtures for: {fork_names}{reset}"]
    unsupported_forks: Set[Fork | TransitionFork] = config.unsupported_forks  # type: ignore[attr-defined]
    if unsupported_forks:
        # Warn about forks that the transition tool cannot handle.
        t8n_name = config.t8n.__class__.__name__  # type: ignore[attr-defined]
        excluded = ", ".join(f.name() for f in sorted(unsupported_forks))
        header.append(
            f"{bold}{warning}Unsupported forks excluded from {t8n_name}: "
            f"{excluded}.{reset}"
        )
    if all(fork.is_deployed() for fork in selected_fork_set):
        # Remind the user that development forks require an explicit --until.
        header.append(
            bold
            + warning
            + "Only generating fixtures with stable/deployed forks: "
            "Specify an upcoming fork via --until=fork to "
            "add forks under development."
            + reset
        )
    return header

parametrized_fork(request)

Parametrize test cases by fork.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
673
674
675
676
@pytest.fixture(autouse=True)
def parametrized_fork(request: pytest.FixtureRequest) -> None:
    """
    Parametrize test cases by fork.

    The body is intentionally empty: this autouse fixture only exposes the
    parameter name so it appears on every test — presumably the actual
    parametrization happens in `pytest_generate_tests`; confirm against
    that hook's implementation.
    """
    pass

session_fork(request)

Session-wide fork object used if the plugin is configured in single-fork mode.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
@pytest.fixture(scope="session")
def session_fork(
    request: pytest.FixtureRequest,
) -> Fork | TransitionFork | None:
    """
    Session-wide fork object used if the plugin is configured in single-fork
    mode.

    Raises an `AssertionError` if requested while the plugin is not in
    single-fork mode.
    """
    config = request.config
    if not getattr(config, "single_fork_mode", False):
        raise AssertionError(
            "Plugin used `session_fork` fixture without the correct "
            "configuration (single_fork_mode)."
        )
    # In single-fork mode the selected set contains exactly one fork.
    return list(config.selected_fork_set)[0]  # type: ignore

ValidityMarker dataclass

Bases: ABC

Abstract class to represent any fork validity marker.

Subclassing this class allows for the creation of new validity markers.

Instantiation must be done per test function, and the process method must be called to process the fork arguments.

When subclassing, the following optional parameters can be set: - marker_name: Name of the marker, if not set, the class name is converted to underscore. - mutually_exclusive: List of other marker types incompatible with this one. - flag: Whether the marker is a flag and should always be included.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
@dataclass(kw_only=True)
class ValidityMarker(ABC):
    """
    Abstract class to represent any fork validity marker.

    Subclassing this class allows for the creation of new validity markers.

    Instantiation must be done per test function, and the `process` method must
    be called to process the fork arguments.

    When subclassing, the following optional parameters can be set:
    - marker_name: Name of the marker, if not set, the class name is
                   converted to underscore.
    - mutually_exclusive: List of other marker types incompatible
                          with this one.
    - flag: Whether the marker is a flag and should always be included.
    """

    # Pytest marker name; filled in by `__init_subclass__`.
    marker_name: ClassVar[str]
    # Marker classes that cannot be combined with this one on a single test.
    mutually_exclusive: ClassVar[List[Type["ValidityMarker"]]]
    # Flag markers are processed for every test, even when not applied.
    flag: ClassVar[bool]

    # The pytest mark found on the test function; None only for flag markers
    # that were not explicitly applied (see `get_all_validity_markers`).
    mark: Mark | None

    class ValidityMarkerCombinationError(Exception):
        """
        Combination of two validity markers generates an empty fork range.
        """

        pass

    def __init_subclass__(
        cls,
        marker_name: str | None = None,
        mutually_exclusive: List[Type["ValidityMarker"]] | None = None,
        flag: bool = False,
        **kwargs: Any,
    ) -> None:
        """Register the validity marker subclass."""
        super().__init_subclass__(**kwargs)
        if marker_name is None:
            # Use the class name converted to underscore:
            # https://stackoverflow.com/a/1176023
            marker_name = MARKER_NAME_REGEX.sub("_", cls.__name__).lower()
        cls.marker_name = marker_name
        cls.mutually_exclusive = (
            mutually_exclusive if mutually_exclusive else []
        )
        cls.flag = flag
        # Each marker name may only be registered once globally.
        if marker_name in ALL_VALIDITY_MARKERS:
            raise ValueError(f"Duplicate validity marker class: {cls}")
        ALL_VALIDITY_MARKERS[marker_name] = cls

    def __post_init__(self) -> None:
        """Post-initialize the validity marker."""
        if self.flag:
            # Flag markers may legitimately be instantiated without a mark.
            return
        if self.mark is None:
            raise Exception(f"Marker error '{self.marker_name}'")
        if len(self.mark.args) == 0:
            raise Exception(
                f"Missing fork argument with '{self.marker_name}' marker"
            )

    def process_fork_arguments(
        self, *fork_args: str
    ) -> Set[Fork | TransitionFork]:
        """Process the fork arguments."""
        fork_eips_set = ForkEIPSetAdapter.validate_python(fork_args)
        # A shrunken set means the same fork/EIP name was passed twice.
        if len(fork_eips_set) != len(fork_args):
            raise Exception(
                f"Duplicate argument specified in '{self.marker_name}'"
            )
        forks_set: Set[Fork | TransitionFork] = set()
        for fork_eip in fork_eips_set:
            if fork_eip.is_transition_fork:
                forks_set.add(fork_eip)
            else:
                if not fork_eip.is_eip():
                    forks_set.add(fork_eip)
                else:
                    # EIPs expand to every fork in which they are enabled.
                    forks_set |= fork_eip.enabling_forks()
        return forks_set

    @staticmethod
    def get_all_validity_markers(
        markers: Iterator[pytest.Mark],
    ) -> List["ValidityMarker"]:
        """Get all the validity markers applied to the test function."""
        markers_dict: Dict[str, ValidityMarker] = {}
        for marker in markers:
            for marker_name in ALL_VALIDITY_MARKERS:
                if marker.name == marker_name:
                    # Each validity marker may be applied at most once.
                    if marker_name in markers_dict:
                        raise Exception(
                            f"Too many '{marker_name}' markers applied to test"
                        )
                    cls = ALL_VALIDITY_MARKERS[marker.name]
                    markers_dict[marker_name] = cls(mark=marker)

        # Flag markers always take part in processing, applied or not.
        for cls in ALL_VALIDITY_MARKERS.values():
            if cls.flag and cls.marker_name not in markers_dict:
                markers_dict[cls.marker_name] = cls(mark=None)

        # Reject mutually exclusive marker combinations.
        for marker_name, validity_marker in markers_dict.items():
            for incompatible_marker in validity_marker.mutually_exclusive:
                if incompatible_marker.marker_name in markers_dict:
                    raise Exception(
                        f"The markers '{incompatible_marker.marker_name}' and "
                        f"'{marker_name}' can't be combined. "
                    )
        return list(markers_dict.values())

    @staticmethod
    def get_test_fork_set(
        validity_markers: List["ValidityMarker"],
    ) -> Set[Fork | TransitionFork]:
        """
        Get the set of forks where a test is valid from the validity markers
        and filters.
        """
        test_fork_set: Set[Fork | TransitionFork] = set()
        if not len(
            [
                validity_marker
                for validity_marker in validity_markers
                if not validity_marker.flag
            ]
        ):
            # Limit to non-transition forks if no validity markers were applied
            test_fork_set |= set(ALL_FORKS)
        else:
            # Start with all forks and transitions if any validity markers were
            # applied
            test_fork_set |= set(ALL_FORKS_WITH_TRANSITIONS)

        for v in validity_markers:
            # Apply the validity markers to the test function if applicable
            test_fork_set = v.process(test_fork_set)

        return test_fork_set

    @staticmethod
    def get_test_fork_set_from_markers(
        markers: Iterator[pytest.Mark],
    ) -> Set[Fork | TransitionFork]:
        """
        Get the set of forks where a test is valid using the markers applied to
        the test.
        """
        return ValidityMarker.get_test_fork_set(
            ValidityMarker.get_all_validity_markers(markers)
        )

    @staticmethod
    def get_test_fork_set_from_metafunc(
        metafunc: Metafunc,
    ) -> Set[Fork | TransitionFork]:
        """
        Get the set of forks where a test is valid using its pytest
        meta-function.
        """
        return ValidityMarker.get_test_fork_set_from_markers(
            metafunc.definition.iter_markers()
        )

    @staticmethod
    def is_validity_or_filter_marker(name: str) -> bool:
        """Check if a marker is a validity or filter marker."""
        return name in ALL_VALIDITY_MARKERS

    def process(
        self, forks: Set[Fork | TransitionFork]
    ) -> Set[Fork | TransitionFork]:
        """Process the fork arguments."""
        if self.mark is None:
            fork_set = self._process_with_marker_args()
        else:
            fork_set = self._process_with_marker_args(
                *self.mark.args, **self.mark.kwargs
            )
        if self.flag:
            # Flag markers subtract their fork set from the current set.
            return forks - fork_set
        if not fork_set:
            # Test is marked for an EIP that is not yet enabled in any
            # fork.
            return fork_set
        resulting_set = forks & fork_set
        if not resulting_set:
            raise ValidityMarker.ValidityMarkerCombinationError()
        return resulting_set

    @abstractmethod
    def _process_with_marker_args(
        self, *args: Any, **kwargs: Any
    ) -> Set[Fork | TransitionFork]:
        """
        Process the fork arguments as specified for the marker.

        Method must be implemented by the subclass.

        If the validity marker is of flag type, the returned forks will be
        subtracted from the fork set, otherwise the returned forks will be
        intersected with the current set.
        """
        pass

ValidityMarkerCombinationError

Bases: Exception

Combination of two validity markers generates an empty fork range.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
726
727
728
729
730
731
class ValidityMarkerCombinationError(Exception):
    """Combination of two validity markers generates an empty fork range."""

__init_subclass__(marker_name=None, mutually_exclusive=None, flag=False, **kwargs)

Register the validity marker subclass.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
def __init_subclass__(
    cls,
    marker_name: str | None = None,
    mutually_exclusive: List[Type["ValidityMarker"]] | None = None,
    flag: bool = False,
    **kwargs: Any,
) -> None:
    """Register the validity marker subclass in the global marker registry."""
    super().__init_subclass__(**kwargs)
    if marker_name is None:
        # Derive the marker name from the class name
        # (CamelCase -> snake_case): https://stackoverflow.com/a/1176023
        marker_name = MARKER_NAME_REGEX.sub("_", cls.__name__).lower()
    cls.marker_name = marker_name
    cls.mutually_exclusive = mutually_exclusive or []
    cls.flag = flag
    # Each marker name may only be registered once.
    if marker_name in ALL_VALIDITY_MARKERS:
        raise ValueError(f"Duplicate validity marker class: {cls}")
    ALL_VALIDITY_MARKERS[marker_name] = cls

__post_init__()

Post-initialize the validity marker.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
755
756
757
758
759
760
761
762
763
764
def __post_init__(self) -> None:
    """Post-initialize the validity marker."""
    if self.flag:
        return
    if self.mark is None:
        raise Exception(f"Marker error '{self.marker_name}'")
    if len(self.mark.args) == 0:
        raise Exception(
            f"Missing fork argument with '{self.marker_name}' marker"
        )

process_fork_arguments(*fork_args)

Process the fork arguments.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
def process_fork_arguments(
    self, *fork_args: str
) -> Set[Fork | TransitionFork]:
    """Resolve fork/EIP name arguments into the set of forks they denote."""
    fork_eips_set = ForkEIPSetAdapter.validate_python(fork_args)
    # A shrunken set means the same fork/EIP name was passed twice.
    if len(fork_eips_set) != len(fork_args):
        raise Exception(
            f"Duplicate argument specified in '{self.marker_name}'"
        )
    resolved: Set[Fork | TransitionFork] = set()
    for entry in fork_eips_set:
        if not entry.is_transition_fork and entry.is_eip():
            # An EIP expands to every fork in which it is enabled.
            resolved |= entry.enabling_forks()
        else:
            # Plain forks and transition forks are taken verbatim.
            resolved.add(entry)
    return resolved

get_all_validity_markers(markers) staticmethod

Get all the validity markers applied to the test function.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
@staticmethod
def get_all_validity_markers(
    markers: Iterator[pytest.Mark],
) -> List["ValidityMarker"]:
    """
    Get all the validity markers applied to the test function.

    Raises an exception when a validity marker is applied more than once
    or when two mutually exclusive markers are combined.
    """
    markers_dict: Dict[str, ValidityMarker] = {}
    for marker in markers:
        # Direct O(1) dict lookup instead of linearly scanning every
        # registered marker name for each marker applied to the test.
        cls = ALL_VALIDITY_MARKERS.get(marker.name)
        if cls is None:
            continue
        if marker.name in markers_dict:
            raise Exception(
                f"Too many '{marker.name}' markers applied to test"
            )
        markers_dict[marker.name] = cls(mark=marker)

    # Flag markers always take part in processing, applied or not.
    for cls in ALL_VALIDITY_MARKERS.values():
        if cls.flag and cls.marker_name not in markers_dict:
            markers_dict[cls.marker_name] = cls(mark=None)

    # Reject mutually exclusive marker combinations.
    for marker_name, validity_marker in markers_dict.items():
        for incompatible_marker in validity_marker.mutually_exclusive:
            if incompatible_marker.marker_name in markers_dict:
                raise Exception(
                    f"The markers '{incompatible_marker.marker_name}' and "
                    f"'{marker_name}' can't be combined. "
                )
    return list(markers_dict.values())

get_test_fork_set(validity_markers) staticmethod

Get the set of forks where a test is valid from the validity markers and filters.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
@staticmethod
def get_test_fork_set(
    validity_markers: List["ValidityMarker"],
) -> Set[Fork | TransitionFork]:
    """
    Get the set of forks where a test is valid from the validity markers
    and filters.
    """
    if all(validity_marker.flag for validity_marker in validity_markers):
        # No explicit validity markers: start from the non-transition
        # forks only.
        test_fork_set: Set[Fork | TransitionFork] = set(ALL_FORKS)
    else:
        # At least one explicit validity marker: start from all forks,
        # transition forks included.
        test_fork_set = set(ALL_FORKS_WITH_TRANSITIONS)

    # Let every marker narrow (or, for flags, subtract from) the set.
    for validity_marker in validity_markers:
        test_fork_set = validity_marker.process(test_fork_set)

    return test_fork_set

get_test_fork_set_from_markers(markers) staticmethod

Get the set of forks where a test is valid using the markers applied to the test.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
844
845
846
847
848
849
850
851
852
853
854
@staticmethod
def get_test_fork_set_from_markers(
    markers: Iterator[pytest.Mark],
) -> Set[Fork | TransitionFork]:
    """
    Get the set of forks where a test is valid using the markers applied to
    the test.
    """
    validity_markers = ValidityMarker.get_all_validity_markers(markers)
    return ValidityMarker.get_test_fork_set(validity_markers)

get_test_fork_set_from_metafunc(metafunc) staticmethod

Get the set of forks where a test is valid using its pytest meta-function.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
856
857
858
859
860
861
862
863
864
865
866
@staticmethod
def get_test_fork_set_from_metafunc(
    metafunc: Metafunc,
) -> Set[Fork | TransitionFork]:
    """
    Get the set of forks where a test is valid using its pytest
    meta-function.
    """
    test_markers = metafunc.definition.iter_markers()
    return ValidityMarker.get_test_fork_set_from_markers(test_markers)

is_validity_or_filter_marker(name) staticmethod

Check if a marker is a validity or filter marker.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
868
869
870
871
@staticmethod
def is_validity_or_filter_marker(name: str) -> bool:
    """Return whether `name` is a registered validity/filter marker."""
    registry = ALL_VALIDITY_MARKERS
    return name in registry

process(forks)

Process the fork arguments.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
def process(
    self, forks: Set[Fork | TransitionFork]
) -> Set[Fork | TransitionFork]:
    """Apply this validity marker to `forks` and return the resulting set."""
    marker_args = () if self.mark is None else self.mark.args
    marker_kwargs = {} if self.mark is None else self.mark.kwargs
    fork_set = self._process_with_marker_args(*marker_args, **marker_kwargs)
    if self.flag:
        # Flag markers subtract their fork set from the current set.
        return forks - fork_set
    if not fork_set:
        # Test is marked for an EIP that is not yet enabled in any
        # fork.
        return fork_set
    resulting_set = forks & fork_set
    if not resulting_set:
        raise ValidityMarker.ValidityMarkerCombinationError()
    return resulting_set

ValidFrom dataclass

Bases: ValidityMarker

Marker used to specify the fork from which the test is valid. The test will not be filled for forks before the specified fork.

import pytest

from execution_testing import  Alloc, StateTestFiller

@pytest.mark.valid_from("London")
def test_something_only_valid_after_london(
    state_test: StateTestFiller,
    pre: Alloc
):
    pass

In this example, the test will only be filled for the London fork and after, e.g. London, Paris, Shanghai, Cancun, etc.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
class ValidFrom(ValidityMarker):
    """
    Marker used to specify the fork from which the test is valid. The test
    will not be filled for any fork preceding the specified one.

    ```python
    import pytest

    from execution_testing import Alloc, StateTestFiller

    @pytest.mark.valid_from("London")
    def test_something_only_valid_after_london(
        state_test: StateTestFiller,
        pre: Alloc
    ):
        pass
    ```

    In this example, the test will only be filled for the London fork and
    after, e.g. London, Paris, Shanghai, Cancun, etc.
    """

    def _process_with_marker_args(
        self, *fork_args: str
    ) -> Set[Fork | TransitionFork]:
        """Return every known fork at or after any of the marked forks."""
        marked_forks = self.process_fork_arguments(*fork_args)
        return {
            candidate
            for candidate in ALL_FORKS
            if any(candidate >= fork for fork in marked_forks)
        }

ValidUntil dataclass

Bases: ValidityMarker

Marker to specify the fork until which the test is valid. The test will not be filled for forks after the specified fork.

import pytest

from execution_testing import  Alloc, StateTestFiller

@pytest.mark.valid_until("London")
def test_something_only_valid_until_london(
    state_test: StateTestFiller,
    pre: Alloc
):
    pass

In this example, the test will only be filled for the London fork and before, e.g. London, Berlin, Istanbul, etc.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
class ValidUntil(ValidityMarker):
    """
    Marker to specify the fork until which the test is valid. The test will
    not be filled for any fork after the specified one.

    ```python
    import pytest

    from execution_testing import Alloc, StateTestFiller

    @pytest.mark.valid_until("London")
    def test_something_only_valid_until_london(
        state_test: StateTestFiller,
        pre: Alloc
    ):
        pass
    ```

    In this example, the test will only be filled for the London fork and
    before, e.g. London, Berlin, Istanbul, etc.
    """

    def _process_with_marker_args(
        self, *fork_args: str
    ) -> Set[Fork | TransitionFork]:
        """Return every known fork at or before any of the marked forks."""
        marked_forks = self.process_fork_arguments(*fork_args)
        return {
            candidate
            for candidate in ALL_FORKS
            if any(candidate <= fork for fork in marked_forks)
        }

ValidBefore dataclass

Bases: ValidityMarker

Marker to specify the fork or EIP before which the test is valid.

The test will be filled for all forks strictly before the specified fork — the fork itself is excluded.

valid_before vs valid_until:

  • valid_until("Prague") — inclusive: runs through Prague.
  • valid_before("EIP7825") — exclusive: runs up to but not at the point where EIP-7825 activates.
import pytest

from execution_testing import  Alloc, StateTestFiller

@pytest.mark.valid_before("EIP7825")
def test_something_only_valid_before_eip7825(
    state_test: StateTestFiller,
    pre: Alloc
):
    pass

In this example, the test will only be filled for forks where EIP-7825 is not yet active.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
 980
 981
 982
 983
 984
 985
 986
 987
 988
 989
 990
 991
 992
 993
 994
 995
 996
 997
 998
 999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
class ValidBefore(ValidityMarker, mutually_exclusive=[ValidUntil]):
    """
    Marker to specify the fork or EIP before which the test is valid.

    The test will be filled for all forks strictly before the specified
    fork — the fork itself is **excluded**.

    ``valid_before`` vs ``valid_until``:

    - ``valid_until("Prague")`` — inclusive: runs *through* Prague.
    - ``valid_before("EIP7825")`` — exclusive: runs up to but *not at*
      the point where EIP-7825 activates.

    ```python
    import pytest

    from execution_testing import Alloc, StateTestFiller

    @pytest.mark.valid_before("EIP7825")
    def test_something_only_valid_before_eip7825(
        state_test: StateTestFiller,
        pre: Alloc
    ):
        pass
    ```

    In this example, the test will only be filled for forks where
    EIP-7825 is not yet active.
    """

    def _process_with_marker_args(
        self, *fork_args: str
    ) -> Set[Fork | TransitionFork]:
        """Return every known fork strictly before any of the marked forks."""
        marked_forks = self.process_fork_arguments(*fork_args)
        return {
            candidate
            for candidate in ALL_FORKS
            if any(candidate < fork for fork in marked_forks)
        }

ValidAt dataclass

Bases: ValidityMarker

Marker to specify each fork individually for which the test is valid.

import pytest

from execution_testing import  Alloc, StateTestFiller

@pytest.mark.valid_at("London", "Cancun")
def test_something_only_valid_at_london_and_cancun(
    state_test: StateTestFiller,
    pre: Alloc
):
    pass

In this example, the test will only be filled for the London and Cancun forks.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
class ValidAt(ValidityMarker):
    """
    Marker to specify each fork individually for which the test is valid.

    ```python
    import pytest

    from execution_testing import Alloc, StateTestFiller

    @pytest.mark.valid_at("London", "Cancun")
    def test_something_only_valid_at_london_and_cancun(
        state_test: StateTestFiller,
        pre: Alloc
    ):
        pass
    ```

    In this example, the test will only be filled for the London and Cancun
    forks.
    """

    def _process_with_marker_args(
        self, *fork_args: str
    ) -> Set[Fork | TransitionFork]:
        """Return exactly the forks named in the marker arguments."""
        marked_forks = self.process_fork_arguments(*fork_args)
        return marked_forks

ValidAtTransitionTo dataclass

Bases: ValidityMarker

Marker to specify that a test is only meant to be filled at the transition to the specified fork.

The test usually starts at the fork prior to the specified fork at genesis and at block 5 (for pre-merge forks) or at timestamp 15,000 (for post-merge forks) the fork transition occurs.

import pytest

from execution_testing import  Alloc, BlockchainTestFiller

@pytest.mark.valid_at_transition_to("London")
def test_something_that_happens_during_the_fork_transition_to_london(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc
):
    pass

In this example, the test will only be filled for the fork that transitions to London at block number 5, BerlinToLondonAt5, and no other forks.

To see or add a new transition fork, see the execution_testing.forks.forks.transition module.

Note that the test uses a BlockchainTestFiller fixture instead of a StateTestFiller, as the transition forks are used to test changes throughout the blockchain progression, and not just the state change of a single transaction.

This marker also accepts the following keyword arguments:

  • subsequent_forks: Force the test to also fill for subsequent fork transitions.
  • until: Implies subsequent_forks and limits how far the subsequent transition forks that the test is filled for may go.

For example:

@pytest.mark.valid_at_transition_to("Cancun", subsequent_forks=True)

produces tests on ShanghaiToCancunAtTime15k and CancunToPragueAtTime15k, and any transition fork after that.

And:

@pytest.mark.valid_at_transition_to("Cancun",
subsequent_forks=True, until="Prague")

produces tests on ShanghaiToCancunAtTime15k and CancunToPragueAtTime15k, but no forks after Prague.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
class ValidAtTransitionTo(
    ValidityMarker,
    mutually_exclusive=[ValidAt, ValidFrom, ValidUntil, ValidBefore],
):
    """
    Marker to specify that a test is only meant to be filled at the transition
    to the specified fork.

    The test usually starts at the fork prior to the specified fork at genesis
    and at block 5 (for pre-merge forks) or at timestamp 15,000 (for post-merge
    forks) the fork transition occurs.

    ```python
    import pytest

    from execution_testing import  Alloc, BlockchainTestFiller

    @pytest.mark.valid_at_transition_to("London")
    def test_something_that_happens_during_the_fork_transition_to_london(
        blockchain_test: BlockchainTestFiller,
        pre: Alloc
    ):
        pass
    ```

    In this example, the test will only be filled for the fork that transitions
    to London at block number 5, `BerlinToLondonAt5`, and no other forks.

    To see or add a new transition fork, see the
    `execution_testing.forks.forks.transition` module.

    Note that the test uses a `BlockchainTestFiller` fixture instead of a
    `StateTestFiller`, as the transition forks are used to test changes
    throughout the blockchain progression, and not just the state change of a
    single transaction.

    This marker also accepts the following keyword arguments:

    - `subsequent_forks`: Force the test to also fill for subsequent fork
    transitions.
    - `until`: Implies `subsequent_forks` and limits how far the subsequent
    transition forks that the test is filled for may go.

    For example:
    ```python
    @pytest.mark.valid_at_transition_to("Cancun", subsequent_forks=True)
    ```

    produces tests on `ShanghaiToCancunAtTime15k` and
    `CancunToPragueAtTime15k`, and any transition fork after that.

    And:
    ```python
    @pytest.mark.valid_at_transition_to("Cancun",
    subsequent_forks=True, until="Prague")
    ```

    produces tests on `ShanghaiToCancunAtTime15k` and
    `CancunToPragueAtTime15k`, but no forks after Prague.
    """

    def _process_with_marker_args(
        self,
        *fork_args: str,
        subsequent_forks: bool = False,
        until: str | None = None,
    ) -> Set[Fork | TransitionFork]:
        """Process the fork arguments."""
        forks: Set[Fork | TransitionFork] = self.process_fork_arguments(
            *fork_args
        )
        until_forks: Set[Fork | TransitionFork] | None = (
            None if until is None else self.process_fork_arguments(until)
        )
        if len(forks) == 0:
            raise Exception(
                "Missing fork argument with 'valid_at_transition_to' marker."
            )

        if len(forks) > 1:
            raise Exception(
                "Too many forks specified to 'valid_at_transition_to' marker."
            )

        resulting_set: Set[Fork | TransitionFork] = set()
        for fork in forks:
            resulting_set |= transition_fork_to(fork)
            if subsequent_forks:
                # Also include transition forks into every later fork, up to
                # the `until` bound when one was given.
                for transition_forks in (
                    transition_fork_to(f) for f in ALL_FORKS if f > fork
                ):
                    for transition_fork in transition_forks:
                        if transition_fork and (
                            until_forks is None
                            or any(
                                transition_fork <= until_fork
                                for until_fork in until_forks
                            )
                        ):
                            resulting_set.add(transition_fork)
        return resulting_set

ValidForBPOForks dataclass

Bases: ValidityMarker

Marker to specify that a test is valid for BPO forks.

import pytest

from execution_testing import  Alloc, BlockchainTestFiller

@pytest.mark.valid_for_bpo_forks()
def test_something_in_a_bpo_fork(
    blockchain_test: BlockchainTestFiller,
    pre: Alloc
):
    pass
Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
class ValidForBPOForks(
    ValidityMarker, marker_name="valid_for_bpo_forks", flag=True
):
    """
    Marker to specify that a test is valid for BPO forks.

    ```python
    import pytest

    from execution_testing import Alloc, BlockchainTestFiller

    @pytest.mark.valid_for_bpo_forks()
    def test_something_in_a_bpo_fork(
        blockchain_test: BlockchainTestFiller,
        pre: Alloc
    ):
        pass
    ```
    """

    def _process_with_marker_args(self) -> Set[Fork | TransitionFork]:
        """Process the fork arguments."""
        # Marker present: return the empty set.
        if self.mark is not None:
            return set()
        # Marker absent: collect every BPO fork together with the
        # transition forks leading into it.
        bpo_related: Set[Fork | TransitionFork] = set()
        for candidate in ALL_FORKS:
            if not candidate.bpo_fork():
                continue
            bpo_related.add(candidate)
            bpo_related |= transition_fork_to(candidate)
        return bpo_related

fork_markers(*, fork)

Return the marks that have to be added to a test given the fork.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
def fork_markers(
    *, fork: Fork | TransitionFork
) -> List[pytest.MarkDecorator | pytest.Mark]:
    """Return the marks that have to be added to a test given the fork."""
    # Place the generated fixtures in a per-fork subfolder, e.g.
    # "for_prague" for the Prague fork.
    subfolder_mark = pytest.mark.fixture_subfolder(
        level=0,
        prefix=f"for_{fork.name().lower()}",
    )
    return [subfolder_mark]

pytest_generate_tests(metafunc)

Pytest hook used to dynamically generate test cases.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
    """
    Pytest hook used to dynamically generate test cases.

    Resolves the fork set from the test's validity markers, intersects it
    with the fork set selected on the command line, and parametrizes the
    ``parametrized_fork`` fixture (plus any fork-covariant parameters).
    """
    test_name = metafunc.function.__name__
    try:
        # Resolve the set of forks this test is valid for from its
        # validity markers.
        test_fork_set = ValidityMarker.get_test_fork_set_from_metafunc(
            metafunc
        )
    except ValidityMarker.ValidityMarkerCombinationError:
        # The marker combination yields no forks; list the offending
        # markers so the test author can correct them.
        markers = ValidityMarker.get_all_validity_markers(
            metafunc.definition.iter_markers()
        )
        marker_names = [
            f"@pytest.mark.{marker.marker_name}" for marker in markers
        ]
        pytest.fail(
            "The test function's "
            f"'{test_name}' fork validity markers generate "
            "an empty fork range. Please check the arguments to its "
            f"markers: {', '.join(marker_names)}."
        )
    except Exception as e:
        pytest.fail(f"Error generating tests for {test_name}: {e}")

    pytest_params: List[Any]
    if not test_fork_set:
        # Not valid for any fork at all: only surface a skipped
        # placeholder under -vv; otherwise generate nothing.
        if metafunc.config.getoption("verbose") >= 2:
            pytest_params = [
                pytest.param(
                    None,
                    marks=[
                        pytest.mark.skip(
                            reason=(
                                f"{test_name} is not enabled for any fork."
                            )
                        )
                    ],
                )
            ]
            metafunc.parametrize(
                "parametrized_fork", pytest_params, scope="function"
            )
        return

    # Get the intersection between the test's validity marker and the current
    # filling parameters.
    intersection_set: Set[Fork | TransitionFork] = (
        test_fork_set & metafunc.config.selected_fork_set  # type: ignore
    )

    # Tests that do not request the fork fixture are left unparametrized.
    if "parametrized_fork" not in metafunc.fixturenames:
        return

    # Drop forks the current session cannot fill for.
    unsupported_forks: Set[Fork | TransitionFork] = (
        metafunc.config.unsupported_forks  # type: ignore
    )
    intersection_set -= unsupported_forks

    if not intersection_set:
        # Valid forks exist, but none were selected on the command line;
        # only surface a skipped placeholder under -vv.
        if metafunc.config.getoption("verbose") >= 2:
            pytest_params = [
                pytest.param(
                    None,
                    marks=[
                        pytest.mark.skip(
                            reason=(
                                f"{test_name} is not valid for any of the "
                                "forks specified on the command-line."
                            )
                        )
                    ],
                )
            ]
            metafunc.parametrize(
                "parametrized_fork", pytest_params, scope="function"
            )
    else:
        # One ForkParametrizer per fork, sorted for deterministic order.
        pytest_params = []
        for fork in sorted(intersection_set):
            marks: List[pytest.MarkDecorator | pytest.Mark] = fork_markers(
                fork=fork,
            )
            pytest_params.append(ForkParametrizer(fork=fork, marks=marks))
        add_fork_covariant_parameters(metafunc, pytest_params)
        parametrize_fork(metafunc, pytest_params)

get_param_level_min_valid_fork(metafunc)

Extract the minimum valid fork from param-level valid_from markers.

Returns the earliest fork from any valid_from marker inside pytest.param, or None if no such markers exist.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
def get_param_level_min_valid_fork(metafunc: Metafunc) -> Fork | None:
    """
    Extract the minimum valid fork from param-level valid_from markers.

    Returns the earliest fork from any valid_from marker inside pytest.param,
    or None if no such markers exist.
    """
    earliest: Fork | None = None

    for parametrize_marker in metafunc.definition.iter_markers("parametrize"):
        # A parametrize marker without a value list cannot carry
        # param-level marks.
        if len(parametrize_marker.args) < 2:
            continue

        for param_set in parametrize_marker.args[1]:
            if not isinstance(param_set, ParameterSet) or not param_set.marks:
                continue

            for raw_mark in param_set.marks:
                # MarkDecorator wraps the Mark in a `.mark` attribute;
                # plain Mark objects are used directly.
                inner = getattr(raw_mark, "mark", raw_mark)
                if inner.name != "valid_from" or not inner.args:
                    continue
                target_name = inner.args[0]
                try:
                    for candidate in ALL_FORKS:
                        if candidate.name() == target_name:
                            if earliest is None or candidate < earliest:
                                earliest = candidate
                            break
                except (ValueError, InvalidForkError):
                    pass

    return earliest

add_fork_covariant_parameters(metafunc, fork_parametrizers)

Iterate over the fork covariant descriptors and add their values to the test function.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
def add_fork_covariant_parameters(
    metafunc: Metafunc, fork_parametrizers: List[ForkParametrizer]
) -> None:
    """
    Iterate over the fork covariant descriptors and add their values to the
    test function.

    Mutates ``fork_parametrizers`` in place: entries may be filtered out
    before covariant values are attached to the remaining ones.
    """
    # Check if any covariant markers are present
    has_covariant_markers = any(
        list(metafunc.definition.iter_markers(cd.marker_name))
        for cd in fork_covariant_decorators
    ) or any(
        marker.name == "parametrize_by_fork"
        for marker in metafunc.definition.iter_markers()
    )

    # Filter forks before any param-level valid_from to avoid covariant
    # assertion errors
    if has_covariant_markers:
        param_min_fork = get_param_level_min_valid_fork(metafunc)
        if param_min_fork:
            # Slice assignment keeps the caller's list object updated.
            fork_parametrizers[:] = [
                fp for fp in fork_parametrizers if fp.fork >= param_min_fork
            ]

    # Filter out forks where blob params don't change for valid_for_bpo_forks
    if list(metafunc.definition.iter_markers(name="valid_for_bpo_forks")):
        # `filtered_forks` holds the forks being *dropped* (blob params
        # unchanged at the transition); it is only used for logging.
        filtered_forks = [
            fp.fork
            for fp in fork_parametrizers
            if not blob_params_changed_at_transition(fp.fork)
        ]
        if filtered_forks:
            logger.debug(
                f"Skipping {metafunc.function.__name__} for forks with "
                f"unchanged blob params: {[f.name() for f in filtered_forks]}"
            )
        fork_parametrizers[:] = [
            fp
            for fp in fork_parametrizers
            if blob_params_changed_at_transition(fp.fork)
        ]

    # Attach each marked covariant descriptor's values to every remaining
    # fork parametrizer.
    for covariant_descriptor in fork_covariant_decorators:
        if list(
            metafunc.definition.iter_markers(covariant_descriptor.marker_name)
        ):
            for fork_parametrizer in fork_parametrizers:
                covariant_descriptor(metafunc=metafunc).add_values(
                    fork_parametrizer=fork_parametrizer
                )

    # Handle custom parametrize_by_fork markers
    for marker in metafunc.definition.iter_markers():
        if marker.name == "parametrize_by_fork":
            descriptor = CovariantDescriptor(
                *marker.args,
                **marker.kwargs,
            )
            for fork_parametrizer in fork_parametrizers:
                descriptor.add_values(fork_parametrizer=fork_parametrizer)

parameters_from_fork_parametrizer_list(fork_parametrizers)

Get the parameters from the fork parametrizers.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
def parameters_from_fork_parametrizer_list(
    fork_parametrizers: List[ForkParametrizer],
) -> Tuple[List[str], List[ParameterSet]]:
    """
    Get the parameters from the fork parametrizers.

    Concatenates the parameter values of all fork parametrizers (which must
    all share the same parameter names) and collapses duplicated parameter
    columns, discarding combinations where duplicates disagree.
    """
    param_names: List[str] = []
    param_values: List[ParameterSet] = []

    for fork_parametrizer in fork_parametrizers:
        if not param_names:
            param_names = fork_parametrizer.argnames
        else:
            # All parametrizers must agree on parameter names and order.
            assert param_names == fork_parametrizer.argnames
        param_values.extend(fork_parametrizer.argvalues)

    # Remove duplicate parameters
    # For each pair of columns with the same name: drop any parameter set
    # whose two values disagree, and strip the redundant second column from
    # sets where they agree.
    param_1 = 0
    while param_1 < len(param_names):
        param_2 = param_1 + 1
        while param_2 < len(param_names):
            if param_names[param_1] == param_names[param_2]:
                i = 0
                while i < len(param_values):
                    if (
                        param_values[i].values[param_1]
                        != param_values[i].values[param_2]
                    ):
                        # Conflicting values under the same name: invalid
                        # combination. `i` is not advanced because the
                        # deletion shifts subsequent elements down.
                        del param_values[i]
                    else:
                        # Rebuild the parameter set without the duplicate
                        # column at `param_2`, preserving id and marks.
                        param_values[i] = pytest.param(
                            *param_values[i].values[:param_2],
                            *param_values[i].values[(param_2 + 1) :],
                            id=param_values[i].id,
                            marks=param_values[i].marks,
                        )
                        i += 1

                del param_names[param_2]
            else:
                param_2 += 1
        param_1 += 1

    return param_names, param_values

parametrize_fork(metafunc, fork_parametrizers)

Add the fork parameters to the test function.

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
def parametrize_fork(
    metafunc: Metafunc, fork_parametrizers: List[ForkParametrizer]
) -> None:
    """Add the fork parameters to the test function."""
    param_names, param_values = parameters_from_fork_parametrizer_list(
        fork_parametrizers
    )

    # Collect all parameters that should be indirect from the decorators
    indirect: List[str] = []
    for descriptor in fork_covariant_decorators:
        if not descriptor.indirect:
            continue
        if list(metafunc.definition.iter_markers(descriptor.marker_name)):
            # Every argname of this decorator becomes indirect.
            indirect.extend(descriptor.marker_parameter_names)

    metafunc.parametrize(
        param_names, param_values, scope="function", indirect=indirect
    )

blob_params_changed_at_transition(fork)

Check if BPO-relevant blob parameters change at a fork transition.

For transition forks, compares the 3 blob parameters that BPO forks modify between the from_fork and to_fork:

  • target_blobs_per_block
  • max_blobs_per_block
  • blob_base_fee_update_fraction

Returns True if any parameter changed, False otherwise.

For non-transition forks, returns True (no filtering needed).

Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
def blob_params_changed_at_transition(fork: Fork | TransitionFork) -> bool:
    """
    Check if BPO-relevant blob parameters change at a fork transition.

    For transition forks, compares the 3 blob parameters that BPO forks
    modify between the from_fork and to_fork:

    - target_blobs_per_block
    - max_blobs_per_block
    - blob_base_fee_update_fraction

    Returns True if any parameter changed, False otherwise.

    For non-transition forks, returns True (no filtering needed).
    """
    # Non-transition forks are never filtered.
    if not fork.is_transition_fork:
        return True

    source_fork = fork.transitions_from()
    target_fork = fork.transitions_to()

    # The three blob parameters that BPO forks are allowed to modify.
    for attribute in (
        "target_blobs_per_block",
        "max_blobs_per_block",
        "blob_base_fee_update_fraction",
    ):
        source_getter = getattr(source_fork, attribute, None)
        target_getter = getattr(target_fork, attribute, None)
        # Skip parameters that either side does not define.
        if source_getter is None or target_getter is None:
            continue
        if source_getter() != target_getter():
            return True

    return False

pytest_collection_modifyitems(config, items)

Filter tests after parametrization.

Two kinds of filtering are applied:

  1. Validity markers — param-level valid_from / valid_until / valid_before markers that the pytest_generate_tests hook cannot see.
  2. Combination filters — the filter_combinations marker lets test authors reject specific cross-parameter tuples at collection time instead of calling pytest.skip() at runtime.
Source code in packages/testing/src/execution_testing/cli/pytest_commands/plugins/forks/forks.py
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
def pytest_collection_modifyitems(
    config: pytest.Config, items: List[pytest.Item]
) -> None:
    """
    Filter tests after parametrization.

    Two kinds of filtering are applied:

    1. **Validity markers** — param-level ``valid_from`` / ``valid_until`` /
       ``valid_before`` markers that the ``pytest_generate_tests`` hook
       cannot see.
    2. **Combination filters** — the ``filter_combinations`` marker lets
       test authors reject specific cross-parameter tuples at collection
       time instead of calling ``pytest.skip()`` at runtime.
    """
    # Indices into `items` to delete; removal happens in reverse at the end.
    items_to_remove: List[int] = []
    # Items removed by filter_combinations; reported via pytest_deselected.
    # (Items removed by validity markers are not reported.)
    deselected: List[pytest.Item] = []
    # function name -> (reason, total, deselected_count)
    filter_stats: Dict[str, Tuple[str, int, int]] = {}

    for i, item in enumerate(items):
        params = _get_item_params(item)
        if not params:
            continue

        # --- combination filter ---
        marker = next(item.iter_markers("filter_combinations"), None)
        if marker is not None:
            # Aggregate stats per test function (nodeid without the
            # parametrization suffix).
            fn_name = item.nodeid.split("[")[0]
            if fn_name not in filter_stats:
                reason = marker.kwargs.get(
                    "reason", "rejected by filter_combinations"
                )
                filter_stats[fn_name] = (reason, 0, 0)
            r, total, dc = filter_stats[fn_name]
            total += 1

            filter_reason = _combination_filter_reason(item, params)
            if filter_reason is not None:
                items_to_remove.append(i)
                deselected.append(item)
                dc += 1

            filter_stats[fn_name] = (r, total, dc)
            if filter_reason is not None:
                # Already removed; skip the validity-marker check.
                continue

        # --- validity markers ---
        fork = params.get("parametrized_fork")
        if fork is None:
            continue

        markers = item.iter_markers()

        try:
            valid_fork_set = ValidityMarker.get_test_fork_set_from_markers(
                markers
            )
        except Exception as e:
            pytest.exit(
                f"Error in test '{item.name}': {e}",
                returncode=pytest.ExitCode.USAGE_ERROR,
            )

        if fork not in valid_fork_set:
            items_to_remove.append(i)

    # Fail if a filter_combinations predicate eliminated every case
    # for a test function — the predicate is almost certainly wrong.
    for fn_name, (reason, total, dc) in filter_stats.items():
        if total > 0 and dc == total:
            pytest.exit(
                f"filter_combinations deselected all {total} "
                f"parametrizations of {fn_name} "
                f"(reason: {reason}). "
                f"Check the predicate logic.",
                returncode=pytest.ExitCode.USAGE_ERROR,
            )

    # Remove items in reverse order to maintain indices
    for i in reversed(items_to_remove):
        del items[i]

    if deselected:
        config.hook.pytest_deselected(items=deselected)
        if config.option.verbose >= 0:
            # Summarize on the terminal what filter_combinations removed.
            writer = config.get_terminal_writer()
            writer.line("")
            writer.sep(
                "-",
                f"{len(deselected)} deselected by filter_combinations",
            )
            for fn_name, (reason, _, dc) in sorted(filter_stats.items()):
                if dc:
                    writer.line(f"  {fn_name}: {dc} deselected ({reason})")
            if config.option.verbose >= 2:
                # -vv: additionally list each deselected nodeid.
                for item in deselected:
                    writer.line(f"    {item.nodeid}")