diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ceef2b6c5..f716fb97c 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -32,7 +32,7 @@ repos: - name: Check and insert license on Markdown files id: insert-license files: .*\.md$ - # exclude: + exclude: ^tests/data/.*\.md$ args: - --license-filepath - .github/license-short.txt diff --git a/anta/cli/nrfu/__init__.py b/anta/cli/nrfu/__init__.py index a85277102..6263e845a 100644 --- a/anta/cli/nrfu/__init__.py +++ b/anta/cli/nrfu/__init__.py @@ -147,3 +147,4 @@ def nrfu( nrfu.add_command(commands.json) nrfu.add_command(commands.text) nrfu.add_command(commands.tpl_report) +nrfu.add_command(commands.md_report) diff --git a/anta/cli/nrfu/commands.py b/anta/cli/nrfu/commands.py index 6043dbef9..a5492680b 100644 --- a/anta/cli/nrfu/commands.py +++ b/anta/cli/nrfu/commands.py @@ -13,7 +13,7 @@ from anta.cli.utils import exit_with_code -from .utils import print_jinja, print_json, print_table, print_text, run_tests, save_to_csv +from .utils import print_jinja, print_json, print_table, print_text, run_tests, save_markdown_report, save_to_csv logger = logging.getLogger(__name__) @@ -28,7 +28,7 @@ required=False, ) def table(ctx: click.Context, group_by: Literal["device", "test"] | None) -> None: - """ANTA command to check network states with table result.""" + """ANTA command to check network state with table results.""" run_tests(ctx) print_table(ctx, group_by=group_by) exit_with_code(ctx) @@ -45,7 +45,7 @@ def table(ctx: click.Context, group_by: Literal["device", "test"] | None) -> Non help="Path to save report as a JSON file", ) def json(ctx: click.Context, output: pathlib.Path | None) -> None: - """ANTA command to check network state with JSON result.""" + """ANTA command to check network state with JSON results.""" run_tests(ctx) print_json(ctx, output=output) exit_with_code(ctx) @@ -54,7 +54,7 @@ def json(ctx: click.Context, output: pathlib.Path | None) -> None: @click.command() 
@click.pass_context def text(ctx: click.Context) -> None: - """ANTA command to check network states with text result.""" + """ANTA command to check network state with text results.""" run_tests(ctx) print_text(ctx) exit_with_code(ctx) @@ -105,3 +105,19 @@ def tpl_report(ctx: click.Context, template: pathlib.Path, output: pathlib.Path run_tests(ctx) print_jinja(results=ctx.obj["result_manager"], template=template, output=output) exit_with_code(ctx) + + +@click.command() +@click.pass_context +@click.option( + "--md-output", + type=click.Path(file_okay=True, dir_okay=False, exists=False, writable=True, path_type=pathlib.Path), + show_envvar=True, + required=True, + help="Path to save the report as a Markdown file", +) +def md_report(ctx: click.Context, md_output: pathlib.Path) -> None: + """ANTA command to check network state with Markdown report.""" + run_tests(ctx) + save_markdown_report(ctx, md_output=md_output) + exit_with_code(ctx) diff --git a/anta/cli/nrfu/utils.py b/anta/cli/nrfu/utils.py index cfc2e1ed1..748578dec 100644 --- a/anta/cli/nrfu/utils.py +++ b/anta/cli/nrfu/utils.py @@ -19,6 +19,7 @@ from anta.models import AntaTest from anta.reporter import ReportJinja, ReportTable from anta.reporter.csv_reporter import ReportCsv +from anta.reporter.md_reporter import MDReportGenerator from anta.runner import main if TYPE_CHECKING: @@ -141,6 +142,22 @@ def save_to_csv(ctx: click.Context, csv_file: pathlib.Path) -> None: ctx.exit(ExitCode.USAGE_ERROR) +def save_markdown_report(ctx: click.Context, md_output: pathlib.Path) -> None: + """Save the markdown report to a file. + + Parameters + ---------- + ctx: Click context containing the result manager. + md_output: Path to save the markdown report. 
+ """ + try: + MDReportGenerator.generate(results=_get_result_manager(ctx), md_filename=md_output) + console.print(f"Markdown report saved to {md_output} ✅", style="cyan") + except OSError: + console.print(f"Failed to save Markdown report to {md_output} ❌", style="cyan") + ctx.exit(ExitCode.USAGE_ERROR) + + # Adding our own ANTA spinner - overriding rich SPINNERS for our own # so ignore warning for redefinition rich.spinner.SPINNERS = { # type: ignore[attr-defined] diff --git a/anta/constants.py b/anta/constants.py new file mode 100644 index 000000000..175a4adcc --- /dev/null +++ b/anta/constants.py @@ -0,0 +1,19 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Constants used in ANTA.""" + +from __future__ import annotations + +ACRONYM_CATEGORIES: set[str] = {"aaa", "mlag", "snmp", "bgp", "ospf", "vxlan", "stp", "igmp", "ip", "lldp", "ntp", "bfd", "ptp", "lanz", "stun", "vlan"} +"""A set of network protocol or feature acronyms that should be represented in uppercase.""" + +MD_REPORT_TOC = """**Table of Contents:** + +- [ANTA Report](#anta-report) + - [Test Results Summary](#test-results-summary) + - [Summary Totals](#summary-totals) + - [Summary Totals Device Under Test](#summary-totals-device-under-test) + - [Summary Totals Per Category](#summary-totals-per-category) + - [Test Results](#test-results)""" +"""Table of Contents for the Markdown report.""" diff --git a/anta/reporter/__init__.py b/anta/reporter/__init__.py index 7c911f243..c4e4f7bcf 100644 --- a/anta/reporter/__init__.py +++ b/anta/reporter/__init__.py @@ -154,21 +154,15 @@ def report_summary_tests( self.Headers.list_of_error_nodes, ] table = self._build_headers(headers=headers, table=table) - for test in manager.get_tests(): + for test, stats in sorted(manager.test_stats.items()): if tests is None or test in tests: - results = manager.filter_by_tests({test}).results - nb_failure = 
len([result for result in results if result.result == "failure"]) - nb_error = len([result for result in results if result.result == "error"]) - list_failure = [result.name for result in results if result.result in ["failure", "error"]] - nb_success = len([result for result in results if result.result == "success"]) - nb_skipped = len([result for result in results if result.result == "skipped"]) table.add_row( test, - str(nb_success), - str(nb_skipped), - str(nb_failure), - str(nb_error), - str(list_failure), + str(stats.devices_success_count), + str(stats.devices_skipped_count), + str(stats.devices_failure_count), + str(stats.devices_error_count), + ", ".join(stats.devices_failure), ) return table @@ -202,21 +196,15 @@ def report_summary_devices( self.Headers.list_of_error_tests, ] table = self._build_headers(headers=headers, table=table) - for device in manager.get_devices(): + for device, stats in sorted(manager.device_stats.items()): if devices is None or device in devices: - results = manager.filter_by_devices({device}).results - nb_failure = len([result for result in results if result.result == "failure"]) - nb_error = len([result for result in results if result.result == "error"]) - list_failure = [result.test for result in results if result.result in ["failure", "error"]] - nb_success = len([result for result in results if result.result == "success"]) - nb_skipped = len([result for result in results if result.result == "skipped"]) table.add_row( device, - str(nb_success), - str(nb_skipped), - str(nb_failure), - str(nb_error), - str(list_failure), + str(stats.tests_success_count), + str(stats.tests_skipped_count), + str(stats.tests_failure_count), + str(stats.tests_error_count), + ", ".join(stats.tests_failure), ) return table diff --git a/anta/reporter/md_reporter.py b/anta/reporter/md_reporter.py new file mode 100644 index 000000000..0cc5b03e2 --- /dev/null +++ b/anta/reporter/md_reporter.py @@ -0,0 +1,287 @@ +# Copyright (c) 2023-2024 Arista Networks, 
Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Markdown report generator for ANTA test results.""" + +from __future__ import annotations + +import logging +import re +from abc import ABC, abstractmethod +from typing import TYPE_CHECKING, ClassVar + +from anta.constants import MD_REPORT_TOC +from anta.logger import anta_log_exception + +if TYPE_CHECKING: + from collections.abc import Generator + from io import TextIOWrapper + from pathlib import Path + + from anta.result_manager import ResultManager + +logger = logging.getLogger(__name__) + + +# pylint: disable=too-few-public-methods +class MDReportGenerator: + """Class responsible for generating a Markdown report based on the provided `ResultManager` object. + + It aggregates different report sections, each represented by a subclass of `MDReportBase`, + and sequentially generates their content into a markdown file. + + The `generate` class method will loop over all the section subclasses and call their `generate_section` method. + The final report will be generated in the same order as the `sections` list of the method. + """ + + @classmethod + def generate(cls, results: ResultManager, md_filename: Path) -> None: + """Generate and write the various sections of the markdown report. + + Parameters + ---------- + results: The ResultsManager instance containing all test results. + md_filename: The path to the markdown file to write the report into. + """ + try: + with md_filename.open("w", encoding="utf-8") as mdfile: + sections: list[MDReportBase] = [ + ANTAReport(mdfile, results), + TestResultsSummary(mdfile, results), + SummaryTotals(mdfile, results), + SummaryTotalsDeviceUnderTest(mdfile, results), + SummaryTotalsPerCategory(mdfile, results), + TestResults(mdfile, results), + ] + for section in sections: + section.generate_section() + except OSError as exc: + message = f"OSError caught while writing the Markdown file '{md_filename.resolve()}'." 
+ anta_log_exception(exc, message, logger) + raise + + +class MDReportBase(ABC): + """Base class for all section subclasses. + + Every subclass must implement the `generate_section` method that uses the `ResultManager` object + to generate and write content to the provided markdown file. + """ + + def __init__(self, mdfile: TextIOWrapper, results: ResultManager) -> None: + """Initialize the MDReportBase with an open markdown file object to write to and a ResultManager instance. + + Parameters + ---------- + mdfile: An open file object to write the markdown data into. + results: The ResultsManager instance containing all test results. + """ + self.mdfile = mdfile + self.results = results + + @abstractmethod + def generate_section(self) -> None: + """Abstract method to generate a specific section of the markdown report. + + Must be implemented by subclasses. + """ + msg = "Must be implemented by subclasses" + raise NotImplementedError(msg) + + def generate_rows(self) -> Generator[str, None, None]: + """Generate the rows of a markdown table for a specific report section. + + Subclasses can implement this method to generate the content of the table rows. + """ + msg = "Subclasses should implement this method" + raise NotImplementedError(msg) + + def generate_heading_name(self) -> str: + """Generate a formatted heading name based on the class name. + + Returns + ------- + str: Formatted header name. + + Example + ------- + - `ANTAReport` will become ANTA Report. + - `TestResultsSummary` will become Test Results Summary.
+ """ + class_name = self.__class__.__name__ + + # Split the class name into words, keeping acronyms together + words = re.findall(r"[A-Z]?[a-z]+|[A-Z]+(?=[A-Z][a-z]|\d|\W|$)|\d+", class_name) + + # Capitalize each word, but keep acronyms in all caps + formatted_words = [word if word.isupper() else word.capitalize() for word in words] + + return " ".join(formatted_words) + + def write_table(self, table_heading: list[str], *, last_table: bool = False) -> None: + """Write a markdown table with a table heading and multiple rows to the markdown file. + + Parameters + ---------- + table_heading: List of strings to join for the table heading. + last_table: Flag to determine if it's the last table of the markdown file to avoid unnecessary new line. Defaults to False. + """ + self.mdfile.write("\n".join(table_heading) + "\n") + for row in self.generate_rows(): + self.mdfile.write(row) + if not last_table: + self.mdfile.write("\n") + + def write_heading(self, heading_level: int) -> None: + """Write a markdown heading to the markdown file. + + The heading name used is the class name. + + Parameters + ---------- + heading_level: The level of the heading (1-6). + + Example + ------- + ## Test Results Summary + """ + # Ensure the heading level is within the valid range of 1 to 6 + heading_level = max(1, min(heading_level, 6)) + heading_name = self.generate_heading_name() + heading = "#" * heading_level + " " + heading_name + self.mdfile.write(f"{heading}\n\n") + + def safe_markdown(self, text: str | None) -> str: + """Escape markdown characters in the text to prevent markdown rendering issues. + + Parameters + ---------- + text: The text to escape markdown characters from. + + Returns + ------- + str: The text with escaped markdown characters. 
+ """ + # Custom field from a TestResult object can be None + if text is None: + return "" + + # Replace newlines with spaces to keep content on one line + text = text.replace("\n", " ") + + # Replace backticks with single quotes + return text.replace("`", "'") + + +class ANTAReport(MDReportBase): + """Generate the `# ANTA Report` section of the markdown report.""" + + def generate_section(self) -> None: + """Generate the `# ANTA Report` section of the markdown report.""" + self.write_heading(heading_level=1) + toc = MD_REPORT_TOC + self.mdfile.write(toc + "\n\n") + + +class TestResultsSummary(MDReportBase): + """Generate the `## Test Results Summary` section of the markdown report.""" + + def generate_section(self) -> None: + """Generate the `## Test Results Summary` section of the markdown report.""" + self.write_heading(heading_level=2) + + +class SummaryTotals(MDReportBase): + """Generate the `### Summary Totals` section of the markdown report.""" + + TABLE_HEADING: ClassVar[list[str]] = [ + "| Total Tests | Total Tests Success | Total Tests Skipped | Total Tests Failure | Total Tests Error |", + "| ----------- | ------------------- | ------------------- | ------------------- | ------------------|", + ] + + def generate_rows(self) -> Generator[str, None, None]: + """Generate the rows of the summary totals table.""" + yield ( + f"| {self.results.get_total_results()} " + f"| {self.results.get_total_results({'success'})} " + f"| {self.results.get_total_results({'skipped'})} " + f"| {self.results.get_total_results({'failure'})} " + f"| {self.results.get_total_results({'error'})} |\n" + ) + + def generate_section(self) -> None: + """Generate the `### Summary Totals` section of the markdown report.""" + self.write_heading(heading_level=3) + self.write_table(table_heading=self.TABLE_HEADING) + + +class SummaryTotalsDeviceUnderTest(MDReportBase): + """Generate the `### Summary Totals Devices Under Tests` section of the markdown report.""" + + TABLE_HEADING: 
ClassVar[list[str]] = [ + "| Device Under Test | Total Tests | Tests Success | Tests Skipped | Tests Failure | Tests Error | Categories Skipped | Categories Failed |", + "| ------------------| ----------- | ------------- | ------------- | ------------- | ----------- | -------------------| ------------------|", + ] + + def generate_rows(self) -> Generator[str, None, None]: + """Generate the rows of the summary totals device under test table.""" + for device, stat in self.results.device_stats.items(): + total_tests = stat.tests_success_count + stat.tests_skipped_count + stat.tests_failure_count + stat.tests_error_count + categories_skipped = ", ".join(sorted(stat.categories_skipped)) + categories_failed = ", ".join(sorted(stat.categories_failed)) + yield ( + f"| {device} | {total_tests} | {stat.tests_success_count} | {stat.tests_skipped_count} | {stat.tests_failure_count} | {stat.tests_error_count} " + f"| {categories_skipped or '-'} | {categories_failed or '-'} |\n" + ) + + def generate_section(self) -> None: + """Generate the `### Summary Totals Device Under Test` section of the markdown report.""" + self.write_heading(heading_level=3) + self.write_table(table_heading=self.TABLE_HEADING) + + +class SummaryTotalsPerCategory(MDReportBase): + """Generate the `### Summary Totals Per Category` section of the markdown report.""" + + TABLE_HEADING: ClassVar[list[str]] = [ + "| Test Category | Total Tests | Tests Success | Tests Skipped | Tests Failure | Tests Error |", + "| ------------- | ----------- | ------------- | ------------- | ------------- | ----------- |", + ] + + def generate_rows(self) -> Generator[str, None, None]: + """Generate the rows of the summary totals per category table.""" + for category, stat in self.results.sorted_category_stats.items(): + total_tests = stat.tests_success_count + stat.tests_skipped_count + stat.tests_failure_count + stat.tests_error_count + yield ( + f"| {category} | {total_tests} | {stat.tests_success_count} |
{stat.tests_skipped_count} | {stat.tests_failure_count} " + f"| {stat.tests_error_count} |\n" + ) + + def generate_section(self) -> None: + """Generate the `### Summary Totals Per Category` section of the markdown report.""" + self.write_heading(heading_level=3) + self.write_table(table_heading=self.TABLE_HEADING) + + +class TestResults(MDReportBase): + """Generates the `## Test Results` section of the markdown report.""" + + TABLE_HEADING: ClassVar[list[str]] = [ + "| Device Under Test | Categories | Test | Description | Custom Field | Result | Messages |", + "| ----------------- | ---------- | ---- | ----------- | ------------ | ------ | -------- |", + ] + + def generate_rows(self) -> Generator[str, None, None]: + """Generate the rows of the all test results table.""" + for result in self.results.get_results(sort_by=["name", "test"]): + messages = self.safe_markdown(", ".join(result.messages)) + categories = ", ".join(result.categories) + yield ( + f"| {result.name or '-'} | {categories or '-'} | {result.test or '-'} " + f"| {result.description or '-'} | {self.safe_markdown(result.custom_field) or '-'} | {result.result or '-'} | {messages or '-'} |\n" + ) + + def generate_section(self) -> None: + """Generate the `## Test Results` section of the markdown report.""" + self.write_heading(heading_level=2) + self.write_table(table_heading=self.TABLE_HEADING, last_table=True) diff --git a/anta/result_manager/__init__.py b/anta/result_manager/__init__.py index 4278c0da3..1900a28b1 100644 --- a/anta/result_manager/__init__.py +++ b/anta/result_manager/__init__.py @@ -6,14 +6,18 @@ from __future__ import annotations import json -from typing import TYPE_CHECKING +from collections import defaultdict +from functools import cached_property +from itertools import chain +from typing import get_args from pydantic import TypeAdapter +from anta.constants import ACRONYM_CATEGORIES from anta.custom_types import TestStatus +from anta.result_manager.models import TestResult -if 
TYPE_CHECKING: - from anta.result_manager.models import TestResult +from .models import CategoryStats, DeviceStats, TestStats class ResultManager: @@ -94,6 +98,10 @@ def __init__(self) -> None: self.status: TestStatus = "unset" self.error_status = False + self.device_stats: defaultdict[str, DeviceStats] = defaultdict(DeviceStats) + self.category_stats: defaultdict[str, CategoryStats] = defaultdict(CategoryStats) + self.test_stats: defaultdict[str, TestStats] = defaultdict(TestStats) + def __len__(self) -> int: """Implement __len__ method to count number of results.""" return len(self._result_entries) @@ -105,38 +113,147 @@ def results(self) -> list[TestResult]: @results.setter def results(self, value: list[TestResult]) -> None: + """Set the list of TestResult.""" + # When setting the results, we need to reset the state of the current instance self._result_entries = [] self.status = "unset" self.error_status = False - for e in value: - self.add(e) + + # Also reset the stats attributes + self.device_stats = defaultdict(DeviceStats) + self.category_stats = defaultdict(CategoryStats) + self.test_stats = defaultdict(TestStats) + + for result in value: + self.add(result) @property def json(self) -> str: """Get a JSON representation of the results.""" return json.dumps([result.model_dump() for result in self._result_entries], indent=4) + @property + def sorted_category_stats(self) -> dict[str, CategoryStats]: + """A property that returns the category_stats dictionary sorted by key name.""" + return dict(sorted(self.category_stats.items())) + + @cached_property + def results_by_status(self) -> dict[TestStatus, list[TestResult]]: + """A cached property that returns the results grouped by status.""" + return {status: [result for result in self._result_entries if result.result == status] for status in get_args(TestStatus)} + + def _update_status(self, test_status: TestStatus) -> None: + """Update the status of the ResultManager instance based on the test status. 
+ + Parameters + ---------- + test_status: TestStatus to update the ResultManager status. + """ + result_validator: TypeAdapter[TestStatus] = TypeAdapter(TestStatus) + result_validator.validate_python(test_status) + if test_status == "error": + self.error_status = True + return + if self.status == "unset" or self.status == "skipped" and test_status in {"success", "failure"}: + self.status = test_status + elif self.status == "success" and test_status == "failure": + self.status = "failure" + + def _update_stats(self, result: TestResult) -> None: + """Update the statistics based on the test result. + + Parameters + ---------- + result: TestResult to update the statistics. + """ + result.categories = [ + " ".join(word.upper() if word.lower() in ACRONYM_CATEGORIES else word.title() for word in category.split()) for category in result.categories + ] + count_attr = f"tests_{result.result}_count" + + # Update device stats + device_stats: DeviceStats = self.device_stats[result.name] + setattr(device_stats, count_attr, getattr(device_stats, count_attr) + 1) + if result.result in ("failure", "error"): + device_stats.tests_failure.add(result.test) + device_stats.categories_failed.update(result.categories) + elif result.result == "skipped": + device_stats.categories_skipped.update(result.categories) + + # Update category stats + for category in result.categories: + category_stats: CategoryStats = self.category_stats[category] + setattr(category_stats, count_attr, getattr(category_stats, count_attr) + 1) + + # Update test stats + count_attr = f"devices_{result.result}_count" + test_stats: TestStats = self.test_stats[result.test] + setattr(test_stats, count_attr, getattr(test_stats, count_attr) + 1) + if result.result in ("failure", "error"): + test_stats.devices_failure.add(result.name) + def add(self, result: TestResult) -> None: """Add a result to the ResultManager instance. 
+ The result is added to the internal list of results and the overall status + of the ResultManager instance is updated based on the added test status. + Parameters ---------- result: TestResult to add to the ResultManager instance. """ + self._result_entries.append(result) + self._update_status(result.result) + self._update_stats(result) - def _update_status(test_status: TestStatus) -> None: - result_validator: TypeAdapter[TestStatus] = TypeAdapter(TestStatus) - result_validator.validate_python(test_status) - if test_status == "error": - self.error_status = True - return - if self.status == "unset" or self.status == "skipped" and test_status in {"success", "failure"}: - self.status = test_status - elif self.status == "success" and test_status == "failure": - self.status = "failure" + # Every time a new result is added, we need to clear the cached property + self.__dict__.pop("results_by_status", None) - self._result_entries.append(result) - _update_status(result.result) + def get_results(self, status: set[TestStatus] | None = None, sort_by: list[str] | None = None) -> list[TestResult]: + """Get the results, optionally filtered by status and sorted by TestResult fields. + + If no status is provided, all results are returned. + + Parameters + ---------- + status: Optional set of TestStatus literals to filter the results. + sort_by: Optional list of TestResult fields to sort the results. + + Returns + ------- + List of TestResult. + """ + # Return all results if no status is provided, otherwise return results for multiple statuses + results = self._result_entries if status is None else list(chain.from_iterable(self.results_by_status.get(status, []) for status in status)) + + if sort_by: + accepted_fields = TestResult.model_fields.keys() + if not set(sort_by).issubset(set(accepted_fields)): + msg = f"Invalid sort_by fields: {sort_by}. 
Accepted fields are: {list(accepted_fields)}" + raise ValueError(msg) + results = sorted(results, key=lambda result: [getattr(result, field) for field in sort_by]) + + return results + + def get_total_results(self, status: set[TestStatus] | None = None) -> int: + """Get the total number of results, optionally filtered by status. + + If no status is provided, the total number of results is returned. + + Parameters + ---------- + status: Optional set of TestStatus literals to filter the results. + + Returns + ------- + Total number of results. + """ + if status is None: + # Return the total number of results + return sum(len(results) for results in self.results_by_status.values()) + + # Return the total number of results for multiple statuses + return sum(len(self.results_by_status.get(status, [])) for status in status) def get_status(self, *, ignore_error: bool = False) -> str: """Return the current status including error_status if ignore_error is False.""" @@ -153,8 +270,9 @@ def filter(self, hide: set[TestStatus]) -> ResultManager: ------- A filtered `ResultManager`. 
""" + possible_statuses = set(get_args(TestStatus)) manager = ResultManager() - manager.results = [test for test in self._result_entries if test.result not in hide] + manager.results = self.get_results(possible_statuses - hide) return manager def filter_by_tests(self, tests: set[str]) -> ResultManager: diff --git a/anta/result_manager/models.py b/anta/result_manager/models.py index e1171c88a..6abce0233 100644 --- a/anta/result_manager/models.py +++ b/anta/result_manager/models.py @@ -5,6 +5,8 @@ from __future__ import annotations +from dataclasses import dataclass, field + from pydantic import BaseModel from anta.custom_types import TestStatus @@ -89,3 +91,42 @@ def _set_status(self, status: TestStatus, message: str | None = None) -> None: def __str__(self) -> str: """Return a human readable string of this TestResult.""" return f"Test '{self.test}' (on '{self.name}'): Result '{self.result}'\nMessages: {self.messages}" + + +# Pylint does not treat dataclasses differently: https://github.com/pylint-dev/pylint/issues/9058 +# pylint: disable=too-many-instance-attributes +@dataclass +class DeviceStats: + """Device statistics for a run of tests.""" + + tests_success_count: int = 0 + tests_skipped_count: int = 0 + tests_failure_count: int = 0 + tests_error_count: int = 0 + tests_unset_count: int = 0 + tests_failure: set[str] = field(default_factory=set) + categories_failed: set[str] = field(default_factory=set) + categories_skipped: set[str] = field(default_factory=set) + + +@dataclass +class CategoryStats: + """Category statistics for a run of tests.""" + + tests_success_count: int = 0 + tests_skipped_count: int = 0 + tests_failure_count: int = 0 + tests_error_count: int = 0 + tests_unset_count: int = 0 + + +@dataclass +class TestStats: + """Test statistics for a run of tests.""" + + devices_success_count: int = 0 + devices_skipped_count: int = 0 + devices_failure_count: int = 0 + devices_error_count: int = 0 + devices_unset_count: int = 0 + devices_failure: set[str] = 
field(default_factory=set) diff --git a/docs/cli/nrfu.md b/docs/cli/nrfu.md index 2f4e7eedc..0de782551 100644 --- a/docs/cli/nrfu.md +++ b/docs/cli/nrfu.md @@ -45,7 +45,7 @@ Options `--device` and `--test` can be used to target one or multiple devices an ### Hide results -Option `--hide` can be used to hide test results in the output based on their status. The option can be repeated. Example: `anta nrfu --hide error --hide skipped`. +Option `--hide` can be used to hide test results in the output or report file based on their status. The option can be repeated. Example: `anta nrfu --hide error --hide skipped`. ## Performing NRFU with text rendering @@ -167,6 +167,29 @@ Options: ![anta nrfu csv results](../imgs/anta_nrfu_csv.png){ loading=lazy width="1600" } +## Performing NRFU and saving results in a Markdown file + +The `md-report` command in NRFU testing generates a comprehensive Markdown report containing various sections, including detailed statistics for devices and test categories. + +### Command overview + +```bash +anta nrfu md-report --help + +Usage: anta nrfu md-report [OPTIONS] + + ANTA command to check network state with Markdown report. + +Options: + --md-output FILE Path to save the report as a Markdown file [env var: + ANTA_NRFU_MD_REPORT_MD_OUTPUT; required] + --help Show this message and exit. +``` + +### Example + +![anta nrfu md-report results](../imgs/anta-nrfu-md-report-output.png){ loading=lazy width="1600" } + ## Performing NRFU with custom reports ANTA offers a CLI option for creating custom reports. This leverages the Jinja2 template system, allowing you to tailor reports to your specific needs. 
diff --git a/docs/imgs/anta-nrfu-md-report-output.png b/docs/imgs/anta-nrfu-md-report-output.png new file mode 100644 index 000000000..984e76b5c Binary files /dev/null and b/docs/imgs/anta-nrfu-md-report-output.png differ diff --git a/docs/snippets/anta_nrfu_help.txt b/docs/snippets/anta_nrfu_help.txt index 365da0474..cb23fa7ed 100644 --- a/docs/snippets/anta_nrfu_help.txt +++ b/docs/snippets/anta_nrfu_help.txt @@ -53,7 +53,8 @@ Options: Commands: csv ANTA command to check network state with CSV report. - json ANTA command to check network state with JSON result. - table ANTA command to check network states with table result. - text ANTA command to check network states with text result. + json ANTA command to check network state with JSON results. + md-report ANTA command to check network state with Markdown report. + table ANTA command to check network state with table results. + text ANTA command to check network state with text results. tpl-report ANTA command to check network state with templated report. 
diff --git a/tests/data/test_md_report.md b/tests/data/test_md_report.md new file mode 100644 index 000000000..9360dbc74 --- /dev/null +++ b/tests/data/test_md_report.md @@ -0,0 +1,79 @@ +# ANTA Report + +**Table of Contents:** + +- [ANTA Report](#anta-report) + - [Test Results Summary](#test-results-summary) + - [Summary Totals](#summary-totals) + - [Summary Totals Device Under Test](#summary-totals-device-under-test) + - [Summary Totals Per Category](#summary-totals-per-category) + - [Test Results](#test-results) + +## Test Results Summary + +### Summary Totals + +| Total Tests | Total Tests Success | Total Tests Skipped | Total Tests Failure | Total Tests Error | +| ----------- | ------------------- | ------------------- | ------------------- | ------------------| +| 30 | 7 | 2 | 19 | 2 | + +### Summary Totals Device Under Test + +| Device Under Test | Total Tests | Tests Success | Tests Skipped | Tests Failure | Tests Error | Categories Skipped | Categories Failed | +| ------------------| ----------- | ------------- | ------------- | ------------- | ----------- | -------------------| ------------------| +| DC1-SPINE1 | 15 | 2 | 2 | 10 | 1 | MLAG, VXLAN | AAA, BFD, BGP, Connectivity, Routing, SNMP, STP, Services, Software, System | +| DC1-LEAF1A | 15 | 5 | 0 | 9 | 1 | - | AAA, BFD, BGP, Connectivity, SNMP, STP, Services, Software, System | + +### Summary Totals Per Category + +| Test Category | Total Tests | Tests Success | Tests Skipped | Tests Failure | Tests Error | +| ------------- | ----------- | ------------- | ------------- | ------------- | ----------- | +| AAA | 2 | 0 | 0 | 2 | 0 | +| BFD | 2 | 0 | 0 | 2 | 0 | +| BGP | 2 | 0 | 0 | 2 | 0 | +| Connectivity | 4 | 0 | 0 | 2 | 2 | +| Interfaces | 2 | 2 | 0 | 0 | 0 | +| MLAG | 2 | 1 | 1 | 0 | 0 | +| Routing | 2 | 1 | 0 | 1 | 0 | +| SNMP | 2 | 0 | 0 | 2 | 0 | +| STP | 2 | 0 | 0 | 2 | 0 | +| Security | 2 | 2 | 0 | 0 | 0 | +| Services | 2 | 0 | 0 | 2 | 0 | +| Software | 2 | 0 | 0 | 2 | 0 | +| System | 2 | 0 | 0 
| 2 | 0 | +| VXLAN | 2 | 1 | 1 | 0 | 0 | + +## Test Results + +| Device Under Test | Categories | Test | Description | Custom Field | Result | Messages | +| ----------------- | ---------- | ---- | ----------- | ------------ | ------ | -------- | +| DC1-LEAF1A | BFD | VerifyBFDSpecificPeers | Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF. | - | failure | Following BFD peers are not configured, status is not up or remote disc is zero: {'192.0.255.8': {'default': 'Not Configured'}, '192.0.255.7': {'default': 'Not Configured'}} | +| DC1-LEAF1A | BGP | VerifyBGPPeerCount | Verifies the count of BGP peers. | - | failure | Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'PROD': 'Expected: 2, Actual: 1'}}, {'afi': 'ipv4', 'safi': 'multicast', 'vrfs': {'DEV': 'Expected: 3, Actual: 0'}}] | +| DC1-LEAF1A | Software | VerifyEOSVersion | Verifies the EOS version of the device. | - | failure | device is running version "4.31.1F-34554157.4311F (engineering build)" not in expected versions: ['4.25.4M', '4.26.1F'] | +| DC1-LEAF1A | Services | VerifyHostname | Verifies the hostname of a device. | - | failure | Expected 's1-spine1' as the hostname, but found 'DC1-LEAF1A' instead. | +| DC1-LEAF1A | Interfaces | VerifyInterfaceUtilization | Verifies that the utilization of interfaces is below a certain threshold. | - | success | - | +| DC1-LEAF1A | Connectivity | VerifyLLDPNeighbors | Verifies that the provided LLDP neighbors are connected properly. | - | failure | Wrong LLDP neighbor(s) on port(s): Ethernet1 DC1-SPINE1_Ethernet1 Ethernet2 DC1-SPINE2_Ethernet1 Port(s) not configured: Ethernet7 | +| DC1-LEAF1A | MLAG | VerifyMlagStatus | Verifies the health status of the MLAG configuration. | - | success | - | +| DC1-LEAF1A | System | VerifyNTP | Verifies if NTP is synchronised. | - | failure | The device is not synchronized with the configured NTP server(s): 'NTP is disabled.' 
| +| DC1-LEAF1A | Connectivity | VerifyReachability | Test the network reachability to one or many destination IP(s). | - | error | ping vrf MGMT 1.1.1.1 source Management1 repeat 2 has failed: No source interface Management1 | +| DC1-LEAF1A | Routing | VerifyRoutingTableEntry | Verifies that the provided routes are present in the routing table of a specified VRF. | - | success | - | +| DC1-LEAF1A | STP | VerifySTPMode | Verifies the configured STP mode for a provided list of VLAN(s). | - | failure | Wrong STP mode configured for the following VLAN(s): [10, 20] | +| DC1-LEAF1A | SNMP | VerifySnmpStatus | Verifies if the SNMP agent is enabled. | - | failure | SNMP agent disabled in vrf default | +| DC1-LEAF1A | AAA | VerifyTacacsSourceIntf | Verifies TACACS source-interface for a specified VRF. | - | failure | Source-interface Management0 is not configured in VRF default | +| DC1-LEAF1A | Security | VerifyTelnetStatus | Verifies if Telnet is disabled in the default VRF. | - | success | - | +| DC1-LEAF1A | VXLAN | VerifyVxlan1Interface | Verifies the Vxlan1 interface status. | - | success | - | +| DC1-SPINE1 | BFD | VerifyBFDSpecificPeers | Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF. | - | failure | Following BFD peers are not configured, status is not up or remote disc is zero: {'192.0.255.8': {'default': 'Not Configured'}, '192.0.255.7': {'default': 'Not Configured'}} | +| DC1-SPINE1 | BGP | VerifyBGPPeerCount | Verifies the count of BGP peers. | - | failure | Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'PROD': 'Not Configured', 'default': 'Expected: 3, Actual: 4'}}, {'afi': 'ipv4', 'safi': 'multicast', 'vrfs': {'DEV': 'Not Configured'}}, {'afi': 'evpn', 'vrfs': {'default': 'Expected: 2, Actual: 4'}}] | +| DC1-SPINE1 | Software | VerifyEOSVersion | Verifies the EOS version of the device. 
| - | failure | device is running version "4.31.1F-34554157.4311F (engineering build)" not in expected versions: ['4.25.4M', '4.26.1F'] | +| DC1-SPINE1 | Services | VerifyHostname | Verifies the hostname of a device. | - | failure | Expected 's1-spine1' as the hostname, but found 'DC1-SPINE1' instead. | +| DC1-SPINE1 | Interfaces | VerifyInterfaceUtilization | Verifies that the utilization of interfaces is below a certain threshold. | - | success | - | +| DC1-SPINE1 | Connectivity | VerifyLLDPNeighbors | Verifies that the provided LLDP neighbors are connected properly. | - | failure | Wrong LLDP neighbor(s) on port(s): Ethernet1 DC1-LEAF1A_Ethernet1 Ethernet2 DC1-LEAF1B_Ethernet1 Port(s) not configured: Ethernet7 | +| DC1-SPINE1 | MLAG | VerifyMlagStatus | Verifies the health status of the MLAG configuration. | - | skipped | MLAG is disabled | +| DC1-SPINE1 | System | VerifyNTP | Verifies if NTP is synchronised. | - | failure | The device is not synchronized with the configured NTP server(s): 'NTP is disabled.' | +| DC1-SPINE1 | Connectivity | VerifyReachability | Test the network reachability to one or many destination IP(s). | - | error | ping vrf MGMT 1.1.1.1 source Management1 repeat 2 has failed: No source interface Management1 | +| DC1-SPINE1 | Routing | VerifyRoutingTableEntry | Verifies that the provided routes are present in the routing table of a specified VRF. | - | failure | The following route(s) are missing from the routing table of VRF default: ['10.1.0.2'] | +| DC1-SPINE1 | STP | VerifySTPMode | Verifies the configured STP mode for a provided list of VLAN(s). | - | failure | STP mode 'rapidPvst' not configured for the following VLAN(s): [10, 20] | +| DC1-SPINE1 | SNMP | VerifySnmpStatus | Verifies if the SNMP agent is enabled. | - | failure | SNMP agent disabled in vrf default | +| DC1-SPINE1 | AAA | VerifyTacacsSourceIntf | Verifies TACACS source-interface for a specified VRF. 
| - | failure | Source-interface Management0 is not configured in VRF default | +| DC1-SPINE1 | Security | VerifyTelnetStatus | Verifies if Telnet is disabled in the default VRF. | - | success | - | +| DC1-SPINE1 | VXLAN | VerifyVxlan1Interface | Verifies the Vxlan1 interface status. | - | skipped | Vxlan1 interface is not configured | diff --git a/tests/data/test_md_report_results.json b/tests/data/test_md_report_results.json new file mode 100644 index 000000000..b9ecc0c57 --- /dev/null +++ b/tests/data/test_md_report_results.json @@ -0,0 +1,378 @@ +[ + { + "name": "DC1-SPINE1", + "test": "VerifyTacacsSourceIntf", + "categories": [ + "AAA" + ], + "description": "Verifies TACACS source-interface for a specified VRF.", + "result": "failure", + "messages": [ + "Source-interface Management0 is not configured in VRF default" + ], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifyLLDPNeighbors", + "categories": [ + "Connectivity" + ], + "description": "Verifies that the provided LLDP neighbors are connected properly.", + "result": "failure", + "messages": [ + "Wrong LLDP neighbor(s) on port(s):\n Ethernet1\n DC1-LEAF1A_Ethernet1\n Ethernet2\n DC1-LEAF1B_Ethernet1\nPort(s) not configured:\n Ethernet7" + ], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifyBGPPeerCount", + "categories": [ + "BGP" + ], + "description": "Verifies the count of BGP peers.", + "result": "failure", + "messages": [ + "Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'PROD': 'Not Configured', 'default': 'Expected: 3, Actual: 4'}}, {'afi': 'ipv4', 'safi': 'multicast', 'vrfs': {'DEV': 'Not Configured'}}, {'afi': 'evpn', 'vrfs': {'default': 'Expected: 2, Actual: 4'}}]" + ], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifySTPMode", + "categories": [ + "STP" + ], + "description": "Verifies the configured STP mode for a provided list of VLAN(s).", + "result": "failure", + "messages": [ + "STP mode 'rapidPvst' not configured 
for the following VLAN(s): [10, 20]" + ], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifySnmpStatus", + "categories": [ + "SNMP" + ], + "description": "Verifies if the SNMP agent is enabled.", + "result": "failure", + "messages": [ + "SNMP agent disabled in vrf default" + ], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifyRoutingTableEntry", + "categories": [ + "Routing" + ], + "description": "Verifies that the provided routes are present in the routing table of a specified VRF.", + "result": "failure", + "messages": [ + "The following route(s) are missing from the routing table of VRF default: ['10.1.0.2']" + ], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifyInterfaceUtilization", + "categories": [ + "Interfaces" + ], + "description": "Verifies that the utilization of interfaces is below a certain threshold.", + "result": "success", + "messages": [], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifyMlagStatus", + "categories": [ + "MLAG" + ], + "description": "Verifies the health status of the MLAG configuration.", + "result": "skipped", + "messages": [ + "MLAG is disabled" + ], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifyVxlan1Interface", + "categories": [ + "VXLAN" + ], + "description": "Verifies the Vxlan1 interface status.", + "result": "skipped", + "messages": [ + "Vxlan1 interface is not configured" + ], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifyBFDSpecificPeers", + "categories": [ + "BFD" + ], + "description": "Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF.", + "result": "failure", + "messages": [ + "Following BFD peers are not configured, status is not up or remote disc is zero:\n{'192.0.255.8': {'default': 'Not Configured'}, '192.0.255.7': {'default': 'Not Configured'}}" + ], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifyNTP", + "categories": 
[ + "System" + ], + "description": "Verifies if NTP is synchronised.", + "result": "failure", + "messages": [ + "The device is not synchronized with the configured NTP server(s): 'NTP is disabled.'" + ], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifyReachability", + "categories": [ + "Connectivity" + ], + "description": "Test the network reachability to one or many destination IP(s).", + "result": "error", + "messages": [ + "ping vrf MGMT 1.1.1.1 source Management1 repeat 2 has failed: No source interface Management1" + ], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifyTelnetStatus", + "categories": [ + "Security" + ], + "description": "Verifies if Telnet is disabled in the default VRF.", + "result": "success", + "messages": [], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifyEOSVersion", + "categories": [ + "Software" + ], + "description": "Verifies the EOS version of the device.", + "result": "failure", + "messages": [ + "device is running version \"4.31.1F-34554157.4311F (engineering build)\" not in expected versions: ['4.25.4M', '4.26.1F']" + ], + "custom_field": null + }, + { + "name": "DC1-SPINE1", + "test": "VerifyHostname", + "categories": [ + "Services" + ], + "description": "Verifies the hostname of a device.", + "result": "failure", + "messages": [ + "Expected `s1-spine1` as the hostname, but found `DC1-SPINE1` instead." 
+ ], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifyTacacsSourceIntf", + "categories": [ + "AAA" + ], + "description": "Verifies TACACS source-interface for a specified VRF.", + "result": "failure", + "messages": [ + "Source-interface Management0 is not configured in VRF default" + ], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifyLLDPNeighbors", + "categories": [ + "Connectivity" + ], + "description": "Verifies that the provided LLDP neighbors are connected properly.", + "result": "failure", + "messages": [ + "Wrong LLDP neighbor(s) on port(s):\n Ethernet1\n DC1-SPINE1_Ethernet1\n Ethernet2\n DC1-SPINE2_Ethernet1\nPort(s) not configured:\n Ethernet7" + ], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifyBGPPeerCount", + "categories": [ + "BGP" + ], + "description": "Verifies the count of BGP peers.", + "result": "failure", + "messages": [ + "Failures: [{'afi': 'ipv4', 'safi': 'unicast', 'vrfs': {'PROD': 'Expected: 2, Actual: 1'}}, {'afi': 'ipv4', 'safi': 'multicast', 'vrfs': {'DEV': 'Expected: 3, Actual: 0'}}]" + ], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifySTPMode", + "categories": [ + "STP" + ], + "description": "Verifies the configured STP mode for a provided list of VLAN(s).", + "result": "failure", + "messages": [ + "Wrong STP mode configured for the following VLAN(s): [10, 20]" + ], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifySnmpStatus", + "categories": [ + "SNMP" + ], + "description": "Verifies if the SNMP agent is enabled.", + "result": "failure", + "messages": [ + "SNMP agent disabled in vrf default" + ], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifyRoutingTableEntry", + "categories": [ + "Routing" + ], + "description": "Verifies that the provided routes are present in the routing table of a specified VRF.", + "result": "success", + "messages": [], + "custom_field": null + }, + { + "name": 
"DC1-LEAF1A", + "test": "VerifyInterfaceUtilization", + "categories": [ + "Interfaces" + ], + "description": "Verifies that the utilization of interfaces is below a certain threshold.", + "result": "success", + "messages": [], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifyMlagStatus", + "categories": [ + "MLAG" + ], + "description": "Verifies the health status of the MLAG configuration.", + "result": "success", + "messages": [], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifyVxlan1Interface", + "categories": [ + "VXLAN" + ], + "description": "Verifies the Vxlan1 interface status.", + "result": "success", + "messages": [], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifyBFDSpecificPeers", + "categories": [ + "BFD" + ], + "description": "Verifies the IPv4 BFD peer's sessions and remote disc in the specified VRF.", + "result": "failure", + "messages": [ + "Following BFD peers are not configured, status is not up or remote disc is zero:\n{'192.0.255.8': {'default': 'Not Configured'}, '192.0.255.7': {'default': 'Not Configured'}}" + ], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifyNTP", + "categories": [ + "System" + ], + "description": "Verifies if NTP is synchronised.", + "result": "failure", + "messages": [ + "The device is not synchronized with the configured NTP server(s): 'NTP is disabled.'" + ], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifyReachability", + "categories": [ + "Connectivity" + ], + "description": "Test the network reachability to one or many destination IP(s).", + "result": "error", + "messages": [ + "ping vrf MGMT 1.1.1.1 source Management1 repeat 2 has failed: No source interface Management1" + ], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifyTelnetStatus", + "categories": [ + "Security" + ], + "description": "Verifies if Telnet is disabled in the default VRF.", + "result": "success", + 
"messages": [], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifyEOSVersion", + "categories": [ + "Software" + ], + "description": "Verifies the EOS version of the device.", + "result": "failure", + "messages": [ + "device is running version \"4.31.1F-34554157.4311F (engineering build)\" not in expected versions: ['4.25.4M', '4.26.1F']" + ], + "custom_field": null + }, + { + "name": "DC1-LEAF1A", + "test": "VerifyHostname", + "categories": [ + "Services" + ], + "description": "Verifies the hostname of a device.", + "result": "failure", + "messages": [ + "Expected `s1-spine1` as the hostname, but found `DC1-LEAF1A` instead." + ], + "custom_field": null + } +] diff --git a/tests/lib/fixture.py b/tests/lib/fixture.py index b0205b8bb..92210acfa 100644 --- a/tests/lib/fixture.py +++ b/tests/lib/fixture.py @@ -5,8 +5,10 @@ from __future__ import annotations +import json import logging import shutil +from pathlib import Path from typing import TYPE_CHECKING, Any, Callable from unittest.mock import patch @@ -23,12 +25,15 @@ if TYPE_CHECKING: from collections.abc import Iterator - from pathlib import Path from anta.models import AntaCommand logger = logging.getLogger(__name__) +DATA_DIR: Path = Path(__file__).parent.parent.resolve() / "data" + +JSON_RESULTS = "test_md_report_results.json" + DEVICE_HW_MODEL = "pytest" DEVICE_NAME = "pytest" COMMAND_OUTPUT = "retrieved" @@ -154,6 +159,31 @@ def _factory(number: int = 0) -> ResultManager: return _factory +@pytest.fixture +def result_manager() -> ResultManager: + """Return a ResultManager with 30 random tests loaded from a JSON file. + + Devices: DC1-SPINE1, DC1-LEAF1A + + - Total tests: 30 + - Success: 7 + - Skipped: 2 + - Failure: 19 + - Error: 2 + + See `tests/data/test_md_report_results.json` and `tests/data/test_md_report_all_tests.md` for details. 
+ """ + manager = ResultManager() + + with (DATA_DIR / JSON_RESULTS).open("r", encoding="utf-8") as f: + results = json.load(f) + + for result in results: + manager.add(TestResult(**result)) + + return manager + + # tests.units.cli fixtures @pytest.fixture def temp_env(tmp_path: Path) -> dict[str, str | None]: diff --git a/tests/units/cli/nrfu/test__init__.py b/tests/units/cli/nrfu/test__init__.py index 83369f344..7227a699f 100644 --- a/tests/units/cli/nrfu/test__init__.py +++ b/tests/units/cli/nrfu/test__init__.py @@ -120,3 +120,9 @@ def test_disable_cache(click_runner: CliRunner) -> None: if "disable_cache" in line: assert "True" in line assert result.exit_code == ExitCode.OK + + +def test_hide(click_runner: CliRunner) -> None: + """Test the `--hide` option of the `anta nrfu` command.""" + result = click_runner.invoke(anta, ["nrfu", "--hide", "success", "text"]) + assert "SUCCESS" not in result.output diff --git a/tests/units/cli/nrfu/test_commands.py b/tests/units/cli/nrfu/test_commands.py index 803c8f803..72d5a0154 100644 --- a/tests/units/cli/nrfu/test_commands.py +++ b/tests/units/cli/nrfu/test_commands.py @@ -151,3 +151,23 @@ def test_anta_nrfu_csv_failure(click_runner: CliRunner, tmp_path: Path) -> None: assert result.exit_code == ExitCode.USAGE_ERROR assert "Failed to save CSV report to" in result.output assert not csv_output.exists() + + +def test_anta_nrfu_md_report(click_runner: CliRunner, tmp_path: Path) -> None: + """Test anta nrfu md-report.""" + md_output = tmp_path / "test.md" + result = click_runner.invoke(anta, ["nrfu", "md-report", "--md-output", str(md_output)]) + assert result.exit_code == ExitCode.OK + assert "Markdown report saved to" in result.output + assert md_output.exists() + + +def test_anta_nrfu_md_report_failure(click_runner: CliRunner, tmp_path: Path) -> None: + """Test anta nrfu md-report failure.""" + md_output = tmp_path / "test.md" + with patch("anta.reporter.md_reporter.MDReportGenerator.generate", side_effect=OSError()): + 
result = click_runner.invoke(anta, ["nrfu", "md-report", "--md-output", str(md_output)]) + + assert result.exit_code == ExitCode.USAGE_ERROR + assert "Failed to save Markdown report to" in result.output + assert not md_output.exists() diff --git a/tests/units/reporter/test_md_reporter.py b/tests/units/reporter/test_md_reporter.py new file mode 100644 index 000000000..a60773374 --- /dev/null +++ b/tests/units/reporter/test_md_reporter.py @@ -0,0 +1,54 @@ +# Copyright (c) 2023-2024 Arista Networks, Inc. +# Use of this source code is governed by the Apache License 2.0 +# that can be found in the LICENSE file. +"""Test anta.reporter.md_reporter.py.""" + +from __future__ import annotations + +from io import StringIO +from pathlib import Path + +import pytest + +from anta.reporter.md_reporter import MDReportBase, MDReportGenerator +from anta.result_manager import ResultManager + +DATA_DIR: Path = Path(__file__).parent.parent.parent.resolve() / "data" + + +def test_md_report_generate(tmp_path: Path, result_manager: ResultManager) -> None: + """Test the MDReportGenerator class.""" + md_filename = tmp_path / "test.md" + expected_report = "test_md_report.md" + + # Generate the Markdown report + MDReportGenerator.generate(result_manager, md_filename) + assert md_filename.exists() + + # Load the existing Markdown report to compare with the generated one + with (DATA_DIR / expected_report).open("r", encoding="utf-8") as f: + expected_content = f.read() + + # Check the content of the Markdown file + content = md_filename.read_text(encoding="utf-8") + + assert content == expected_content + + +def test_md_report_base() -> None: + """Test the MDReportBase class.""" + + class FakeMDReportBase(MDReportBase): + """Fake MDReportBase class.""" + + def generate_section(self) -> None: + pass + + results = ResultManager() + + with StringIO() as mock_file: + report = FakeMDReportBase(mock_file, results) + assert report.generate_heading_name() == "Fake MD Report Base" + + with 
pytest.raises(NotImplementedError, match="Subclasses should implement this method"): + report.generate_rows() diff --git a/tests/units/result_manager/test__init__.py b/tests/units/result_manager/test__init__.py index 02c694c05..66a6cfb1d 100644 --- a/tests/units/result_manager/test__init__.py +++ b/tests/units/result_manager/test__init__.py @@ -6,6 +6,7 @@ from __future__ import annotations import json +import re from contextlib import AbstractContextManager, nullcontext from typing import TYPE_CHECKING, Callable @@ -71,6 +72,27 @@ def test_json(self, list_result_factory: Callable[[int], list[TestResult]]) -> N assert test.get("custom_field") is None assert test.get("result") == "success" + def test_sorted_category_stats(self, list_result_factory: Callable[[int], list[TestResult]]) -> None: + """Test ResultManager.sorted_category_stats.""" + result_manager = ResultManager() + results = list_result_factory(4) + + # Modify the categories to have a mix of different acronym categories + results[0].categories = ["ospf"] + results[1].categories = ["bgp"] + results[2].categories = ["vxlan"] + results[3].categories = ["system"] + + result_manager.results = results + + # Check the current categories order and name format + expected_order = ["OSPF", "BGP", "VXLAN", "System"] + assert list(result_manager.category_stats.keys()) == expected_order + + # Check the sorted categories order and name format + expected_order = ["BGP", "OSPF", "System", "VXLAN"] + assert list(result_manager.sorted_category_stats.keys()) == expected_order + @pytest.mark.parametrize( ("starting_status", "test_status", "expected_status", "expected_raise"), [ @@ -149,6 +171,91 @@ def test_add( assert result_manager.status == expected_status assert len(result_manager) == 1 + def test_add_clear_cache(self, result_manager: ResultManager, test_result_factory: Callable[[], TestResult]) -> None: + """Test ResultManager.add and make sure the cache is reset after adding a new test.""" + # Check the cache is empty 
+ assert "results_by_status" not in result_manager.__dict__ + + # Access the cache + assert result_manager.get_total_results() == 30 + + # Check the cache is filled with the correct results count + assert "results_by_status" in result_manager.__dict__ + assert sum(len(v) for v in result_manager.__dict__["results_by_status"].values()) == 30 + + # Add a new test + result_manager.add(result=test_result_factory()) + + # Check the cache has been reset + assert "results_by_status" not in result_manager.__dict__ + + # Access the cache again + assert result_manager.get_total_results() == 31 + + # Check the cache is filled again with the correct results count + assert "results_by_status" in result_manager.__dict__ + assert sum(len(v) for v in result_manager.__dict__["results_by_status"].values()) == 31 + + def test_get_results(self, result_manager: ResultManager) -> None: + """Test ResultManager.get_results.""" + # Check for single status + success_results = result_manager.get_results(status={"success"}) + assert len(success_results) == 7 + assert all(r.result == "success" for r in success_results) + + # Check for multiple statuses + failure_results = result_manager.get_results(status={"failure", "error"}) + assert len(failure_results) == 21 + assert all(r.result in {"failure", "error"} for r in failure_results) + + # Check all results + all_results = result_manager.get_results() + assert len(all_results) == 30 + + def test_get_results_sort_by(self, result_manager: ResultManager) -> None: + """Test ResultManager.get_results with sort_by.""" + # Check all results with sort_by result + all_results = result_manager.get_results(sort_by=["result"]) + assert len(all_results) == 30 + assert [r.result for r in all_results] == ["error"] * 2 + ["failure"] * 19 + ["skipped"] * 2 + ["success"] * 7 + + # Check all results with sort_by device (name) + all_results = result_manager.get_results(sort_by=["name"]) + assert len(all_results) == 30 + assert all_results[0].name == "DC1-LEAF1A" + 
assert all_results[-1].name == "DC1-SPINE1" + + # Check multiple statuses with sort_by categories + success_skipped_results = result_manager.get_results(status={"success", "skipped"}, sort_by=["categories"]) + assert len(success_skipped_results) == 9 + assert success_skipped_results[0].categories == ["Interfaces"] + assert success_skipped_results[-1].categories == ["VXLAN"] + + # Check all results with bad sort_by + with pytest.raises( + ValueError, + match=re.escape( + "Invalid sort_by fields: ['bad_field']. Accepted fields are: ['name', 'test', 'categories', 'description', 'result', 'messages', 'custom_field']", + ), + ): + all_results = result_manager.get_results(sort_by=["bad_field"]) + + def test_get_total_results(self, result_manager: ResultManager) -> None: + """Test ResultManager.get_total_results.""" + # Test all results + assert result_manager.get_total_results() == 30 + + # Test single status + assert result_manager.get_total_results(status={"success"}) == 7 + assert result_manager.get_total_results(status={"failure"}) == 19 + assert result_manager.get_total_results(status={"error"}) == 2 + assert result_manager.get_total_results(status={"skipped"}) == 2 + + # Test multiple statuses + assert result_manager.get_total_results(status={"success", "failure"}) == 26 + assert result_manager.get_total_results(status={"success", "failure", "error"}) == 28 + assert result_manager.get_total_results(status={"success", "failure", "error", "skipped"}) == 30 + @pytest.mark.parametrize( ("status", "error_status", "ignore_error", "expected_status"), [