diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml
index 5c9a8814f..cec49d559 100644
--- a/.github/workflows/coverage.yml
+++ b/.github/workflows/coverage.yml
@@ -30,7 +30,7 @@ concurrency:
jobs:
coverage:
name: "${{ matrix.python-version }} on ${{ matrix.os }}"
- runs-on: "${{ matrix.os }}-latest"
+ runs-on: "${{ matrix.os }}-${{ matrix.os-version || 'latest' }}"
env:
MATRIX_ID: "${{ matrix.python-version }}.${{ matrix.os }}"
@@ -69,13 +69,15 @@ jobs:
python-version: "pypy-3.9"
- os: windows
python-version: "pypy-3.10"
- # Skip 3.13.0a4 and pin to 3.13.0a3 for Windows due to build error.
- # Undo when 3.13.0a5 is released.
- - os: windows
- python-version: "3.13"
+ # GitHub is rolling out macos 14, but it doesn't have Python 3.8 or 3.9.
+ # https://mastodon.social/@hugovk/112320493602782374
include:
- - os: windows
- python-version: "3.13.0-alpha.3"
+ - python-version: "3.8"
+ os: "macos"
+ os-version: "13"
+ - python-version: "3.9"
+ os: "macos"
+ os-version: "13"
# If one job fails, stop the whole thing.
fail-fast: true
diff --git a/.github/workflows/kit.yml b/.github/workflows/kit.yml
index 9d78b430e..36a1d42bd 100644
--- a/.github/workflows/kit.yml
+++ b/.github/workflows/kit.yml
@@ -48,7 +48,7 @@ concurrency:
jobs:
wheels:
name: "${{ matrix.py }} ${{ matrix.os }} ${{ matrix.arch }} wheels"
- runs-on: ${{ matrix.os }}-latest
+ runs-on: "${{ matrix.os }}-${{ matrix.os-version || 'latest' }}"
env:
MATRIX_ID: "${{ matrix.py }}-${{ matrix.os }}-${{ matrix.arch }}"
strategy:
@@ -84,7 +84,7 @@ jobs:
#
# # Some OS/arch combinations need overrides for the Python versions:
# os_arch_pys = {
- # ("macos", "arm64"): ["cp38", "cp39", "cp310", "cp311", "cp312"],
+ # # ("macos", "arm64"): ["cp38", "cp39", "cp310", "cp311", "cp312"],
# }
#
# #----- ^^^ ---------------------- ^^^ -----
@@ -98,6 +98,8 @@ jobs:
# "py": the_py,
# "arch": the_arch,
# }
+ # if the_os == "macos":
+ # them["os-version"] = "13"
# print(f"- {json.dumps(them)}")
# ]]]
- {"os": "ubuntu", "py": "cp38", "arch": "x86_64"}
@@ -115,16 +117,16 @@ jobs:
- {"os": "ubuntu", "py": "cp310", "arch": "aarch64"}
- {"os": "ubuntu", "py": "cp311", "arch": "aarch64"}
- {"os": "ubuntu", "py": "cp312", "arch": "aarch64"}
- - {"os": "macos", "py": "cp38", "arch": "arm64"}
- - {"os": "macos", "py": "cp39", "arch": "arm64"}
- - {"os": "macos", "py": "cp310", "arch": "arm64"}
- - {"os": "macos", "py": "cp311", "arch": "arm64"}
- - {"os": "macos", "py": "cp312", "arch": "arm64"}
- - {"os": "macos", "py": "cp38", "arch": "x86_64"}
- - {"os": "macos", "py": "cp39", "arch": "x86_64"}
- - {"os": "macos", "py": "cp310", "arch": "x86_64"}
- - {"os": "macos", "py": "cp311", "arch": "x86_64"}
- - {"os": "macos", "py": "cp312", "arch": "x86_64"}
+ - {"os": "macos", "py": "cp38", "arch": "arm64", "os-version": "13"}
+ - {"os": "macos", "py": "cp39", "arch": "arm64", "os-version": "13"}
+ - {"os": "macos", "py": "cp310", "arch": "arm64", "os-version": "13"}
+ - {"os": "macos", "py": "cp311", "arch": "arm64", "os-version": "13"}
+ - {"os": "macos", "py": "cp312", "arch": "arm64", "os-version": "13"}
+ - {"os": "macos", "py": "cp38", "arch": "x86_64", "os-version": "13"}
+ - {"os": "macos", "py": "cp39", "arch": "x86_64", "os-version": "13"}
+ - {"os": "macos", "py": "cp310", "arch": "x86_64", "os-version": "13"}
+ - {"os": "macos", "py": "cp311", "arch": "x86_64", "os-version": "13"}
+ - {"os": "macos", "py": "cp312", "arch": "x86_64", "os-version": "13"}
- {"os": "windows", "py": "cp38", "arch": "x86"}
- {"os": "windows", "py": "cp39", "arch": "x86"}
- {"os": "windows", "py": "cp310", "arch": "x86"}
@@ -135,7 +137,7 @@ jobs:
- {"os": "windows", "py": "cp310", "arch": "AMD64"}
- {"os": "windows", "py": "cp311", "arch": "AMD64"}
- {"os": "windows", "py": "cp312", "arch": "AMD64"}
- # [[[end]]] (checksum: a6ca53e9c620c9e5ca85e7322122056c)
+ # [[[end]]] (checksum: 16ed28c185d540b2d9972a0217864472)
fail-fast: false
steps:
diff --git a/.github/workflows/python-nightly.yml b/.github/workflows/python-nightly.yml
index 4a3cc0432..13c347350 100644
--- a/.github/workflows/python-nightly.yml
+++ b/.github/workflows/python-nightly.yml
@@ -37,8 +37,10 @@ jobs:
# because jammy ships 3.10, and deadsnakes doesn't want to clobber it.
# https://launchpad.net/~deadsnakes/+archive/ubuntu/nightly/+packages
# https://github.com/deadsnakes/issues/issues/234
- # bionic: 18, focal: 20, jammy: 22
- runs-on: ubuntu-20.04
+ # See https://github.com/deadsnakes/nightly for the source of the nightly
+ # builds.
+ # bionic: 18, focal: 20, jammy: 22, noble: 24
+ runs-on: ubuntu-22.04
# If it doesn't finish in an hour, it's not going to. Don't spin for six
# hours needlessly.
timeout-minutes: 60
@@ -50,7 +52,6 @@ jobs:
# tox.ini so that tox will run properly. PYVERSIONS
# Available versions:
# https://launchpad.net/~deadsnakes/+archive/ubuntu/nightly/+packages
- - "3.11-dev"
- "3.12-dev"
- "3.13-dev"
# https://github.com/actions/setup-python#available-versions-of-pypy
diff --git a/.github/workflows/quality.yml b/.github/workflows/quality.yml
index d1c1f311a..c9718bf11 100644
--- a/.github/workflows/quality.yml
+++ b/.github/workflows/quality.yml
@@ -31,7 +31,9 @@ jobs:
# Because pylint can report different things on different OS's (!)
# (https://github.com/PyCQA/pylint/issues/3489), run this on Mac where local
# pylint gets run.
- runs-on: macos-latest
+ # GitHub is rolling out macos 14, but it doesn't have Python 3.8 or 3.9.
+ # https://mastodon.social/@hugovk/112320493602782374
+ runs-on: macos-13
steps:
- name: "Check out the repo"
@@ -69,8 +71,6 @@ jobs:
- name: "Install dependencies"
run: |
- # We run on 3.8, but the pins were made on 3.7, so don't insist on
- # hashes, which won't match.
python -m pip install -r requirements/tox.pip
- name: "Tox mypy"
diff --git a/.github/workflows/testsuite.yml b/.github/workflows/testsuite.yml
index ee94b1ea0..e11b3d74e 100644
--- a/.github/workflows/testsuite.yml
+++ b/.github/workflows/testsuite.yml
@@ -30,7 +30,7 @@ concurrency:
jobs:
tests:
name: "${{ matrix.python-version }} on ${{ matrix.os }}"
- runs-on: "${{ matrix.os }}-latest"
+ runs-on: "${{ matrix.os }}-${{ matrix.os-version || 'latest' }}"
# Don't run tests if the branch name includes "-notests"
if: "!contains(github.ref, '-notests')"
strategy:
@@ -62,13 +62,16 @@ jobs:
python-version: "pypy-3.9"
- os: windows
python-version: "pypy-3.10"
- # Skip 3.13.0a4 and pin to 3.13.0a3 for Windows due to build error.
- # Undo when 3.13.0a5 is released.
- - os: windows
- python-version: "3.13"
+ # GitHub is rolling out macos 14, but it doesn't have Python 3.8 or 3.9.
+ # https://mastodon.social/@hugovk/112320493602782374
include:
- - os: windows
- python-version: "3.13.0-alpha.3"
+ - python-version: "3.8"
+ os: "macos"
+ os-version: "13"
+ - python-version: "3.9"
+ os: "macos"
+ os-version: "13"
+
fail-fast: false
steps:
diff --git a/CHANGES.rst b/CHANGES.rst
index 7c7667d39..9aad1decf 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -22,6 +22,38 @@ upgrading your version of coverage.py.
.. scriv-start-here
+.. _changes_7-5-0:
+
+Version 7.5.0 — 2024-04-23
+--------------------------
+
+- Added initial support for function and class reporting in the HTML report.
+ There are now three index pages which link to each other: files, functions,
+ and classes. Other reports don't yet have this information, but it will be
+ added in the future where it makes sense. Feedback gladly accepted!
+
+- Other HTML report improvements:
+
+ - There is now a "hide covered" checkbox to filter out 100% files, finishing
+ `issue 1384`_.
+
+ - The index page is always sorted by one of its columns, with clearer
+ indications of the sorting.
+
+ - The "previous file" shortcut key didn't work on the index page, but now it
+ does, fixing `issue 1765`_.
+
+- The debug output showing which configuration files were tried now shows
+ absolute paths to help diagnose problems where settings aren't taking effect,
+ and is renamed from "attempted_config_files" to the more logical
+ "config_files_attempted."
+
+- Python 3.13.0a6 is supported.
+
+.. _issue 1384: https://github.com/nedbat/coveragepy/issues/1384
+.. _issue 1765: https://github.com/nedbat/coveragepy/issues/1765
+
+
.. _changes_7-4-4:
Version 7.4.4 — 2024-03-14
diff --git a/CONTRIBUTORS.txt b/CONTRIBUTORS.txt
index 9c063c20f..1a671fac6 100644
--- a/CONTRIBUTORS.txt
+++ b/CONTRIBUTORS.txt
@@ -102,6 +102,7 @@ J. M. F. Tsang
JT Olds
Jacqueline Lee
Jakub Wilk
+James Valleroy
Jan Rusak
Janakarajan Natarajan
Jerin Peter George
diff --git a/Makefile b/Makefile
index 109221d53..12f8f0e96 100644
--- a/Makefile
+++ b/Makefile
@@ -198,10 +198,10 @@ kit: ## Make the source distribution.
python -m build
kit_upload: ## Upload the built distributions to PyPI.
- twine upload --verbose dist/*
+ twine upload dist/*
test_upload: ## Upload the distributions to PyPI's testing server.
- twine upload --verbose --repository testpypi --password $$TWINE_TEST_PASSWORD dist/*
+ twine upload --repository testpypi --password $$TWINE_TEST_PASSWORD dist/*
kit_local:
# pip.conf looks like this:
diff --git a/README.rst b/README.rst
index 8ceedbf08..dabdc84fc 100644
--- a/README.rst
+++ b/README.rst
@@ -5,7 +5,7 @@
Coverage.py
===========
-Code coverage testing for Python.
+Code coverage measurement for Python.
.. image:: https://raw.githubusercontent.com/vshymanskyy/StandWithUkraine/main/banner2-direct.svg
:target: https://vshymanskyy.github.io/StandWithUkraine
@@ -25,7 +25,7 @@ Coverage.py runs on these versions of Python:
.. PYVERSIONS
-* Python 3.8 through 3.12, and 3.13.0a3 and up.
+* Python 3.8 through 3.12, and 3.13.0a6 and up.
* PyPy3 versions 3.8 through 3.10.
Documentation is on `Read the Docs`_. Code repository and issue tracker are on
@@ -35,6 +35,7 @@ Documentation is on `Read the Docs`_. Code repository and issue tracker are on
.. _GitHub: https://github.com/nedbat/coveragepy
**New in 7.x:**
+initial function/class reporting;
experimental support for sys.monitoring;
dropped support for Python 3.7;
added ``Coverage.collect()`` context manager;
diff --git a/coverage/__init__.py b/coverage/__init__.py
index c3403d444..1bda8921d 100644
--- a/coverage/__init__.py
+++ b/coverage/__init__.py
@@ -28,6 +28,7 @@
from coverage.data import CoverageData as CoverageData
from coverage.exceptions import CoverageException as CoverageException
from coverage.plugin import (
+ CodeRegion as CodeRegion,
CoveragePlugin as CoveragePlugin,
FileReporter as FileReporter,
FileTracer as FileTracer,
@@ -35,7 +36,3 @@
# Backward compatibility.
coverage = Coverage
-
-# On Windows, we encode and decode deep enough that something goes wrong and
-# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
-# Adding a reference here prevents it from being unloaded. Yuk.
diff --git a/coverage/cmdline.py b/coverage/cmdline.py
index 463ea8fde..9f9c06559 100644
--- a/coverage/cmdline.py
+++ b/coverage/cmdline.py
@@ -26,7 +26,7 @@
from coverage.debug import info_header, short_stack, write_formatted_info
from coverage.exceptions import _BaseCoverageException, _ExceptionDuringRun, NoSource
from coverage.execfile import PyRunner
-from coverage.results import Numbers, should_fail_under
+from coverage.results import display_covered, should_fail_under
from coverage.version import __url__
# When adding to this file, alphabetization is important. Look for
@@ -760,7 +760,7 @@ def command_line(self, argv: list[str]) -> int:
precision = cast(int, self.coverage.get_option("report:precision"))
if should_fail_under(total, fail_under, precision):
msg = "total of {total} is less than fail-under={fail_under:.{p}f}".format(
- total=Numbers(precision=precision).display_covered(total),
+ total=display_covered(total, precision),
fail_under=fail_under,
p=precision,
)
diff --git a/coverage/config.py b/coverage/config.py
index 7a7cd540e..7aa2471bd 100644
--- a/coverage/config.py
+++ b/coverage/config.py
@@ -180,7 +180,7 @@ def __init__(self) -> None:
"""Initialize the configuration attributes to their defaults."""
# Metadata about the config.
# We tried to read these config files.
- self.attempted_config_files: list[str] = []
+ self.config_files_attempted: list[str] = []
# We did read these config files, but maybe didn't find any content for us.
self.config_files_read: list[str] = []
# The file that gave us our configuration.
@@ -291,7 +291,7 @@ def from_file(self, filename: str, warn: Callable[[str], None], our_file: bool)
else:
cp = HandyConfigParser(our_file)
- self.attempted_config_files.append(filename)
+ self.config_files_attempted.append(os.path.abspath(filename))
try:
files_read = cp.read(filename)
diff --git a/coverage/control.py b/coverage/control.py
index 6f7f9a311..dbca2013d 100644
--- a/coverage/control.py
+++ b/coverage/control.py
@@ -8,6 +8,7 @@
import atexit
import collections
import contextlib
+import functools
import os
import os.path
import platform
@@ -47,7 +48,7 @@
from coverage.python import PythonFileReporter
from coverage.report import SummaryReporter
from coverage.report_core import render_report
-from coverage.results import Analysis
+from coverage.results import Analysis, analysis_from_file_reporter
from coverage.types import (
FilePath, TConfigurable, TConfigSectionIn, TConfigValueIn, TConfigValueOut,
TFileDisposition, TLineNo, TMorf,
@@ -930,24 +931,17 @@ def analysis2(
analysis.missing_formatted(),
)
- def _analyze(self, it: FileReporter | TMorf) -> Analysis:
- """Analyze a single morf or code unit.
-
- Returns an `Analysis` object.
-
- """
- # All reporting comes through here, so do reporting initialization.
+ def _analyze(self, morf: TMorf) -> Analysis:
+ """Analyze a module or file. Private for now."""
self._init()
self._post_init()
data = self.get_data()
- if isinstance(it, FileReporter):
- fr = it
- else:
- fr = self._get_file_reporter(it)
-
- return Analysis(data, self.config.precision, fr, self._file_mapper)
+ file_reporter = self._get_file_reporter(morf)
+ filename = self._file_mapper(file_reporter.filename)
+ return analysis_from_file_reporter(data, self.config.precision, file_reporter, filename)
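+
+    # Reporting asks for the same morf several times in a row, so cache the
+    # most recent FileReporter to avoid repeating the lookup.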
+ @functools.lru_cache(maxsize=1)
def _get_file_reporter(self, morf: TMorf) -> FileReporter:
"""Get a FileReporter for a module or file name."""
assert self._data is not None
@@ -975,11 +969,14 @@ def _get_file_reporter(self, morf: TMorf) -> FileReporter:
assert isinstance(file_reporter, FileReporter)
return file_reporter
- def _get_file_reporters(self, morfs: Iterable[TMorf] | None = None) -> list[FileReporter]:
- """Get a list of FileReporters for a list of modules or file names.
+ def _get_file_reporters(
+ self,
+ morfs: Iterable[TMorf] | None = None,
+ ) -> list[tuple[FileReporter, TMorf]]:
+ """Get FileReporters for a list of modules or file names.
For each module or file name in `morfs`, find a FileReporter. Return
- the list of FileReporters.
+ a list pairing FileReporters with the morfs.
If `morfs` is a single module or file name, this returns a list of one
FileReporter. If `morfs` is empty or None, then the list of all files
@@ -994,8 +991,7 @@ def _get_file_reporters(self, morfs: Iterable[TMorf] | None = None) -> list[File
if not isinstance(morfs, (list, tuple, set)):
morfs = [morfs] # type: ignore[list-item]
- file_reporters = [self._get_file_reporter(morf) for morf in morfs]
- return file_reporters
+ return [(self._get_file_reporter(morf), morf) for morf in morfs]
def _prepare_data_for_reporting(self) -> None:
"""Re-map data before reporting, to get implicit "combine" behavior."""
@@ -1302,7 +1298,7 @@ def plugin_info(plugins: list[Any]) -> list[str]:
("plugins.file_tracers", plugin_info(self._plugins.file_tracers)),
("plugins.configurers", plugin_info(self._plugins.configurers)),
("plugins.context_switchers", plugin_info(self._plugins.context_switchers)),
- ("configs_attempted", self.config.attempted_config_files),
+ ("configs_attempted", self.config.config_files_attempted),
("configs_read", self.config.config_files_read),
("config_file", self.config.config_file),
("config_contents",
diff --git a/coverage/files.py b/coverage/files.py
index 0dd3c4e01..5fb704350 100644
--- a/coverage/files.py
+++ b/coverage/files.py
@@ -96,13 +96,13 @@ def flat_rootname(filename: str) -> str:
the same directory, but need to differentiate same-named files from
different directories.
- For example, the file a/b/c.py will return 'd_86bbcbe134d28fd2_c_py'
+ For example, the file a/b/c.py will return 'z_86bbcbe134d28fd2_c_py'
"""
dirname, basename = ntpath.split(filename)
if dirname:
fp = hashlib.new("sha3_256", dirname.encode("UTF-8")).hexdigest()[:16]
- prefix = f"d_{fp}_"
+ prefix = f"z_{fp}_"
else:
prefix = ""
return prefix + basename.replace(".", "_")
diff --git a/coverage/html.py b/coverage/html.py
index e2bae1d6b..f32ca0a29 100644
--- a/coverage/html.py
+++ b/coverage/html.py
@@ -6,6 +6,7 @@
from __future__ import annotations
import collections
+import dataclasses
import datetime
import functools
import json
@@ -14,15 +15,17 @@
import shutil
import string
-from dataclasses import dataclass
-from typing import Any, Iterable, TYPE_CHECKING, cast
+from dataclasses import dataclass, field
+from typing import Any, Iterable, TYPE_CHECKING
import coverage
from coverage.data import CoverageData, add_data_to_hash
from coverage.exceptions import NoDataError
from coverage.files import flat_rootname
-from coverage.misc import ensure_dir, file_be_gone, Hasher, isolate_module, format_local_datetime
-from coverage.misc import human_sorted, plural, stdout_link
+from coverage.misc import (
+ ensure_dir, file_be_gone, Hasher, isolate_module, format_local_datetime,
+ human_sorted, plural, stdout_link,
+)
from coverage.report_core import get_analysis_to_report
from coverage.results import Analysis, Numbers
from coverage.templite import Templite
@@ -31,24 +34,9 @@
if TYPE_CHECKING:
- # To avoid circular imports:
from coverage import Coverage
from coverage.plugins import FileReporter
- # To be able to use 3.8 typing features, and still run on 3.7:
- from typing import TypedDict
-
- class IndexInfoDict(TypedDict):
- """Information for each file, to render the index file."""
- nums: Numbers
- html_filename: str
- relative_filename: str
-
- class FileInfoDict(TypedDict):
- """Summary of the information from last rendering, to avoid duplicate work."""
- hash: str
- index: IndexInfoDict
-
os = isolate_module(os)
@@ -80,7 +68,6 @@ class LineData:
tokens: list[tuple[str, str]]
number: TLineNo
category: str
- statement: bool
contexts: list[str]
contexts_label: str
context_list: list[str]
@@ -101,6 +88,27 @@ class FileData:
lines: list[LineData]
+@dataclass
+class IndexItem:
+ """Information for each index entry, to render an index page."""
+ url: str = ""
+ file: str = ""
+ description: str = ""
+ nums: Numbers = field(default_factory=Numbers)
+
+
+@dataclass
+class IndexPage:
+ """Data for each index page."""
+ noun: str
+ plural: str
+ filename: str
+ summaries: list[IndexItem]
+ totals: Numbers
+ skipped_covered_count: int
+ skipped_empty_count: int
+
+
class HtmlDataGeneration:
"""Generate structured data to be turned into HTML reports."""
@@ -109,21 +117,21 @@ class HtmlDataGeneration:
def __init__(self, cov: Coverage) -> None:
self.coverage = cov
self.config = self.coverage.config
- data = self.coverage.get_data()
- self.has_arcs = data.has_arcs()
+ self.data = self.coverage.get_data()
+ self.has_arcs = self.data.has_arcs()
if self.config.show_contexts:
- if data.measured_contexts() == {""}:
+ if self.data.measured_contexts() == {""}:
self.coverage._warn("No contexts were measured")
- data.set_query_contexts(self.config.report_contexts)
+ self.data.set_query_contexts(self.config.report_contexts)
def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData:
"""Produce the data needed for one file's report."""
if self.has_arcs:
missing_branch_arcs = analysis.missing_branch_arcs()
- arcs_executed = analysis.arcs_executed()
+ arcs_executed = analysis.arcs_executed
if self.config.show_contexts:
- contexts_by_lineno = analysis.data.contexts_by_lineno(analysis.filename)
+ contexts_by_lineno = self.data.contexts_by_lineno(analysis.filename)
lines = []
@@ -163,7 +171,6 @@ def data_for_file(self, fr: FileReporter, analysis: Analysis) -> FileData:
tokens=tokens,
number=lineno,
category=category,
- statement=(lineno in analysis.statements),
contexts=contexts,
contexts_label=contexts_label,
context_list=context_list,
@@ -187,6 +194,7 @@ def __init__(self, fr: FileReporter, analysis: Analysis) -> None:
self.analysis = analysis
self.rootname = flat_rootname(fr.relative_filename())
self.html_filename = self.rootname + ".html"
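+        # Filled in by report() once the page order is known.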
+ self.prev_html = self.next_html = ""
HTML_SAFE = string.ascii_letters + string.digits + "!#$%'()*+,-./:;=?@[]^_`{|}~"
@@ -228,8 +236,6 @@ def __init__(self, cov: Coverage) -> None:
self.skip_empty = self.config.html_skip_empty
if self.skip_empty is None:
self.skip_empty = self.config.skip_empty
- self.skipped_covered_count = 0
- self.skipped_empty_count = 0
title = self.config.html_title
@@ -242,11 +248,11 @@ def __init__(self, cov: Coverage) -> None:
self.data = self.coverage.get_data()
self.has_arcs = self.data.has_arcs()
- self.file_summaries: list[IndexInfoDict] = []
- self.all_files_nums: list[Numbers] = []
+ self.index_pages: dict[str, IndexPage] = {
+ "file": self.new_index_page("file", "files"),
+ }
self.incr = IncrementalChecker(self.directory)
self.datagen = HtmlDataGeneration(self.coverage)
- self.totals = Numbers(precision=self.config.precision)
self.directory_was_empty = False
self.first_fr = None
self.final_fr = None
@@ -275,9 +281,22 @@ def __init__(self, cov: Coverage) -> None:
"run": "run",
},
}
+ self.index_tmpl = Templite(read_data("index.html"), self.template_globals)
self.pyfile_html_source = read_data("pyfile.html")
self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals)
+ def new_index_page(self, noun: str, plural_noun: str) -> IndexPage:
+ """Create an IndexPage for a kind of region."""
+ return IndexPage(
+ noun=noun,
+ plural=plural_noun,
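+            # The file index keeps the classic name "index.html"; other region
+            # kinds get pages like "function_index.html".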
+ filename="index.html" if noun == "file" else f"{noun}_index.html",
+ summaries=[],
+ totals=Numbers(precision=self.config.precision),
+ skipped_covered_count=0,
+ skipped_empty_count=0,
+ )
+
def report(self, morfs: Iterable[TMorf] | None) -> float:
"""Generate an HTML report for `morfs`.
@@ -293,40 +312,47 @@ def report(self, morfs: Iterable[TMorf] | None) -> float:
# to the next and previous page.
files_to_report = []
+ have_data = False
for fr, analysis in get_analysis_to_report(self.coverage, morfs):
+ have_data = True
ftr = FileToReport(fr, analysis)
- should = self.should_report_file(ftr)
- if should:
+ if self.should_report(analysis, self.index_pages["file"]):
files_to_report.append(ftr)
else:
file_be_gone(os.path.join(self.directory, ftr.html_filename))
- for i, ftr in enumerate(files_to_report):
- if i == 0:
- prev_html = "index.html"
- else:
- prev_html = files_to_report[i - 1].html_filename
- if i == len(files_to_report) - 1:
- next_html = "index.html"
- else:
- next_html = files_to_report[i + 1].html_filename
- self.write_html_file(ftr, prev_html, next_html)
-
- if not self.all_files_nums:
+ if not have_data:
raise NoDataError("No data to report.")
- self.totals = cast(Numbers, sum(self.all_files_nums))
-
- # Write the index file.
+ if files_to_report:
+ for ftr1, ftr2 in zip(files_to_report[:-1], files_to_report[1:]):
+ ftr1.next_html = ftr2.html_filename
+ ftr2.prev_html = ftr1.html_filename
+ files_to_report[0].prev_html = "index.html"
+ files_to_report[-1].next_html = "index.html"
+
+ for ftr in files_to_report:
+ self.write_html_page(ftr)
+ for noun, plural_noun in ftr.fr.code_region_kinds():
+ if noun not in self.index_pages:
+ self.index_pages[noun] = self.new_index_page(noun, plural_noun)
+
+ # Write the index page.
if files_to_report:
first_html = files_to_report[0].html_filename
final_html = files_to_report[-1].html_filename
else:
first_html = final_html = "index.html"
- self.index_file(first_html, final_html)
+ self.write_file_index_page(first_html, final_html)
+
+ # Write function and class index pages.
+ self.write_region_index_pages(files_to_report)
self.make_local_static_report_files()
- return self.totals.n_statements and self.totals.pc_covered
+ return (
+ self.index_pages["file"].totals.n_statements
+ and self.index_pages["file"].totals.pc_covered
+ )
def make_directory(self) -> None:
"""Make sure our htmlcov directory exists."""
@@ -352,39 +378,44 @@ def make_local_static_report_files(self) -> None:
assert self.config.extra_css is not None
shutil.copyfile(self.config.extra_css, os.path.join(self.directory, self.extra_css))
- def should_report_file(self, ftr: FileToReport) -> bool:
- """Determine if we'll report this file."""
+ def should_report(self, analysis: Analysis, index_page: IndexPage) -> bool:
+ """Determine if we'll report this file or region."""
# Get the numbers for this file.
- nums = ftr.analysis.numbers
- self.all_files_nums.append(nums)
+ nums = analysis.numbers
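+        # Add to the totals before any skip checks, so skipped entries still
+        # count toward the overall numbers.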
+ index_page.totals += nums
if self.skip_covered:
# Don't report on 100% files.
no_missing_lines = (nums.n_missing == 0)
no_missing_branches = (nums.n_partial_branches == 0)
if no_missing_lines and no_missing_branches:
- # If there's an existing file, remove it.
- self.skipped_covered_count += 1
+ index_page.skipped_covered_count += 1
return False
if self.skip_empty:
# Don't report on empty files.
if nums.n_statements == 0:
- self.skipped_empty_count += 1
+ index_page.skipped_empty_count += 1
return False
return True
- def write_html_file(self, ftr: FileToReport, prev_html: str, next_html: str) -> None:
- """Generate an HTML file for one source file."""
+ def write_html_page(self, ftr: FileToReport) -> None:
+ """Generate an HTML page for one source file.
+
+ If the page on disk is already correct based on our incremental status
+ checking, then the page doesn't have to be generated, and this function
+ only does page summary bookkeeping.
+
+ """
self.make_directory()
- # Find out if the file on disk is already correct.
+ # Find out if the page on disk is already correct.
if self.incr.can_skip_file(self.data, ftr.fr, ftr.rootname):
- self.file_summaries.append(self.incr.index_info(ftr.rootname))
+ self.index_pages["file"].summaries.append(self.incr.index_info(ftr.rootname))
return
- # Write the HTML page for this file.
+ # Write the HTML page for this source file.
file_data = self.datagen.data_for_file(ftr.fr, ftr.analysis)
contexts = collections.Counter(c for cline in file_data.lines for c in cline.contexts)
@@ -455,151 +486,255 @@ def write_html_file(self, ftr: FileToReport, prev_html: str, next_html: str) ->
html = self.source_tmpl.render({
**file_data.__dict__,
"contexts_json": contexts_json,
- "prev_html": prev_html,
- "next_html": next_html,
+ "prev_html": ftr.prev_html,
+ "next_html": ftr.next_html,
})
write_html(html_path, html)
- # Save this file's information for the index file.
- index_info: IndexInfoDict = {
- "nums": ftr.analysis.numbers,
- "html_filename": ftr.html_filename,
- "relative_filename": ftr.fr.relative_filename(),
- }
- self.file_summaries.append(index_info)
+ # Save this file's information for the index page.
+ index_info = IndexItem(
+ url = ftr.html_filename,
+ file = escape(ftr.fr.relative_filename()),
+ nums = ftr.analysis.numbers,
+ )
+ self.index_pages["file"].summaries.append(index_info)
self.incr.set_index_info(ftr.rootname, index_info)
- def index_file(self, first_html: str, final_html: str) -> None:
- """Write the index.html file for this report."""
+ def write_file_index_page(self, first_html: str, final_html: str) -> None:
+ """Write the file index page for this report."""
self.make_directory()
- index_tmpl = Templite(read_data("index.html"), self.template_globals)
+ index_file = self.write_index_page(
+ self.index_pages["file"],
+ first_html=first_html,
+ final_html=final_html,
+ )
+
+ print_href = stdout_link(index_file, f"file://{os.path.abspath(index_file)}")
+ self.coverage._message(f"Wrote HTML report to {print_href}")
+
+ # Write the latest hashes for next time.
+ self.incr.write()
+
+ def write_region_index_pages(self, files_to_report: Iterable[FileToReport]) -> None:
+ """Write the other index pages for this report."""
+ for ftr in files_to_report:
+ region_nouns = [pair[0] for pair in ftr.fr.code_region_kinds()]
+ num_lines = len(ftr.fr.source().splitlines())
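+            # Start with every line "outside"; each region claims its lines,
+            # and whatever remains gets a catch-all entry below.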
+ outside_lines = set(range(1, num_lines + 1))
+ regions = ftr.fr.code_regions()
+
+ for noun in region_nouns:
+ page_data = self.index_pages[noun]
+
+ for region in regions:
+ if region.kind != noun:
+ continue
+ outside_lines -= region.lines
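+                    # Re-analyze the file, restricted to this region's lines.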
+ analysis = ftr.analysis.narrow(region.lines)
+ if not self.should_report(analysis, page_data):
+ continue
+ sorting_name = region.name.rpartition(".")[-1].lstrip("_")
+ page_data.summaries.append(IndexItem(
+ url=f"{ftr.html_filename}#t{region.start}",
+ file=escape(ftr.fr.relative_filename()),
+ description=(
+ f""
+ + escape(region.name)
+ + ""
+ ),
+ nums=analysis.numbers,
+ ))
+
+ analysis = ftr.analysis.narrow(outside_lines)
+ if self.should_report(analysis, page_data):
+ page_data.summaries.append(IndexItem(
+ url=ftr.html_filename,
+ file=escape(ftr.fr.relative_filename()),
+ description=(
+ ""
+ + f"(no {escape(noun)})"
+ + ""
+ ),
+ nums=analysis.numbers,
+ ))
+
+ for noun, index_page in self.index_pages.items():
+ if noun != "file":
+ self.write_index_page(index_page)
+
+ def write_index_page(self, index_page: IndexPage, **kwargs: str) -> str:
+ """Write an index page specified by `index_page`.
+
+ Returns the filename created.
+ """
skipped_covered_msg = skipped_empty_msg = ""
- if self.skipped_covered_count:
- n = self.skipped_covered_count
- skipped_covered_msg = f"{n} file{plural(n)} skipped due to complete coverage."
- if self.skipped_empty_count:
- n = self.skipped_empty_count
- skipped_empty_msg = f"{n} empty file{plural(n)} skipped."
-
- html = index_tmpl.render({
- "files": self.file_summaries,
- "totals": self.totals,
+ if n := index_page.skipped_covered_count:
+ word = plural(n, index_page.noun, index_page.plural)
+ skipped_covered_msg = f"{n} {word} skipped due to complete coverage."
+ if n := index_page.skipped_empty_count:
+ word = plural(n, index_page.noun, index_page.plural)
+ skipped_empty_msg = f"{n} empty {word} skipped."
+
+ index_buttons = [
+ {
+ "label": ip.plural.title(),
+ "url": ip.filename if ip.noun != index_page.noun else "",
+ "current": ip.noun == index_page.noun,
+ }
+ for ip in self.index_pages.values()
+ ]
+ render_data = {
+ "regions": index_page.summaries,
+ "totals": index_page.totals,
+ "noun": index_page.noun,
+ "column2": index_page.noun if index_page.noun != "file" else "",
+ "skip_covered": self.skip_covered,
"skipped_covered_msg": skipped_covered_msg,
"skipped_empty_msg": skipped_empty_msg,
- "first_html": first_html,
- "final_html": final_html,
- })
+ "first_html": "",
+ "final_html": "",
+ "index_buttons": index_buttons,
+ }
+ render_data.update(kwargs)
+ html = self.index_tmpl.render(render_data)
- index_file = os.path.join(self.directory, "index.html")
+ index_file = os.path.join(self.directory, index_page.filename)
write_html(index_file, html)
+ return index_file
- print_href = stdout_link(index_file, f"file://{os.path.abspath(index_file)}")
- self.coverage._message(f"Wrote HTML report to {print_href}")
- # Write the latest hashes for next time.
- self.incr.write()
+@dataclass
+class FileInfo:
+ """Summary of the information from last rendering, to avoid duplicate work."""
+ hash: str = ""
+ index: IndexItem = field(default_factory=IndexItem)
class IncrementalChecker:
- """Logic and data to support incremental reporting."""
+ """Logic and data to support incremental reporting.
+
+ When generating an HTML report, often only a few of the source files have
+ changed since the last time we made the HTML report. This means previously
+ created HTML pages can be reused without generating them again, speeding
+ the command.
+
+ This class manages a JSON data file that captures enough information to
+ know whether an HTML page for a .py file needs to be regenerated or not.
+ The data file also needs to store all the information needed to create the
+ entry for the file on the index page so that if the HTML page is reused,
+ the index page can still be created to refer to it.
+
+ The data looks like::
+
+ {
+ "note": "This file is an internal implementation detail ...",
+ // A fixed number indicating the data format. STATUS_FORMAT
+ "format": 5,
+ // The version of coverage.py
+ "version": "7.4.4",
+ // A hash of a number of global things, including the configuration
+ // settings and the pyfile.html template itself.
+ "globals": "540ee119c15d52a68a53fe6f0897346d",
+ "files": {
+ // An entry for each source file keyed by the flat_rootname().
+ "z_7b071bdc2a35fa80___init___py": {
+ // Hash of the source, the text of the .py file.
+ "hash": "e45581a5b48f879f301c0f30bf77a50c",
+ // Information for the index.html file.
+ "index": {
+ "url": "z_7b071bdc2a35fa80___init___py.html",
+ "file": "cogapp/__init__.py",
+ "description": "",
+ // The Numbers for this file.
+ "nums": { "precision": 2, "n_files": 1, "n_statements": 43, ... }
+ }
+ },
+ ...
+ }
+ }
+
+ """
STATUS_FILE = "status.json"
- STATUS_FORMAT = 2
+ STATUS_FORMAT = 5
NOTE = (
"This file is an internal implementation detail to speed up HTML report"
+ " generation. Its format can change at any time. You might be looking"
+ " for the JSON report: https://coverage.rtfd.io/cmd.html#cmd-json"
)
- # The data looks like:
- #
- # {
- # "format": 2,
- # "globals": "540ee119c15d52a68a53fe6f0897346d",
- # "version": "4.0a1",
- # "files": {
- # "cogapp___init__": {
- # "hash": "e45581a5b48f879f301c0f30bf77a50c",
- # "index": {
- # "html_filename": "cogapp___init__.html",
- # "relative_filename": "cogapp/__init__",
- # "nums": [ 1, 14, 0, 0, 0, 0, 0 ]
- # }
- # },
- # ...
- # "cogapp_whiteutils": {
- # "hash": "8504bb427fc488c4176809ded0277d51",
- # "index": {
- # "html_filename": "cogapp_whiteutils.html",
- # "relative_filename": "cogapp/whiteutils",
- # "nums": [ 1, 59, 0, 1, 28, 2, 2 ]
- # }
- # }
- # }
- # }
-
def __init__(self, directory: str) -> None:
self.directory = directory
- self.reset()
+ self._reset()
- def reset(self) -> None:
+ def _reset(self) -> None:
"""Initialize to empty. Causes all files to be reported."""
self.globals = ""
- self.files: dict[str, FileInfoDict] = {}
+ self.files: dict[str, FileInfo] = {}
def read(self) -> None:
"""Read the information we stored last time."""
- usable = False
try:
status_file = os.path.join(self.directory, self.STATUS_FILE)
with open(status_file) as fstatus:
status = json.load(fstatus)
except (OSError, ValueError):
+ # Status file is missing or malformed.
usable = False
else:
- usable = True
if status["format"] != self.STATUS_FORMAT:
usable = False
elif status["version"] != coverage.__version__:
usable = False
+ else:
+ usable = True
if usable:
self.files = {}
- for filename, fileinfo in status["files"].items():
- fileinfo["index"]["nums"] = Numbers(*fileinfo["index"]["nums"])
+ for filename, filedict in status["files"].items():
+ indexdict = filedict["index"]
+ index_item = IndexItem(**indexdict)
+ index_item.nums = Numbers(**indexdict["nums"])
+ fileinfo = FileInfo(
+ hash=filedict["hash"],
+ index=index_item,
+ )
self.files[filename] = fileinfo
self.globals = status["globals"]
else:
- self.reset()
+ self._reset()
def write(self) -> None:
"""Write the current status."""
status_file = os.path.join(self.directory, self.STATUS_FILE)
- files = {}
- for filename, fileinfo in self.files.items():
- index = fileinfo["index"]
- index["nums"] = index["nums"].init_args() # type: ignore[typeddict-item]
- files[filename] = fileinfo
-
- status = {
+ status_data = {
"note": self.NOTE,
"format": self.STATUS_FORMAT,
"version": coverage.__version__,
"globals": self.globals,
- "files": files,
+ "files": {
+ fname: dataclasses.asdict(finfo)
+ for fname, finfo in self.files.items()
+ },
}
with open(status_file, "w") as fout:
- json.dump(status, fout, separators=(",", ":"))
+ json.dump(status_data, fout, separators=(",", ":"))
def check_global_data(self, *data: Any) -> None:
- """Check the global data that can affect incremental reporting."""
+ """Check the global data that can affect incremental reporting.
+
+ Pass in whatever global information could affect the content of the
+ HTML pages. If the global data has changed since last time, this will
+ clear the data so that all files are regenerated.
+
+ """
m = Hasher()
for d in data:
m.update(d)
these_globals = m.hexdigest()
if self.globals != these_globals:
- self.reset()
+ self._reset()
self.globals = these_globals
def can_skip_file(self, data: CoverageData, fr: FileReporter, rootname: str) -> bool:
@@ -607,36 +742,33 @@ def can_skip_file(self, data: CoverageData, fr: FileReporter, rootname: str) ->
`data` is a CoverageData object, `fr` is a `FileReporter`, and
`rootname` is the name being used for the file.
+
+ Returns True if the HTML page is fine as-is, False if we need to recreate
+ the HTML page.
+
"""
m = Hasher()
m.update(fr.source().encode("utf-8"))
add_data_to_hash(data, fr.filename, m)
this_hash = m.hexdigest()
- that_hash = self.file_hash(rootname)
+ file_info = self.files.setdefault(rootname, FileInfo())
- if this_hash == that_hash:
+ if this_hash == file_info.hash:
# Nothing has changed to require the file to be reported again.
return True
else:
- self.set_file_hash(rootname, this_hash)
+ # File has changed, record the latest hash and force regeneration.
+ file_info.hash = this_hash
return False
- def file_hash(self, fname: str) -> str:
- """Get the hash of `fname`'s contents."""
- return self.files.get(fname, {}).get("hash", "") # type: ignore[call-overload]
-
- def set_file_hash(self, fname: str, val: str) -> None:
- """Set the hash of `fname`'s contents."""
- self.files.setdefault(fname, {})["hash"] = val # type: ignore[typeddict-item]
-
- def index_info(self, fname: str) -> IndexInfoDict:
+ def index_info(self, fname: str) -> IndexItem:
"""Get the information for index.html for `fname`."""
- return self.files.get(fname, {}).get("index", {}) # type: ignore
+ return self.files.get(fname, FileInfo()).index
- def set_index_info(self, fname: str, info: IndexInfoDict) -> None:
+ def set_index_info(self, fname: str, info: IndexItem) -> None:
"""Set the information for index.html for `fname`."""
- self.files.setdefault(fname, {})["index"] = info # type: ignore[typeddict-item]
+ self.files.setdefault(fname, FileInfo()).index = info
# Helpers for templates and generating HTML
diff --git a/coverage/htmlfiles/coverage_html.js b/coverage/htmlfiles/coverage_html.js
index 593488286..a28c1bef8 100644
--- a/coverage/htmlfiles/coverage_html.js
+++ b/coverage/htmlfiles/coverage_html.js
@@ -36,11 +36,12 @@ function on_click(sel, fn) {
function getCellValue(row, column = 0) {
const cell = row.cells[column] // nosemgrep: eslint.detect-object-injection
if (cell.childElementCount == 1) {
- const child = cell.firstElementChild
- if (child instanceof HTMLTimeElement && child.dateTime) {
- return child.dateTime
- } else if (child instanceof HTMLDataElement && child.value) {
- return child.value
+ var child = cell.firstElementChild;
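+        // The sortable value may be wrapped in a link; look inside it.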
+ if (child.tagName === "A") {
+ child = child.firstElementChild;
+ }
+ if (child instanceof HTMLDataElement && child.value) {
+ return child.value;
}
}
return cell.innerText || cell.textContent;
@@ -50,28 +51,37 @@ function rowComparator(rowA, rowB, column = 0) {
let valueA = getCellValue(rowA, column);
let valueB = getCellValue(rowB, column);
if (!isNaN(valueA) && !isNaN(valueB)) {
- return valueA - valueB
+ return valueA - valueB;
}
return valueA.localeCompare(valueB, undefined, {numeric: true});
}
function sortColumn(th) {
// Get the current sorting direction of the selected header,
- // clear state on other headers and then set the new sorting direction
+ // clear state on other headers and then set the new sorting direction.
const currentSortOrder = th.getAttribute("aria-sort");
[...th.parentElement.cells].forEach(header => header.setAttribute("aria-sort", "none"));
+ var direction;
if (currentSortOrder === "none") {
- th.setAttribute("aria-sort", th.dataset.defaultSortOrder || "ascending");
- } else {
- th.setAttribute("aria-sort", currentSortOrder === "ascending" ? "descending" : "ascending");
+ direction = th.dataset.defaultSortOrder || "ascending";
+ }
+ else if (currentSortOrder === "ascending") {
+ direction = "descending";
+ }
+ else {
+ direction = "ascending";
}
+ th.setAttribute("aria-sort", direction);
const column = [...th.parentElement.cells].indexOf(th)
- // Sort all rows and afterwards append them in order to move them in the DOM
+ // Sort all rows and afterwards append them in order to move them in the DOM.
Array.from(th.closest("table").querySelectorAll("tbody tr"))
- .sort((rowA, rowB) => rowComparator(rowA, rowB, column) * (th.getAttribute("aria-sort") === "ascending" ? 1 : -1))
- .forEach(tr => tr.parentElement.appendChild(tr) );
+ .sort((rowA, rowB) => rowComparator(rowA, rowB, column) * (direction === "ascending" ? 1 : -1))
+ .forEach(tr => tr.parentElement.appendChild(tr));
+
+ // Save the sort order for next time.
+ localStorage.setItem(coverage.INDEX_SORT_STORAGE, JSON.stringify({column, direction}));
}
// Find all the elements with data-shortcut attribute, and use them to assign a shortcut key.
@@ -96,15 +106,40 @@ coverage.wire_up_filter = function () {
const no_rows = document.getElementById("no_rows");
// Observe filter keyevents.
- document.getElementById("filter").addEventListener("input", debounce(event => {
+ const filter_handler = (event => {
// Keep running total of each metric, first index contains number of shown rows
const totals = new Array(table.rows[0].cells.length).fill(0);
// Accumulate the percentage as fraction
totals[totals.length - 1] = { "numer": 0, "denom": 0 }; // nosemgrep: eslint.detect-object-injection
+ var text = document.getElementById("filter").value;
+ const casefold = (text === text.toLowerCase());
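+        // Smart case: an all-lowercase filter matches case-insensitively.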
+ const hide100 = document.getElementById("hide100").checked;
+
// Hide / show elements.
table_body_rows.forEach(row => {
- if (!row.cells[0].textContent.includes(event.target.value)) {
+ var show = false;
+ // Check the text filter.
+ for (let column = 0; column < totals.length; column++) {
+ cell = row.cells[column];
+ if (cell.classList.contains("name")) {
+ var celltext = cell.textContent;
+ if (casefold) {
+ celltext = celltext.toLowerCase();
+ }
+ if (celltext.includes(text)) {
+ show = true;
+ }
+ }
+ }
+
+ // Check the "hide covered" filter.
+ if (show && hide100) {
+ const [numer, denom] = row.cells[row.cells.length - 1].dataset.ratio.split(" ");
+ show = (numer !== denom);
+ }
+
+ if (!show) {
// hide
row.classList.add("hidden");
return;
@@ -114,15 +149,19 @@ coverage.wire_up_filter = function () {
row.classList.remove("hidden");
totals[0]++;
- for (let column = 1; column < totals.length; column++) {
+ for (let column = 0; column < totals.length; column++) {
// Accumulate dynamic totals
cell = row.cells[column] // nosemgrep: eslint.detect-object-injection
+ if (cell.classList.contains("name")) {
+ continue;
+ }
if (column === totals.length - 1) {
// Last column contains percentage
const [numer, denom] = cell.dataset.ratio.split(" ");
totals[column]["numer"] += parseInt(numer, 10); // nosemgrep: eslint.detect-object-injection
totals[column]["denom"] += parseInt(denom, 10); // nosemgrep: eslint.detect-object-injection
- } else {
+ }
+ else {
totals[column] += parseInt(cell.textContent, 10); // nosemgrep: eslint.detect-object-injection
}
}
@@ -142,9 +181,12 @@ coverage.wire_up_filter = function () {
const footer = table.tFoot.rows[0];
// Calculate new dynamic sum values based on visible rows.
- for (let column = 1; column < totals.length; column++) {
+ for (let column = 0; column < totals.length; column++) {
// Get footer cell element.
const cell = footer.cells[column]; // nosemgrep: eslint.detect-object-injection
+ if (cell.classList.contains("name")) {
+ continue;
+ }
// Set value into dynamic footer cell element.
if (column === totals.length - 1) {
@@ -158,48 +200,47 @@ coverage.wire_up_filter = function () {
cell.textContent = denom
? `${(numer * 100 / denom).toFixed(places)}%`
: `${(100).toFixed(places)}%`;
- } else {
+ }
+ else {
cell.textContent = totals[column]; // nosemgrep: eslint.detect-object-injection
}
}
- }));
+ });
+
+ document.getElementById("filter").addEventListener("input", debounce(filter_handler));
+ document.getElementById("hide100").addEventListener("input", debounce(filter_handler));
// Trigger change event on setup, to force filter on page refresh
// (filter value may still be present).
document.getElementById("filter").dispatchEvent(new Event("input"));
+ document.getElementById("hide100").dispatchEvent(new Event("input"));
};
-coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2";
-
-// Loaded on index.html
-coverage.index_ready = function () {
- coverage.assign_shortkeys();
- coverage.wire_up_filter();
+// Set up the click-to-sort columns.
+coverage.wire_up_sorting = function () {
document.querySelectorAll("[data-sortable] th[aria-sort]").forEach(
th => th.addEventListener("click", e => sortColumn(e.target))
);
// Look for a localStorage item containing previous sort settings:
+ var column = 0, direction = "ascending";
const stored_list = localStorage.getItem(coverage.INDEX_SORT_STORAGE);
-
if (stored_list) {
- const {column, direction} = JSON.parse(stored_list);
- const th = document.querySelector("[data-sortable]").tHead.rows[0].cells[column]; // nosemgrep: eslint.detect-object-injection
- th.setAttribute("aria-sort", direction === "ascending" ? "descending" : "ascending");
- th.click()
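+        // Parentheses make this destructure into the existing variables.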
+ ({column, direction} = JSON.parse(stored_list));
}
- // Watch for page unload events so we can save the final sort settings:
- window.addEventListener("unload", function () {
- const th = document.querySelector('[data-sortable] th[aria-sort="ascending"], [data-sortable] [aria-sort="descending"]');
- if (!th) {
- return;
- }
- localStorage.setItem(coverage.INDEX_SORT_STORAGE, JSON.stringify({
- column: [...th.parentElement.cells].indexOf(th),
- direction: th.getAttribute("aria-sort"),
- }));
- });
+ const th = document.querySelector("[data-sortable]").tHead.rows[0].cells[column]; // nosemgrep: eslint.detect-object-injection
+ th.setAttribute("aria-sort", direction === "ascending" ? "descending" : "ascending");
+ th.click()
+};
+
+coverage.INDEX_SORT_STORAGE = "COVERAGE_INDEX_SORT_2";
+
+// Loaded on index.html
+coverage.index_ready = function () {
+ coverage.assign_shortkeys();
+ coverage.wire_up_filter();
+ coverage.wire_up_sorting();
on_click(".button_prev_file", coverage.to_prev_file);
on_click(".button_next_file", coverage.to_next_file);
@@ -217,7 +258,8 @@ coverage.pyfile_ready = function () {
if (frag.length > 2 && frag[1] === "t") {
document.querySelector(frag).closest(".n").classList.add("highlight");
coverage.set_sel(parseInt(frag.substr(2), 10));
- } else {
+ }
+ else {
coverage.set_sel(0);
}
@@ -441,7 +483,8 @@ coverage.to_next_chunk_nicely = function () {
if (line.parentElement !== document.getElementById("source")) {
// The element is not a source line but the header or similar
coverage.select_line_or_chunk(1);
- } else {
+ }
+ else {
// We extract the line number from the id
coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10));
}
@@ -460,7 +503,8 @@ coverage.to_prev_chunk_nicely = function () {
if (line.parentElement !== document.getElementById("source")) {
// The element is not a source line but the header or similar
coverage.select_line_or_chunk(coverage.lines_len);
- } else {
+ }
+ else {
// We extract the line number from the id
coverage.select_line_or_chunk(parseInt(line.id.substring(1), 10));
}
@@ -562,7 +606,8 @@ coverage.build_scroll_markers = function () {
if (line_number === previous_line + 1) {
// If this solid missed block just make previous mark higher.
last_mark.style.height = `${line_top + line_height - last_top}px`;
- } else {
+ }
+ else {
// Add colored line in scroll_marker block.
last_mark = document.createElement("div");
last_mark.id = `m${line_number}`;
@@ -590,7 +635,8 @@ coverage.wire_up_sticky_header = function () {
function updateHeader() {
if (window.scrollY > header_bottom) {
header.classList.add("sticky");
- } else {
+ }
+ else {
header.classList.remove("sticky");
}
}
@@ -618,7 +664,8 @@ coverage.expand_contexts = function (e) {
document.addEventListener("DOMContentLoaded", () => {
if (document.body.classList.contains("indexfile")) {
coverage.index_ready();
- } else {
+ }
+ else {
coverage.pyfile_ready();
}
});
diff --git a/coverage/htmlfiles/index.html b/coverage/htmlfiles/index.html
index bde46eafe..69d4b19ed 100644
--- a/coverage/htmlfiles/index.html
+++ b/coverage/htmlfiles/index.html
@@ -2,7 +2,7 @@
{# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #}
-
+
{{ title|escape }}
@@ -11,7 +11,7 @@
{% if extra_css %}
{% endif %}
-
+
@@ -24,13 +24,16 @@ {{ title|escape }}: