From 7a0fee401d2990bd5904e94cd5d3a62c50a8b682 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Jul 2025 17:27:27 -0400 Subject: [PATCH 01/66] [pre-commit.ci] pre-commit autoupdate (#12864) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.12.4 → v0.12.5](https://github.com/astral-sh/ruff-pre-commit/compare/v0.12.4...v0.12.5) - [github.com/pre-commit/mirrors-mypy: v1.15.0 → v1.17.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.15.0...v1.17.0) * Apply suggestion from @cclauss * --ignore=web_programming/current_stock_price.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .github/workflows/build.yml | 3 ++- .pre-commit-config.yaml | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8b83cb41c79a..01b67c6de05b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -16,7 +16,7 @@ jobs: cache-dependency-glob: uv.lock - uses: actions/setup-python@v5 with: - python-version: 3.13 + python-version: 3.x allow-prereleases: true - run: uv sync --group=test - name: Run tests @@ -30,6 +30,7 @@ jobs: --ignore=project_euler/ --ignore=quantum/q_fourier_transform.py --ignore=scripts/validate_solutions.py + --ignore=web_programming/current_stock_price.py --ignore=web_programming/fetch_anime_and_play.py --cov-report=term-missing:skip-covered --cov=. . diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f733908c2987..d52c31c42592 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.12.4 + rev: v0.12.5 hooks: - id: ruff-check - id: ruff-format From d05f5d22d92424f1197950d519fc7a2059de4a5a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 11 Aug 2025 21:03:20 +0200 Subject: [PATCH 02/66] [pre-commit.ci] pre-commit autoupdate (#12880) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/pre-commit/pre-commit-hooks: v5.0.0 → v6.0.0](https://github.com/pre-commit/pre-commit-hooks/compare/v5.0.0...v6.0.0) - [github.com/astral-sh/ruff-pre-commit: v0.12.5 → v0.12.8](https://github.com/astral-sh/ruff-pre-commit/compare/v0.12.5...v0.12.8) - [github.com/pre-commit/mirrors-mypy: v1.15.0 → v1.17.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.15.0...v1.17.1) * Apply suggestion from @cclauss --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d52c31c42592..594489d87c76 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,6 +1,6 @@ repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v5.0.0 + rev: v6.0.0 hooks: - id: check-executables-have-shebangs - id: check-toml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.12.5 + rev: v0.12.8 hooks: - id: ruff-check - id: ruff-format From 
f662b63d1ad958d6f14a25136979f3ebdabb987a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 12 Aug 2025 09:30:10 +0200 Subject: [PATCH 03/66] Bump actions/checkout from 4 to 5 (#12891) Bumps [actions/checkout](https://github.com/actions/checkout) from 4 to 5. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v4...v5) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: '5' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- .github/workflows/devcontainer_ci.yml | 2 +- .github/workflows/directory_writer.yml | 2 +- .github/workflows/project_euler.yml | 4 ++-- .github/workflows/ruff.yml | 2 +- .github/workflows/sphinx.yml | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 01b67c6de05b..69192db0c4c6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -9,7 +9,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: astral-sh/setup-uv@v6 with: enable-cache: true diff --git a/.github/workflows/devcontainer_ci.yml b/.github/workflows/devcontainer_ci.yml index c0b26bb77da6..71623e5e6e69 100644 --- a/.github/workflows/devcontainer_ci.yml +++ b/.github/workflows/devcontainer_ci.yml @@ -12,7 +12,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: devcontainers/ci@v0.3 with: push: never diff --git a/.github/workflows/directory_writer.yml b/.github/workflows/directory_writer.yml index 3edb5c91a951..f5167f8d1a58 100644 --- a/.github/workflows/directory_writer.yml +++ b/.github/workflows/directory_writer.yml @@ -6,7 +6,7 @@ jobs: directory_writer: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 with: fetch-depth: 0 - uses: actions/setup-python@v5 diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index eaf4150e4eaa..8b8cb2a1e68f 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -14,7 +14,7 @@ jobs: project-euler: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: astral-sh/setup-uv@v6 - uses: actions/setup-python@v5 with: @@ -24,7 +24,7 @@ jobs: validate-solutions: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: astral-sh/setup-uv@v6 - uses: actions/setup-python@v5 with: diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index ec9f0202bd7e..7bcc2850782f 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -11,6 +11,6 @@ jobs: ruff: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: astral-sh/setup-uv@v6 - run: uvx ruff check --output-format=github . 
diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml index 2010041d80c5..a5502a586297 100644 --- a/.github/workflows/sphinx.yml +++ b/.github/workflows/sphinx.yml @@ -25,7 +25,7 @@ jobs: build_docs: runs-on: ubuntu-24.04-arm steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v5 - uses: astral-sh/setup-uv@v6 - uses: actions/setup-python@v5 with: From beb3cfdf283f68545364aedf34dbbb843ffa95b7 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 19 Aug 2025 14:28:03 +0200 Subject: [PATCH 04/66] [pre-commit.ci] pre-commit autoupdate (#12900) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.12.8 → v0.12.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.12.8...v0.12.9) - [github.com/pre-commit/mirrors-mypy: v1.15.0 → v1.17.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.15.0...v1.17.1) * Apply suggestion from @cclauss --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 594489d87c76..252cfebc53a8 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.12.8 + rev: v0.12.9 hooks: - id: ruff-check - id: ruff-format From e2245321075bcf9b21c580b87adbd95d21b86e91 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 23 Aug 2025 05:48:47 +0200 Subject: [PATCH 05/66] Bump actions/upload-pages-artifact from 3 to 4 (#12922) Bumps [actions/upload-pages-artifact](https://github.com/actions/upload-pages-artifact) from 3 to 4. - [Release notes](https://github.com/actions/upload-pages-artifact/releases) - [Commits](https://github.com/actions/upload-pages-artifact/compare/v3...v4) --- updated-dependencies: - dependency-name: actions/upload-pages-artifact dependency-version: '4' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/sphinx.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml index a5502a586297..e28fa04f3ab4 100644 --- a/.github/workflows/sphinx.yml +++ b/.github/workflows/sphinx.yml @@ -34,7 +34,7 @@ jobs: - run: uv sync --group=docs - uses: actions/configure-pages@v5 - run: uv run sphinx-build -c docs . docs/_build/html - - uses: actions/upload-pages-artifact@v3 + - uses: actions/upload-pages-artifact@v4 with: path: docs/_build/html From a8c5616857bb15d61ee764671820ad82a5197219 Mon Sep 17 00:00:00 2001 From: Milad Khoshdel Date: Sun, 24 Aug 2025 13:37:39 +0330 Subject: [PATCH 06/66] Simplify Capitalize Function (#12879) * Simplify the capitalize function using ASCII arithmetic to make the algorithm five times faster. 
* [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update capitalize.py * Update capitalize.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- strings/capitalize.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/strings/capitalize.py b/strings/capitalize.py index c0b45e0d9614..628ebffc8852 100644 --- a/strings/capitalize.py +++ b/strings/capitalize.py @@ -1,6 +1,3 @@ -from string import ascii_lowercase, ascii_uppercase - - def capitalize(sentence: str) -> str: """ Capitalizes the first letter of a sentence or word. @@ -19,11 +16,9 @@ def capitalize(sentence: str) -> str: if not sentence: return "" - # Create a dictionary that maps lowercase letters to uppercase letters # Capitalize the first character if it's a lowercase letter # Concatenate the capitalized character with the rest of the string - lower_to_upper = dict(zip(ascii_lowercase, ascii_uppercase)) - return lower_to_upper.get(sentence[0], sentence[0]) + sentence[1:] + return sentence[0].upper() + sentence[1:] if __name__ == "__main__": From c2b90034a080355bfac463b598e5228950fefa47 Mon Sep 17 00:00:00 2001 From: Chandra Sekhar Pola Date: Sun, 24 Aug 2025 07:01:21 -0400 Subject: [PATCH 07/66] Fix code style in README.md (#12843) * Fix #12840 code style * Update README.md --------- Co-authored-by: Maxim Smolskiy --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index d8eba4e016fa..fe65bb253360 100644 --- a/README.md +++ b/README.md @@ -27,8 +27,8 @@ pre-commit - - code style: black + + code style: black

All algorithms implemented in Python - for education

From 9ddb0272bf58f681b0b2b25650464f87ecfea742 Mon Sep 17 00:00:00 2001 From: Diya <149782916+diya94@users.noreply.github.com> Date: Sun, 24 Aug 2025 16:52:00 +0530 Subject: [PATCH 08/66] Adding doctest for md_prefix function in build_directory_md.py (#12874) * Adding doctests for md_prefix function in build_directory_md.py * Update build_directory_md.py * Update build_directory_md.py * Update build_directory_md.py --------- Co-authored-by: Maxim Smolskiy --- scripts/build_directory_md.py | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/scripts/build_directory_md.py b/scripts/build_directory_md.py index aa95b95db4b5..bdad7686c7e3 100755 --- a/scripts/build_directory_md.py +++ b/scripts/build_directory_md.py @@ -18,8 +18,20 @@ def good_file_paths(top_dir: str = ".") -> Iterator[str]: yield os.path.join(dir_path, filename).lstrip("./") -def md_prefix(i): - return f"{i * ' '}*" if i else "\n##" +def md_prefix(indent: int) -> str: + """ + Markdown prefix based on indent for bullet points + + >>> md_prefix(0) + '\\n##' + >>> md_prefix(1) + ' *' + >>> md_prefix(2) + ' *' + >>> md_prefix(3) + ' *' + """ + return f"{indent * ' '}*" if indent else "\n##" def print_path(old_path: str, new_path: str) -> str: From 060fd8ee2cd389bf4a09e1947c1586fb45b4044c Mon Sep 17 00:00:00 2001 From: conti <130701882+mr-conti@users.noreply.github.com> Date: Sun, 24 Aug 2025 13:49:43 +0200 Subject: [PATCH 09/66] Fix reverse_bits.py: correct 32-bit reversal and improve docstrings (#12924) * Fix reverse_bits.py (#12868) * Update reverse_bits.py * Update reverse_bits.py * Update reverse_bits.py * Update reverse_bits.py * Update reverse_bits.py --------- Co-authored-by: Maxim Smolskiy --- bit_manipulation/reverse_bits.py | 62 ++++++++++++++++---------------- 1 file changed, 31 insertions(+), 31 deletions(-) diff --git a/bit_manipulation/reverse_bits.py b/bit_manipulation/reverse_bits.py index 74b4f2563234..4a0b2ff7047a 100644 --- a/bit_manipulation/reverse_bits.py +++ b/bit_manipulation/reverse_bits.py @@ -1,6 +1,6 @@ def get_reverse_bit_string(number: int) -> str: """ - return the bit string of an integer + Return the reverse bit string of a 32 bit integer >>> get_reverse_bit_string(9) '10010000000000000000000000000000' @@ -8,76 +8,76 @@ def get_reverse_bit_string(number: int) -> str: '11010100000000000000000000000000' >>> get_reverse_bit_string(2873) '10011100110100000000000000000000' + >>> get_reverse_bit_string(2550136832) + '00000000000000000000000000011001' >>> get_reverse_bit_string("this is not a number") Traceback (most recent call last): ... - TypeError: operation can not be conducted on a object of type str + TypeError: operation can not be conducted on an object of type str """ if not isinstance(number, int): msg = ( - "operation can not be conducted on a object of type " + "operation can not be conducted on an object of type " f"{type(number).__name__}" ) raise TypeError(msg) bit_string = "" for _ in range(32): bit_string += str(number % 2) - number = number >> 1 + number >>= 1 return bit_string -def reverse_bit(number: int) -> str: +def reverse_bit(number: int) -> int: """ - Take in an 32 bit integer, reverse its bits, - return a string of reverse bits - - result of a reverse_bit and operation on the integer provided. 
+ Take in a 32 bit integer, reverse its bits, return a 32 bit integer result >>> reverse_bit(25) - '00000000000000000000000000011001' + 2550136832 >>> reverse_bit(37) - '00000000000000000000000000100101' + 2751463424 >>> reverse_bit(21) - '00000000000000000000000000010101' + 2818572288 >>> reverse_bit(58) - '00000000000000000000000000111010' + 1543503872 >>> reverse_bit(0) - '00000000000000000000000000000000' + 0 >>> reverse_bit(256) - '00000000000000000000000100000000' + 8388608 + >>> reverse_bit(2550136832) + 25 >>> reverse_bit(-1) Traceback (most recent call last): ... - ValueError: the value of input must be positive + ValueError: The value of input must be non-negative >>> reverse_bit(1.1) Traceback (most recent call last): ... - TypeError: Input value must be a 'int' type + TypeError: Input value must be an 'int' type >>> reverse_bit("0") Traceback (most recent call last): ... - TypeError: '<' not supported between instances of 'str' and 'int' + TypeError: Input value must be an 'int' type """ + if not isinstance(number, int): + raise TypeError("Input value must be an 'int' type") if number < 0: - raise ValueError("the value of input must be positive") - elif isinstance(number, float): - raise TypeError("Input value must be a 'int' type") - elif isinstance(number, str): - raise TypeError("'<' not supported between instances of 'str' and 'int'") + raise ValueError("The value of input must be non-negative") + result = 0 - # iterator over [1 to 32],since we are dealing with 32 bit integer - for _ in range(1, 33): + # iterator over [0 to 31], since we are dealing with a 32 bit integer + for _ in range(32): # left shift the bits by unity - result = result << 1 + result <<= 1 # get the end bit - end_bit = number % 2 + end_bit = number & 1 # right shift the bits by unity - number = number >> 1 - # add that bit to our ans - result = result | end_bit - return get_reverse_bit_string(result) + number >>= 1 + # add that bit to our answer + result |= end_bit + return result if __name__ == "__main__": From d16cac6e4f00b9182b386d6e3bcbc97a323d8f7f Mon Sep 17 00:00:00 2001 From: Ahmad Alharbi Date: Sun, 24 Aug 2025 05:08:52 -0700 Subject: [PATCH 10/66] feat: Add recursive implication function for lists (#12855) * feat: Add recursive implication function for lists * Update imply_gate.py * Update imply_gate.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Maxim Smolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- boolean_algebra/imply_gate.py | 52 +++++++++++++++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/boolean_algebra/imply_gate.py b/boolean_algebra/imply_gate.py index b64ebaceb306..3d71ff12f8d9 100644 --- a/boolean_algebra/imply_gate.py +++ b/boolean_algebra/imply_gate.py @@ -33,6 +33,58 @@ def imply_gate(input_1: int, input_2: int) -> int: return int(input_1 == 0 or input_2 == 1) +def recursive_imply_list(input_list: list[int]) -> int: + """ + Recursively calculates the implication of a list. + Strictly the implication is applied consecutively left to right: + ( (a -> b) -> c ) -> d ... + + >>> recursive_imply_list([]) + Traceback (most recent call last): + ... + ValueError: Input list must contain at least two elements + >>> recursive_imply_list([0]) + Traceback (most recent call last): + ... + ValueError: Input list must contain at least two elements + >>> recursive_imply_list([1]) + Traceback (most recent call last): + ... 
+ ValueError: Input list must contain at least two elements + >>> recursive_imply_list([0, 0]) + 1 + >>> recursive_imply_list([0, 1]) + 1 + >>> recursive_imply_list([1, 0]) + 0 + >>> recursive_imply_list([1, 1]) + 1 + >>> recursive_imply_list([0, 0, 0]) + 0 + >>> recursive_imply_list([0, 0, 1]) + 1 + >>> recursive_imply_list([0, 1, 0]) + 0 + >>> recursive_imply_list([0, 1, 1]) + 1 + >>> recursive_imply_list([1, 0, 0]) + 1 + >>> recursive_imply_list([1, 0, 1]) + 1 + >>> recursive_imply_list([1, 1, 0]) + 0 + >>> recursive_imply_list([1, 1, 1]) + 1 + """ + if len(input_list) < 2: + raise ValueError("Input list must contain at least two elements") + first_implication = imply_gate(input_list[0], input_list[1]) + if len(input_list) == 2: + return first_implication + new_list = [first_implication, *input_list[2:]] + return recursive_imply_list(new_list) + + if __name__ == "__main__": import doctest From 37b34c2bac250a20dd534f0658c500c4c61ffe47 Mon Sep 17 00:00:00 2001 From: Sowndappan S <147894621+sowndappan5@users.noreply.github.com> Date: Sun, 24 Aug 2025 18:03:18 +0530 Subject: [PATCH 11/66] perf(strings): optimize anagram signature using frequency counts (#12927) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(strings): use frequency-based signature for anagrams Replaced the sorting-based signature implementation with a frequency-based approach using `collections.Counter`. This ensures that the signature represents both characters and their counts, preventing collisions and better grouping of true anagrams. Examples: - "test" → "e1s1t2" - "finaltest" → "a1e1f1i1l1n1s1t2" - "this is a test" → " 3a1e1h1i2s3t3" Also updated the anagram lookup to use the new frequency-based signatures, making results more accurate and avoiding false positives. * Refactor anagram function return type to list[str] * Update anagrams.py * Update anagrams.py * Update anagrams.py * Update anagrams.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: Maxim Smolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- strings/anagrams.py | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/strings/anagrams.py b/strings/anagrams.py index fb9ac0bd1f45..71cc142fb2ad 100644 --- a/strings/anagrams.py +++ b/strings/anagrams.py @@ -6,19 +6,26 @@ def signature(word: str) -> str: - """Return a word sorted + """ + Return a word's frequency-based signature. + >>> signature("test") - 'estt' + 'e1s1t2' >>> signature("this is a test") - ' aehiisssttt' + ' 3a1e1h1i2s3t3' >>> signature("finaltest") - 'aefilnstt' + 'a1e1f1i1l1n1s1t2' """ - return "".join(sorted(word)) + frequencies = collections.Counter(word) + return "".join( + f"{char}{frequency}" for char, frequency in sorted(frequencies.items()) + ) def anagram(my_word: str) -> list[str]: - """Return every anagram of the given word + """ + Return every anagram of the given word from the dictionary. 
+ >>> anagram('test') ['sett', 'stet', 'test'] >>> anagram('this is a test') @@ -40,5 +47,5 @@ def anagram(my_word: str) -> list[str]: all_anagrams = {word: anagram(word) for word in word_list if len(anagram(word)) > 1} with open("anagrams.txt", "w") as file: - file.write("all_anagrams = \n ") + file.write("all_anagrams = \n") file.write(pprint.pformat(all_anagrams)) From d927d67c4afafe82bddb4765c7f660b56e1b31e0 Mon Sep 17 00:00:00 2001 From: Mindaugas <76015221+mindaugl@users.noreply.github.com> Date: Sun, 24 Aug 2025 17:48:59 +0300 Subject: [PATCH 12/66] Update Linked List from sequence script to use doctests (#12766) * Update comments for linked list script. * Add doctests for the linked list script. * Update from_sequence.py --------- Co-authored-by: Maxim Smolskiy --- data_structures/linked_list/from_sequence.py | 41 +++++++++++++------- 1 file changed, 27 insertions(+), 14 deletions(-) diff --git a/data_structures/linked_list/from_sequence.py b/data_structures/linked_list/from_sequence.py index fa43f4d10e08..b16b2258c1f1 100644 --- a/data_structures/linked_list/from_sequence.py +++ b/data_structures/linked_list/from_sequence.py @@ -1,5 +1,7 @@ -# Recursive Program to create a Linked List from a sequence and -# print a string representation of it. +""" +Recursive Program to create a Linked List from a sequence and +print a string representation of it. +""" class Node: @@ -18,13 +20,32 @@ def __repr__(self): return string_rep -def make_linked_list(elements_list): - """Creates a Linked List from the elements of the given sequence - (list/tuple) and returns the head of the Linked List.""" +def make_linked_list(elements_list: list | tuple) -> Node: + """ + Creates a Linked List from the elements of the given sequence + (list/tuple) and returns the head of the Linked List. + + >>> make_linked_list([]) + Traceback (most recent call last): + ... + ValueError: The Elements List is empty + >>> make_linked_list(()) + Traceback (most recent call last): + ... + ValueError: The Elements List is empty + >>> make_linked_list([1]) + <1> ---> + >>> make_linked_list((1,)) + <1> ---> + >>> make_linked_list([1, 3, 5, 32, 44, 12, 43]) + <1> ---> <3> ---> <5> ---> <32> ---> <44> ---> <12> ---> <43> ---> + >>> make_linked_list((1, 3, 5, 32, 44, 12, 43)) + <1> ---> <3> ---> <5> ---> <32> ---> <44> ---> <12> ---> <43> ---> + """ # if elements_list is empty if not elements_list: - raise Exception("The Elements List is empty") + raise ValueError("The Elements List is empty") # Set first element as Head head = Node(elements_list[0]) @@ -34,11 +55,3 @@ def make_linked_list(elements_list): current.next = Node(data) current = current.next return head - - -list_data = [1, 3, 5, 32, 44, 12, 43] -print(f"List: {list_data}") -print("Creating Linked List from List.") -linked_list = make_linked_list(list_data) -print("Linked List:") -print(linked_list) From 561cc383ec04ba61eb9bab5ae891ef5328551df9 Mon Sep 17 00:00:00 2001 From: "S. M. Mohiuddin Khan Shiam" <147746955+mohiuddin-khan-shiam@users.noreply.github.com> Date: Mon, 25 Aug 2025 03:47:46 +0600 Subject: [PATCH 13/66] Fix RuntimeError in bipartite-check DFS/BFS and clean up doctests (#12814) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Fix `RuntimeError` in bipartite-check DFS/BFS and clean up doctests * Iteration over `graph` mutated by `defaultdict` neighbours caused `RuntimeError: dictionary changed size during iteration`. – Iterate over `list(graph)` in both DFS and BFS helpers. 
* Corrected `if __name__ == "__main__":` typo. * Updated two doctests that now succeed after the fix. All doctests now pass (`30/30`), eliminating a critical runtime failure and improving reliability of the graph algorithms. Co-Authored-By: S. M. Mohiuddin Khan Shiam <147746955+mohiuddin-khan-shiam@users.noreply.github.com> * Update check_bipatrite.py * Update check_bipatrite.py * Update check_bipatrite.py * Update check_bipatrite.py * Update check_bipatrite.py * Update check_bipatrite.py * Update check_bipatrite.py * Update check_bipatrite.py * Update check_bipatrite.py --------- Co-authored-by: Odio Marcelino Co-authored-by: Maxim Smolskiy --- graphs/check_bipatrite.py | 68 +++++++++++++-------------------------- 1 file changed, 23 insertions(+), 45 deletions(-) diff --git a/graphs/check_bipatrite.py b/graphs/check_bipatrite.py index 213f3f9480b5..897c78850d58 100644 --- a/graphs/check_bipatrite.py +++ b/graphs/check_bipatrite.py @@ -1,7 +1,7 @@ from collections import defaultdict, deque -def is_bipartite_dfs(graph: defaultdict[int, list[int]]) -> bool: +def is_bipartite_dfs(graph: dict[int, list[int]]) -> bool: """ Check if a graph is bipartite using depth-first search (DFS). @@ -16,12 +16,9 @@ def is_bipartite_dfs(graph: defaultdict[int, list[int]]) -> bool: Examples: - >>> # FIXME: This test should pass. - >>> is_bipartite_dfs(defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4]})) - Traceback (most recent call last): - ... - RuntimeError: dictionary changed size during iteration - >>> is_bipartite_dfs(defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 1]})) + >>> is_bipartite_dfs({0: [1, 2], 1: [0, 3], 2: [0, 4]}) + True + >>> is_bipartite_dfs({0: [1, 2], 1: [0, 3], 2: [0, 1]}) False >>> is_bipartite_dfs({}) True @@ -34,36 +31,26 @@ def is_bipartite_dfs(graph: defaultdict[int, list[int]]) -> bool: >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) False >>> is_bipartite_dfs({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) - Traceback (most recent call last): - ... - KeyError: 0 + False >>> # FIXME: This test should fails with KeyError: 4. >>> is_bipartite_dfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) False >>> is_bipartite_dfs({0: [-1, 3], 1: [0, -2]}) - Traceback (most recent call last): - ... - KeyError: -1 + False >>> is_bipartite_dfs({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]}) True >>> is_bipartite_dfs({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) - Traceback (most recent call last): - ... - KeyError: 0 + True >>> # FIXME: This test should fails with >>> # TypeError: list indices must be integers or... >>> is_bipartite_dfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) True >>> is_bipartite_dfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) - Traceback (most recent call last): - ... - KeyError: 1 + True >>> is_bipartite_dfs({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]}) - Traceback (most recent call last): - ... 
- KeyError: 'b' + True """ def depth_first_search(node: int, color: int) -> bool: @@ -80,6 +67,8 @@ def depth_first_search(node: int, color: int) -> bool: """ if visited[node] == -1: visited[node] = color + if node not in graph: + return True for neighbor in graph[node]: if not depth_first_search(neighbor, 1 - color): return False @@ -92,7 +81,7 @@ def depth_first_search(node: int, color: int) -> bool: return True -def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool: +def is_bipartite_bfs(graph: dict[int, list[int]]) -> bool: """ Check if a graph is bipartite using a breadth-first search (BFS). @@ -107,12 +96,9 @@ def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool: Examples: - >>> # FIXME: This test should pass. - >>> is_bipartite_bfs(defaultdict(list, {0: [1, 2], 1: [0, 3], 2: [0, 4]})) - Traceback (most recent call last): - ... - RuntimeError: dictionary changed size during iteration - >>> is_bipartite_bfs(defaultdict(list, {0: [1, 2], 1: [0, 2], 2: [0, 1]})) + >>> is_bipartite_bfs({0: [1, 2], 1: [0, 3], 2: [0, 4]}) + True + >>> is_bipartite_bfs({0: [1, 2], 1: [0, 2], 2: [0, 1]}) False >>> is_bipartite_bfs({}) True @@ -125,36 +111,26 @@ def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool: >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) False >>> is_bipartite_bfs({7: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: [0]}) - Traceback (most recent call last): - ... - KeyError: 0 + False >>> # FIXME: This test should fails with KeyError: 4. >>> is_bipartite_bfs({0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 9: [0]}) False >>> is_bipartite_bfs({0: [-1, 3], 1: [0, -2]}) - Traceback (most recent call last): - ... - KeyError: -1 + False >>> is_bipartite_bfs({-1: [0, 2], 0: [-1, 1], 1: [0, 2], 2: [-1, 1]}) True >>> is_bipartite_bfs({0.9: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2]}) - Traceback (most recent call last): - ... - KeyError: 0 + True >>> # FIXME: This test should fails with >>> # TypeError: list indices must be integers or... >>> is_bipartite_bfs({0: [1.0, 3.0], 1.0: [0, 2.0], 2.0: [1.0, 3.0], 3.0: [0, 2.0]}) True >>> is_bipartite_bfs({"a": [1, 3], "b": [0, 2], "c": [1, 3], "d": [0, 2]}) - Traceback (most recent call last): - ... - KeyError: 1 + True >>> is_bipartite_bfs({0: ["b", "d"], 1: ["a", "c"], 2: ["b", "d"], 3: ["a", "c"]}) - Traceback (most recent call last): - ... 
- KeyError: 'b' + True """ visited: defaultdict[int, int] = defaultdict(lambda: -1) for node in graph: @@ -164,6 +140,8 @@ def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool: visited[node] = 0 while queue: curr_node = queue.popleft() + if curr_node not in graph: + continue for neighbor in graph[curr_node]: if visited[neighbor] == -1: visited[neighbor] = 1 - visited[curr_node] @@ -173,7 +151,7 @@ def is_bipartite_bfs(graph: defaultdict[int, list[int]]) -> bool: return True -if __name__ == "__main": +if __name__ == "__main__": import doctest result = doctest.testmod() From b0be30443393b019c469684554a3c8748ffeacbe Mon Sep 17 00:00:00 2001 From: John Liu <154310237+john-liu2@users.noreply.github.com> Date: Sun, 24 Aug 2025 14:59:44 -0700 Subject: [PATCH 14/66] Use deque as queue in breadth_first_search_shortest_path_2.py (#12861) * Fixes #12857 Use collections.deque as queue in graphs BFS shortest path 2 * Use collections.deque as queue in the correct syntax: queue = deque([start]) * Fix CI error due to HTTP 404 on https://finance.yahoo.com/quote/GOOG/\?p\=GOOG * Undo change in workflows/build.yml as it's fixed in PR 12864 --------- Co-authored-by: Maxim Smolskiy --- .../breadth_first_search_shortest_path_2.py | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/graphs/breadth_first_search_shortest_path_2.py b/graphs/breadth_first_search_shortest_path_2.py index 4f9b6e65bdf3..efba9b7b6ae6 100644 --- a/graphs/breadth_first_search_shortest_path_2.py +++ b/graphs/breadth_first_search_shortest_path_2.py @@ -1,10 +1,12 @@ -"""Breadth-first search shortest path implementations. +"""Breadth-first search the shortest path implementations. doctest: -python -m doctest -v bfs_shortest_path.py +python -m doctest -v breadth_first_search_shortest_path_2.py Manual test: -python bfs_shortest_path.py +python breadth_first_search_shortest_path_2.py """ +from collections import deque + demo_graph = { "A": ["B", "C", "E"], "B": ["A", "D", "E"], @@ -17,7 +19,7 @@ def bfs_shortest_path(graph: dict, start, goal) -> list[str]: - """Find shortest path between `start` and `goal` nodes. + """Find the shortest path between `start` and `goal` nodes. Args: graph (dict): node/list of neighboring nodes key/value pairs. start: start node. @@ -36,7 +38,7 @@ def bfs_shortest_path(graph: dict, start, goal) -> list[str]: # keep track of explored nodes explored = set() # keep track of all the paths to be checked - queue = [[start]] + queue = deque([[start]]) # return path if start is goal if start == goal: @@ -45,7 +47,7 @@ def bfs_shortest_path(graph: dict, start, goal) -> list[str]: # keeps looping until all possible paths have been checked while queue: # pop the first path from the queue - path = queue.pop(0) + path = queue.popleft() # get the last node from the path node = path[-1] if node not in explored: @@ -68,13 +70,13 @@ def bfs_shortest_path(graph: dict, start, goal) -> list[str]: def bfs_shortest_path_distance(graph: dict, start, target) -> int: - """Find shortest path distance between `start` and `target` nodes. + """Find the shortest path distance between `start` and `target` nodes. Args: graph: node/list of neighboring nodes key/value pairs. start: node to start search from. target: node to search for. Returns: - Number of edges in shortest path between `start` and `target` nodes. + Number of edges in the shortest path between `start` and `target` nodes. -1 if no path exists. 
Example: >>> bfs_shortest_path_distance(demo_graph, "G", "D") @@ -88,12 +90,12 @@ def bfs_shortest_path_distance(graph: dict, start, target) -> int: return -1 if start == target: return 0 - queue = [start] + queue = deque([start]) visited = set(start) # Keep tab on distances from `start` node. dist = {start: 0, target: -1} while queue: - node = queue.pop(0) + node = queue.popleft() if node == target: dist[target] = ( dist[node] if dist[target] == -1 else min(dist[target], dist[node]) From 8c1c6c1763e8d4695754dceea072cc1ff58308bb Mon Sep 17 00:00:00 2001 From: alejandroaldas <52395149+alejandroaldas@users.noreply.github.com> Date: Sun, 24 Aug 2025 17:16:05 -0500 Subject: [PATCH 15/66] Codex/find and fix a bug (#12782) * Fix enumeration order in FFT string representation * updating DIRECTORY.md * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update radix2_fft.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: alejandroaldas Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- maths/radix2_fft.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/maths/radix2_fft.py b/maths/radix2_fft.py index ccd5cdcc0160..5efbccc7a17d 100644 --- a/maths/radix2_fft.py +++ b/maths/radix2_fft.py @@ -39,14 +39,14 @@ class FFT: >>> x = FFT(A, B) Print product - >>> x.product # 2x + 3x^2 + 8x^3 + 4x^4 + 6x^5 + >>> x.product # 2x + 3x^2 + 8x^3 + 6x^4 + 8x^5 [(-0-0j), (2+0j), (3-0j), (8-0j), (6+0j), (8+0j)] __str__ test >>> print(x) - A = 0*x^0 + 1*x^1 + 2*x^0 + 3*x^2 - B = 0*x^2 + 1*x^3 + 2*x^4 - A*B = 0*x^(-0-0j) + 1*x^(2+0j) + 2*x^(3-0j) + 3*x^(8-0j) + 4*x^(6+0j) + 5*x^(8+0j) + A = 0*x^0 + 1*x^1 + 0*x^2 + 2*x^3 + B = 2*x^0 + 3*x^1 + 4*x^2 + A*B = (-0-0j)*x^0 + (2+0j)*x^1 + (3-0j)*x^2 + (8-0j)*x^3 + (6+0j)*x^4 + (8+0j)*x^5 """ def __init__(self, poly_a=None, poly_b=None): @@ -159,13 +159,13 @@ def __multiply(self): # Overwrite __str__ for print(); Shows A, B and A*B def __str__(self): a = "A = " + " + ".join( - f"{coef}*x^{i}" for coef, i in enumerate(self.polyA[: self.len_A]) + f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A]) ) b = "B = " + " + ".join( - f"{coef}*x^{i}" for coef, i in enumerate(self.polyB[: self.len_B]) + f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B]) ) c = "A*B = " + " + ".join( - f"{coef}*x^{i}" for coef, i in enumerate(self.product) + f"{coef}*x^{i}" for i, coef in enumerate(self.product) ) return f"{a}\n{b}\n{c}" From dc1b2003b44b9fd831b5870d988e4f0af93b3389 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 26 Aug 2025 15:32:39 +0300 Subject: [PATCH 16/66] [pre-commit.ci] pre-commit autoupdate (#12930) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.12.9 → v0.12.10](https://github.com/astral-sh/ruff-pre-commit/compare/v0.12.9...v0.12.10) - [github.com/pre-commit/mirrors-mypy: v1.15.0 → v1.17.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.15.0...v1.17.1) * Update word_break.py * Update word_break.py * Update word_break.py * Update word_break.py * Update word_break.py * Update covid_stats_via_xpath.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update 
pyproject.toml * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update pyproject.toml * Update pyproject.toml * Update covid_stats_via_xpath.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- .pre-commit-config.yaml | 4 ++-- dynamic_programming/word_break.py | 2 +- web_programming/covid_stats_via_xpath.py | 12 +++++++----- 3 files changed, 10 insertions(+), 8 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 252cfebc53a8..6eb0906fb23a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.12.9 + rev: v0.12.10 hooks: - id: ruff-check - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.15.0 + rev: v1.17.1 hooks: - id: mypy args: diff --git a/dynamic_programming/word_break.py b/dynamic_programming/word_break.py index 4d7ac869080c..c4ba2d7aa976 100644 --- a/dynamic_programming/word_break.py +++ b/dynamic_programming/word_break.py @@ -90,7 +90,7 @@ def is_breakable(index: int) -> bool: if index == len_string: return True - trie_node = trie + trie_node: Any = trie for i in range(index, len_string): trie_node = trie_node.get(string[i], None) diff --git a/web_programming/covid_stats_via_xpath.py b/web_programming/covid_stats_via_xpath.py index f7db51b63169..9c016ba414ea 100644 --- a/web_programming/covid_stats_via_xpath.py +++ b/web_programming/covid_stats_via_xpath.py @@ -1,5 +1,5 @@ """ -This is to show simple COVID19 info fetching from worldometers site using lxml +This is to show simple COVID19 info fetching from worldometers archive site using lxml * The main motivation to use lxml in place of bs4 is that it is faster and therefore more convenient to use in Python web projects (e.g. Django or Flask-based) """ @@ -19,12 +19,14 @@ class CovidData(NamedTuple): - cases: int - deaths: int - recovered: int + cases: str + deaths: str + recovered: str -def covid_stats(url: str = "https://www.worldometers.info/coronavirus/") -> CovidData: +def covid_stats( + url: str = "https://web.archive.org/web/20250825095350/https://www.worldometers.info/coronavirus/", +) -> CovidData: xpath_str = '//div[@class = "maincounter-number"]/span/text()' return CovidData( *html.fromstring(httpx.get(url, timeout=10).content).xpath(xpath_str) From 55db5a1b8d463a514522dd759053d30357538b62 Mon Sep 17 00:00:00 2001 From: Mindaugas <76015221+mindaugl@users.noreply.github.com> Date: Tue, 26 Aug 2025 21:17:18 +0300 Subject: [PATCH 17/66] Add new solution for the euler project problem 9 (#12771) * Add new solution for the euler project problem 9 - precompute the squares. 
* Update sol4.py * updating DIRECTORY.md * Update sol4.py * Update sol4.py * Update sol4.py --------- Co-authored-by: Maxim Smolskiy Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 1 + project_euler/problem_009/sol4.py | 60 +++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+) create mode 100644 project_euler/problem_009/sol4.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 81d6f4c70864..2e3b4398f26e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -956,6 +956,7 @@ * [Sol1](project_euler/problem_009/sol1.py) * [Sol2](project_euler/problem_009/sol2.py) * [Sol3](project_euler/problem_009/sol3.py) + * [Sol4](project_euler/problem_009/sol4.py) * Problem 010 * [Sol1](project_euler/problem_010/sol1.py) * [Sol2](project_euler/problem_010/sol2.py) diff --git a/project_euler/problem_009/sol4.py b/project_euler/problem_009/sol4.py new file mode 100644 index 000000000000..a07d40ccb54d --- /dev/null +++ b/project_euler/problem_009/sol4.py @@ -0,0 +1,60 @@ +""" +Project Euler Problem 9: https://projecteuler.net/problem=9 + +Special Pythagorean triplet + +A Pythagorean triplet is a set of three natural numbers, a < b < c, for which, + + a^2 + b^2 = c^2. + +For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2. + +There exists exactly one Pythagorean triplet for which a + b + c = 1000. +Find the product abc. + +References: + - https://en.wikipedia.org/wiki/Pythagorean_triple +""" + + +def get_squares(n: int) -> list[int]: + """ + >>> get_squares(0) + [] + >>> get_squares(1) + [0] + >>> get_squares(2) + [0, 1] + >>> get_squares(3) + [0, 1, 4] + >>> get_squares(4) + [0, 1, 4, 9] + """ + return [number * number for number in range(n)] + + +def solution(n: int = 1000) -> int: + """ + Precomputing squares and checking if a^2 + b^2 is the square by set look-up. + + >>> solution(12) + 60 + >>> solution(36) + 1620 + """ + + squares = get_squares(n) + squares_set = set(squares) + for a in range(1, n // 3): + for b in range(a + 1, (n - a) // 2 + 1): + if ( + squares[a] + squares[b] in squares_set + and squares[n - a - b] == squares[a] + squares[b] + ): + return a * b * (n - a - b) + + return -1 + + +if __name__ == "__main__": + print(f"{solution() = }") From 44cf167175cf2a53080006dbedd40b5f5343ef18 Mon Sep 17 00:00:00 2001 From: Sharan Sukesh <77352136+sharansukesh1003@users.noreply.github.com> Date: Tue, 26 Aug 2025 14:42:14 -0400 Subject: [PATCH 18/66] Create cyclic_sort.py (#9256) * Create cyclic_sort.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update cyclic_sort.py * updating DIRECTORY.md * Update cyclic_sort.py * Update cyclic_sort.py * Update cyclic_sort.py * Update cyclic_sort.py * Update cyclic_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 1 + sorts/cyclic_sort.py | 55 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) create mode 100644 sorts/cyclic_sort.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 2e3b4398f26e..53c53d208656 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1267,6 +1267,7 @@ * [Comb Sort](sorts/comb_sort.py) * [Counting Sort](sorts/counting_sort.py) * [Cycle Sort](sorts/cycle_sort.py) + * [Cyclic Sort](sorts/cyclic_sort.py) * [Double Sort](sorts/double_sort.py) * [Dutch National Flag Sort](sorts/dutch_national_flag_sort.py) * [Exchange Sort](sorts/exchange_sort.py) diff --git a/sorts/cyclic_sort.py b/sorts/cyclic_sort.py new file mode 100644 index 
000000000000..9e81291548d4 --- /dev/null +++ b/sorts/cyclic_sort.py @@ -0,0 +1,55 @@ +""" +This is a pure Python implementation of the Cyclic Sort algorithm. + +For doctests run following command: +python -m doctest -v cyclic_sort.py +or +python3 -m doctest -v cyclic_sort.py +For manual testing run: +python cyclic_sort.py +or +python3 cyclic_sort.py +""" + + +def cyclic_sort(nums: list[int]) -> list[int]: + """ + Sorts the input list of n integers from 1 to n in-place + using the Cyclic Sort algorithm. + + :param nums: List of n integers from 1 to n to be sorted. + :return: The same list sorted in ascending order. + + Time complexity: O(n), where n is the number of integers in the list. + + Examples: + >>> cyclic_sort([]) + [] + >>> cyclic_sort([3, 5, 2, 1, 4]) + [1, 2, 3, 4, 5] + """ + + # Perform cyclic sort + index = 0 + while index < len(nums): + # Calculate the correct index for the current element + correct_index = nums[index] - 1 + # If the current element is not at its correct position, + # swap it with the element at its correct index + if index != correct_index: + nums[index], nums[correct_index] = nums[correct_index], nums[index] + else: + # If the current element is already in its correct position, + # move to the next element + index += 1 + + return nums + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + user_input = input("Enter numbers separated by a comma:\n").strip() + unsorted = [int(item) for item in user_input.split(",")] + print(*cyclic_sort(unsorted), sep=",") From 4961b3aa899f79b02539ec031d8b6e5adafc02d1 Mon Sep 17 00:00:00 2001 From: Wei Jiang <42140605+Jiang15@users.noreply.github.com> Date: Wed, 27 Aug 2025 00:10:22 +0200 Subject: [PATCH 19/66] Enhancement of the knapsack algorithm with memorization and generalisation (#9295) * enhance knapsack problem * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * wei/refactor code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update test_knapsack.py * Update knapsack.py * Update test_knapsack.py * Update knapsack.py * Update knapsack.py * Update knapsack.py * Update knapsack.py * Update knapsack.py * Update test_knapsack.py --------- Co-authored-by: weijiang Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- knapsack/README.md | 2 +- knapsack/knapsack.py | 65 ++++++++++++++++++++++----------- knapsack/tests/test_knapsack.py | 12 +++++- 3 files changed, 55 insertions(+), 24 deletions(-) diff --git a/knapsack/README.md b/knapsack/README.md index f31e5f591412..686ea929255a 100644 --- a/knapsack/README.md +++ b/knapsack/README.md @@ -1,4 +1,4 @@ -# A naive recursive implementation of 0-1 Knapsack Problem +# A recursive implementation of 0-N Knapsack Problem This overview is taken from: diff --git a/knapsack/knapsack.py b/knapsack/knapsack.py index bb507be1ba3c..0648773c919f 100644 --- a/knapsack/knapsack.py +++ b/knapsack/knapsack.py @@ -1,14 +1,23 @@ -"""A naive recursive implementation of 0-1 Knapsack Problem +"""A recursive implementation of 0-N Knapsack Problem https://en.wikipedia.org/wiki/Knapsack_problem """ from __future__ import annotations +from functools import lru_cache -def knapsack(capacity: int, weights: list[int], values: list[int], counter: int) -> int: + +def knapsack( + capacity: int, + weights: list[int], + values: list[int], + counter: int, + allow_repetition=False, +) -> int: """ Returns the maximum 
value that can be put in a knapsack of a capacity cap, - whereby each weight w has a specific value val. + whereby each weight w has a specific value val + with option to allow repetitive selection of items >>> cap = 50 >>> val = [60, 100, 120] @@ -17,28 +26,40 @@ def knapsack(capacity: int, weights: list[int], values: list[int], counter: int) >>> knapsack(cap, w, val, c) 220 - The result is 220 cause the values of 100 and 120 got the weight of 50 + Given the repetition is NOT allowed, + the result is 220 cause the values of 100 and 120 got the weight of 50 which is the limit of the capacity. + >>> knapsack(cap, w, val, c, True) + 300 + + Given the repetition is allowed, + the result is 300 cause the values of 60*5 (pick 5 times) + got the weight of 10*5 which is the limit of the capacity. """ - # Base Case - if counter == 0 or capacity == 0: - return 0 - - # If weight of the nth item is more than Knapsack of capacity, - # then this item cannot be included in the optimal solution, - # else return the maximum of two cases: - # (1) nth item included - # (2) not included - if weights[counter - 1] > capacity: - return knapsack(capacity, weights, values, counter - 1) - else: - left_capacity = capacity - weights[counter - 1] - new_value_included = values[counter - 1] + knapsack( - left_capacity, weights, values, counter - 1 - ) - without_new_value = knapsack(capacity, weights, values, counter - 1) - return max(new_value_included, without_new_value) + @lru_cache + def knapsack_recur(capacity: int, counter: int) -> int: + # Base Case + if counter == 0 or capacity == 0: + return 0 + + # If weight of the nth item is more than Knapsack of capacity, + # then this item cannot be included in the optimal solution, + # else return the maximum of two cases: + # (1) nth item included only once (0-1), if allow_repetition is False + # nth item included one or more times (0-N), if allow_repetition is True + # (2) not included + if weights[counter - 1] > capacity: + return knapsack_recur(capacity, counter - 1) + else: + left_capacity = capacity - weights[counter - 1] + new_value_included = values[counter - 1] + knapsack_recur( + left_capacity, counter - 1 if not allow_repetition else counter + ) + without_new_value = knapsack_recur(capacity, counter - 1) + return max(new_value_included, without_new_value) + + return knapsack_recur(capacity, counter) if __name__ == "__main__": diff --git a/knapsack/tests/test_knapsack.py b/knapsack/tests/test_knapsack.py index 7bfb8780627b..80378aae4579 100644 --- a/knapsack/tests/test_knapsack.py +++ b/knapsack/tests/test_knapsack.py @@ -30,7 +30,7 @@ def test_base_case(self): def test_easy_case(self): """ - test for the base case + test for the easy case """ cap = 3 val = [1, 2, 3] @@ -48,6 +48,16 @@ def test_knapsack(self): c = len(val) assert k.knapsack(cap, w, val, c) == 220 + def test_knapsack_repetition(self): + """ + test for the knapsack repetition + """ + cap = 50 + val = [60, 100, 120] + w = [10, 20, 30] + c = len(val) + assert k.knapsack(cap, w, val, c, True) == 300 + if __name__ == "__main__": unittest.main() From b486ac60e6e047f94615f0489ae8aaa48d33ff41 Mon Sep 17 00:00:00 2001 From: Om Ashishkumar Soni Date: Thu, 28 Aug 2025 00:32:14 +0530 Subject: [PATCH 20/66] added coordinate_compression (#9317) * added coordinate_compression algorithm * added coordinate_compression & doctest * adding return type hints and utilized enumerate * adding exmaple usage in main function * added type hints, for list and dict * updating DIRECTORY.md * Update 
other/coordinate_compression.py Co-authored-by: Christian Clauss * Update other/coordinate_compression.py Co-authored-by: Christian Clauss * Update other/coordinate_compression.py Co-authored-by: Christian Clauss * Update other/coordinate_compression.py Co-authored-by: Christian Clauss * Update other/coordinate_compression.py Co-authored-by: Christian Clauss * Update other/coordinate_compression.py Co-authored-by: Christian Clauss * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update other/coordinate_compression.py Co-authored-by: Christian Clauss * Update other/coordinate_compression.py Co-authored-by: Christian Clauss * Update coordinate_compression.py * Create coordinate_compression.py * updating DIRECTORY.md * Delete other/coordinate_compression.py * updating DIRECTORY.md * Update coordinate_compression.py * Update coordinate_compression.py * Update coordinate_compression.py * Update coordinate_compression.py * Update coordinate_compression.py * Update coordinate_compression.py * Update coordinate_compression.py * Update coordinate_compression.py * Update coordinate_compression.py --------- Co-authored-by: Maxim Smolskiy Co-authored-by: MaximSmolskiy Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + data_compression/coordinate_compression.py | 132 +++++++++++++++++++++ 2 files changed, 133 insertions(+) create mode 100644 data_compression/coordinate_compression.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 53c53d208656..0636ba0a7ecc 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -174,6 +174,7 @@ ## Data Compression * [Burrows Wheeler](data_compression/burrows_wheeler.py) + * [Coordinate Compression](data_compression/coordinate_compression.py) * [Huffman](data_compression/huffman.py) * [Lempel Ziv](data_compression/lempel_ziv.py) * [Lempel Ziv Decompress](data_compression/lempel_ziv_decompress.py) diff --git a/data_compression/coordinate_compression.py b/data_compression/coordinate_compression.py new file mode 100644 index 000000000000..9c4ad9a99ac3 --- /dev/null +++ b/data_compression/coordinate_compression.py @@ -0,0 +1,132 @@ +""" +Assumption: + - The values to compress are assumed to be comparable, + values can be sorted and compared with '<' and '>' operators. +""" + + +class CoordinateCompressor: + """ + A class for coordinate compression. + + This class allows you to compress and decompress a list of values. + + Mapping: + In addition to compression and decompression, this class maintains a mapping + between original values and their compressed counterparts using two data + structures: a dictionary `coordinate_map` and a list `reverse_map`: + - `coordinate_map`: A dictionary that maps original values to their compressed + coordinates. Keys are original values, and values are compressed coordinates. + - `reverse_map`: A list used for reverse mapping, where each index corresponds + to a compressed coordinate, and the value at that index is the original value. + + Example of mapping: + Original: 10, Compressed: 0 + Original: 52, Compressed: 1 + Original: 83, Compressed: 2 + Original: 100, Compressed: 3 + + This mapping allows for efficient compression and decompression of values within + the list. + """ + + def __init__(self, arr: list[int | float | str]) -> None: + """ + Initialize the CoordinateCompressor with a list. + + Args: + arr: The list of values to be compressed. 
+ + >>> arr = [100, 10, 52, 83] + >>> cc = CoordinateCompressor(arr) + >>> cc.compress(100) + 3 + >>> cc.compress(52) + 1 + >>> cc.decompress(1) + 52 + """ + + # A dictionary to store compressed coordinates + self.coordinate_map: dict[int | float | str, int] = {} + + # A list to store reverse mapping + self.reverse_map: list[int | float | str] = [-1] * len(arr) + + self.arr = sorted(arr) # The input list + self.n = len(arr) # The length of the input list + self.compress_coordinates() + + def compress_coordinates(self) -> None: + """ + Compress the coordinates in the input list. + + >>> arr = [100, 10, 52, 83] + >>> cc = CoordinateCompressor(arr) + >>> cc.coordinate_map[83] + 2 + >>> cc.coordinate_map[80] # Value not in the original list + Traceback (most recent call last): + ... + KeyError: 80 + >>> cc.reverse_map[2] + 83 + """ + key = 0 + for val in self.arr: + if val not in self.coordinate_map: + self.coordinate_map[val] = key + self.reverse_map[key] = val + key += 1 + + def compress(self, original: float | str) -> int: + """ + Compress a single value. + + Args: + original: The value to compress. + + Returns: + The compressed integer, or -1 if not found in the original list. + + >>> arr = [100, 10, 52, 83] + >>> cc = CoordinateCompressor(arr) + >>> cc.compress(100) + 3 + >>> cc.compress(7) # Value not in the original list + -1 + """ + return self.coordinate_map.get(original, -1) + + def decompress(self, num: int) -> int | float | str: + """ + Decompress a single integer. + + Args: + num: The compressed integer to decompress. + + Returns: + The original value. + + >>> arr = [100, 10, 52, 83] + >>> cc = CoordinateCompressor(arr) + >>> cc.decompress(0) + 10 + >>> cc.decompress(5) # Compressed coordinate out of range + -1 + """ + return self.reverse_map[num] if 0 <= num < len(self.reverse_map) else -1 + + +if __name__ == "__main__": + from doctest import testmod + + testmod() + + arr: list[int | float | str] = [100, 10, 52, 83] + cc = CoordinateCompressor(arr) + + for original in arr: + compressed = cc.compress(original) + decompressed = cc.decompress(compressed) + print(f"Original: {decompressed}, Compressed: {compressed}") From 2fa65c7d9212514a5c8241c7636166f5758c5ff0 Mon Sep 17 00:00:00 2001 From: Shreyash Kashyap <76607993+SYK-08@users.noreply.github.com> Date: Thu, 28 Aug 2025 02:11:04 +0530 Subject: [PATCH 21/66] Improve blockchain/README.md (#9630) * Update README.md * Update blockchain/README.md Co-authored-by: Tianyi Zheng * Update blockchain/README.md Co-authored-by: Tianyi Zheng * Update blockchain/README.md Co-authored-by: Tianyi Zheng * Update blockchain/README.md Co-authored-by: Tianyi Zheng * Update blockchain/README.md Co-authored-by: Tianyi Zheng * Update blockchain/README.md Co-authored-by: Tianyi Zheng * Update README.md --------- Co-authored-by: Maxim Smolskiy Co-authored-by: Tianyi Zheng --- blockchain/README.md | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/blockchain/README.md b/blockchain/README.md index b5fab7b36eaa..ecd784fc2c7d 100644 --- a/blockchain/README.md +++ b/blockchain/README.md @@ -1,8 +1,8 @@ # Blockchain -A Blockchain is a type of **distributed ledger** technology (DLT) that consists of growing list of records, called **blocks**, that are securely linked together using **cryptography**. +A Blockchain is a type of **distributed ledger** technology (DLT) that consists of a growing list of records, called **blocks**, that are securely linked together using **cryptography**. 
-Let's breakdown the terminologies in the above definition. We find below terminologies, +Let's break down the terminologies in the above definition. We find below terminologies, - Digital Ledger Technology (DLT) - Blocks @@ -10,35 +10,35 @@ Let's breakdown the terminologies in the above definition. We find below termino ## Digital Ledger Technology - It is otherwise called as distributed ledger technology. It is simply the opposite of centralized database. Firstly, what is a **ledger**? A ledger is a book or collection of accounts that records account transactions. +Blockchain is also called distributed ledger technology. It is simply the opposite of a centralized database. Firstly, what is a **ledger**? A ledger is a book or collection of accounts that records account transactions. - *Why is Blockchain addressed as digital ledger if it can record more than account transactions? What other transaction details and information can it hold?* +*Why is Blockchain addressed as a digital ledger if it can record more than account transactions? What other transaction details and information can it hold?* -Digital Ledger Technology is just a ledger which is shared among multiple nodes. This way there exist no need for central authority to hold the info. Okay, how is it differentiated from central database and what are their benefits? +Digital Ledger Technology is just a ledger that is shared among multiple nodes. This way there exists no need for a central authority to hold the info. Okay, how is it differentiated from a central database and what are their benefits? -There is an organization which has 4 branches whose data are stored in a centralized database. So even if one branch needs any data from ledger they need an approval from database in charge. And if one hacks the central database he gets to tamper and control all the data. +Suppose that there is an organization that has 4 branches whose data are stored in a centralized database. So even if one branch needs any data from the ledger it needs approval from the database in charge. And if one hacks the central database he gets to tamper and control all the data. -Now lets assume every branch has a copy of the ledger and then once anything is added to the ledger by anyone branch it is gonna automatically reflect in all other ledgers available in other branch. This is done using Peer-to-peer network. +Now let's assume every branch has a copy of the ledger and then once anything is added to the ledger by any branch it is gonna automatically reflect in all other ledgers available in other branches. This is done using a peer-to-peer network. -So this means even if information is tampered in one branch we can find out. If one branch is hacked we can be alerted ,so we can safeguard other branches. Now, assume these branches as computers or nodes and the ledger is a transaction record or digital receipt. If one ledger is hacked in a node we can detect since there will be a mismatch in comparison with other node information. So this is the concept of Digital Ledger Technology. +This means that even if information is tampered with in one branch we can find out. If one branch is hacked we can be alerted, so we can safeguard other branches. Now, assume these branches as computers or nodes and the ledger is a transaction record or digital receipt. If one ledger is hacked in a node we can detect since there will be a mismatch in comparison with other node information. So this is the concept of Digital Ledger Technology. 
*Is it required for all nodes to have access to all information in other nodes? Wouldn't this require enormous storage space in each node?* ## Blocks -In short a block is nothing but collections of records with a labelled header. These are connected cryptographically. Once a new block is added to a chain, the previous block is connected, more precisely said as locked and hence, will remain unaltered. We can understand this concept once we get a clear understanding of working mechanism of blockchain. +In short, a block is nothing but a collection of records with a labelled header. These are connected cryptographically. Once a new block is added to a chain, the previous block is connected, more precisely said as locked, and hence will remain unaltered. We can understand this concept once we get a clear understanding of the working mechanism of blockchain. ## Cryptography -It is the practice and study of secure communication techniques in the midst of adversarial behavior. More broadly, cryptography is the creation and analysis of protocols that prevent third parties or the general public from accessing private messages. +Cryptography is the practice and study of secure communication techniques amid adversarial behavior. More broadly, cryptography is the creation and analysis of protocols that prevent third parties or the general public from accessing private messages. *Which cryptography technology is most widely used in blockchain and why?* -So, in general, blockchain technology is a distributed record holder which records the information about ownership of an asset. To define precisely, +So, in general, blockchain technology is a distributed record holder that records the information about ownership of an asset. To define precisely, > Blockchain is a distributed, immutable ledger that makes it easier to record transactions and track assets in a corporate network. An asset could be tangible (such as a house, car, cash, or land) or intangible (such as a business) (intellectual property, patents, copyrights, branding). A blockchain network can track and sell almost anything of value, lowering risk and costs for everyone involved. -So this is all about introduction to blockchain technology. To learn more about the topic refer below links.... +So this is all about the introduction to blockchain technology. To learn more about the topic refer below links.... * * * From 0c39e43af70274944e62e84708585cfd7711d2d3 Mon Sep 17 00:00:00 2001 From: Mary-0165 <146911989+Mary-0165@users.noreply.github.com> Date: Thu, 28 Aug 2025 02:25:27 +0530 Subject: [PATCH 22/66] Algorithm to find unique prime factors (#9935) * algorithm to find unique prime factors * Update prime_factors.py * Update prime_factors.py * Update prime_factors.py --------- Co-authored-by: Maxim Smolskiy --- maths/prime_factors.py | 40 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) diff --git a/maths/prime_factors.py b/maths/prime_factors.py index 47abcf10e618..6eff57d12d17 100644 --- a/maths/prime_factors.py +++ b/maths/prime_factors.py @@ -47,6 +47,46 @@ def prime_factors(n: int) -> list[int]: return factors +def unique_prime_factors(n: int) -> list[int]: + """ + Returns unique prime factors of n as a list. 
+ + >>> unique_prime_factors(0) + [] + >>> unique_prime_factors(100) + [2, 5] + >>> unique_prime_factors(2560) + [2, 5] + >>> unique_prime_factors(10**-2) + [] + >>> unique_prime_factors(0.02) + [] + >>> unique_prime_factors(10**241) + [2, 5] + >>> unique_prime_factors(10**-354) + [] + >>> unique_prime_factors('hello') + Traceback (most recent call last): + ... + TypeError: '<=' not supported between instances of 'int' and 'str' + >>> unique_prime_factors([1,2,'hello']) + Traceback (most recent call last): + ... + TypeError: '<=' not supported between instances of 'int' and 'list' + """ + i = 2 + factors = [] + while i * i <= n: + if not n % i: + while not n % i: + n //= i + factors.append(i) + i += 1 + if n > 1: + factors.append(n) + return factors + + if __name__ == "__main__": import doctest From e6b5d26d5c9cd23942a52c0210cee244f0aba85e Mon Sep 17 00:00:00 2001 From: architmaheshwari99 <99663695+architmaheshwari99@users.noreply.github.com> Date: Thu, 28 Aug 2025 02:37:46 +0530 Subject: [PATCH 23/66] Combination sum fix and test cases (#10193) * fixed_incorrect_test_combination_sum * reverting fn arg * ruff * Update combination_sum.py * Update combination_sum.py --------- Co-authored-by: Maxim Smolskiy --- backtracking/combination_sum.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/backtracking/combination_sum.py b/backtracking/combination_sum.py index 3c6ed81f44f0..3d954f11d2c5 100644 --- a/backtracking/combination_sum.py +++ b/backtracking/combination_sum.py @@ -47,8 +47,18 @@ def combination_sum(candidates: list, target: int) -> list: >>> combination_sum([-8, 2.3, 0], 1) Traceback (most recent call last): ... - RecursionError: maximum recursion depth exceeded + ValueError: All elements in candidates must be non-negative + >>> combination_sum([], 1) + Traceback (most recent call last): + ... 
+ ValueError: Candidates list should not be empty """ + if not candidates: + raise ValueError("Candidates list should not be empty") + + if any(x < 0 for x in candidates): + raise ValueError("All elements in candidates must be non-negative") + path = [] # type: list[int] answer = [] # type: list[int] backtrack(candidates, path, answer, target, 0) From 84f101ca6eba4255b7780120ef772f64be437a67 Mon Sep 17 00:00:00 2001 From: Prathamesh Gadekar <93116210+Pr0-C0der@users.noreply.github.com> Date: Thu, 28 Aug 2025 03:52:12 +0530 Subject: [PATCH 24/66] Add/generate parentheses iterative approach (#10024) * Generate parantheses iterative * Generate parantheses iterative * Generating parantheses code using iterative approach * Update generate_parentheses_iterative.py * updating DIRECTORY.md * Update generate_parentheses_iterative.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update generate_parentheses_iterative.py * Update generate_parentheses_iterative.py * Update generate_parentheses_iterative.py --------- Co-authored-by: nightmare10123 Co-authored-by: Maxim Smolskiy Co-authored-by: MaximSmolskiy Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- DIRECTORY.md | 1 + .../generate_parentheses_iterative.py | 62 +++++++++++++++++++ 2 files changed, 63 insertions(+) create mode 100644 backtracking/generate_parentheses_iterative.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 0636ba0a7ecc..41a2d2e9af03 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -12,6 +12,7 @@ * [Combination Sum](backtracking/combination_sum.py) * [Crossword Puzzle Solver](backtracking/crossword_puzzle_solver.py) * [Generate Parentheses](backtracking/generate_parentheses.py) + * [Generate Parentheses Iterative](backtracking/generate_parentheses_iterative.py) * [Hamiltonian Cycle](backtracking/hamiltonian_cycle.py) * [Knight Tour](backtracking/knight_tour.py) * [Match Word Pattern](backtracking/match_word_pattern.py) diff --git a/backtracking/generate_parentheses_iterative.py b/backtracking/generate_parentheses_iterative.py new file mode 100644 index 000000000000..175941c7ae95 --- /dev/null +++ b/backtracking/generate_parentheses_iterative.py @@ -0,0 +1,62 @@ +def generate_parentheses_iterative(length: int) -> list: + """ + Generate all valid combinations of parentheses (Iterative Approach). + + The algorithm works as follows: + 1. Initialize an empty list to store the combinations. + 2. Initialize a stack to keep track of partial combinations. + 3. Start with an empty string and push it on the stack along with the counts of '(' and ')'. + 4. While the stack is not empty: + a. Pop a partial combination and its open and close counts from the stack. + b. If the combination length is equal to 2*length, add it to the result. + c. If open count < length, push a new combination with an added '(' on the stack. + d. If close count < open count, push a new combination with an added ')' on the stack. + 5. Return the result containing all valid combinations.
+ + Args: + length: The desired length of the parentheses combinations + + Returns: + A list of strings representing valid combinations of parentheses + + Time Complexity: + O(2^(2*length)) + + Space Complexity: + O(2^(2*length)) + + >>> generate_parentheses_iterative(3) + ['()()()', '()(())', '(())()', '(()())', '((()))'] + >>> generate_parentheses_iterative(2) + ['()()', '(())'] + >>> generate_parentheses_iterative(1) + ['()'] + >>> generate_parentheses_iterative(0) + [''] + """ + result = [] + stack = [] + + # Each element in stack is a tuple (current_combination, open_count, close_count) + stack.append(("", 0, 0)) + + while stack: + current_combination, open_count, close_count = stack.pop() + + if len(current_combination) == 2 * length: + result.append(current_combination) + + if open_count < length: + stack.append((current_combination + "(", open_count + 1, close_count)) + + if close_count < open_count: + stack.append((current_combination + ")", open_count, close_count + 1)) + + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() + print(generate_parentheses_iterative(3)) From 54aa73fdf091dfaf25f1971db443b33169d799f4 Mon Sep 17 00:00:00 2001 From: Praful Katare <47990928+Kpraful@users.noreply.github.com> Date: Thu, 28 Aug 2025 04:15:05 +0530 Subject: [PATCH 25/66] Fixes bugs in graphs/dijkstra_algorithm.py (#10092) * Fixes bug in PriorityQueue Algorithm; Fixes syntax in code for array. * Update dijkstra_algorithm.py * Update dijkstra_algorithm.py * Update dijkstra_algorithm.py * Update dijkstra_algorithm.py --------- Co-authored-by: Maxim Smolskiy --- graphs/dijkstra_algorithm.py | 26 +++++++------------------- 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/graphs/dijkstra_algorithm.py b/graphs/dijkstra_algorithm.py index 51412b790bac..60646862fca8 100644 --- a/graphs/dijkstra_algorithm.py +++ b/graphs/dijkstra_algorithm.py @@ -52,45 +52,33 @@ def min_heapify(self, idx): >>> priority_queue_test.array = [(5, 'A'), (10, 'B'), (15, 'C')] >>> priority_queue_test.min_heapify(0) - Traceback (most recent call last): - ... - TypeError: 'list' object is not callable >>> priority_queue_test.array [(5, 'A'), (10, 'B'), (15, 'C')] >>> priority_queue_test.array = [(10, 'A'), (5, 'B'), (15, 'C')] >>> priority_queue_test.min_heapify(0) - Traceback (most recent call last): - ... - TypeError: 'list' object is not callable >>> priority_queue_test.array - [(10, 'A'), (5, 'B'), (15, 'C')] + [(5, 'B'), (10, 'A'), (15, 'C')] >>> priority_queue_test.array = [(10, 'A'), (15, 'B'), (5, 'C')] >>> priority_queue_test.min_heapify(0) - Traceback (most recent call last): - ... - TypeError: 'list' object is not callable >>> priority_queue_test.array - [(10, 'A'), (15, 'B'), (5, 'C')] + [(5, 'C'), (15, 'B'), (10, 'A')] >>> priority_queue_test.array = [(10, 'A'), (5, 'B')] >>> priority_queue_test.cur_size = len(priority_queue_test.array) >>> priority_queue_test.pos = {'A': 0, 'B': 1} >>> priority_queue_test.min_heapify(0) - Traceback (most recent call last): - ... 
- TypeError: 'list' object is not callable >>> priority_queue_test.array - [(10, 'A'), (5, 'B')] + [(5, 'B'), (10, 'A')] """ lc = self.left(idx) rc = self.right(idx) - if lc < self.cur_size and self.array(lc)[0] < self.array[idx][0]: + if lc < self.cur_size and self.array[lc][0] < self.array[idx][0]: smallest = lc else: smallest = idx - if rc < self.cur_size and self.array(rc)[0] < self.array[smallest][0]: + if rc < self.cur_size and self.array[rc][0] < self.array[smallest][0]: smallest = rc if smallest != idx: self.swap(idx, smallest) @@ -130,12 +118,12 @@ def extract_min(self): >>> priority_queue_test.extract_min() 'C' >>> priority_queue_test.array[0] - (15, 'B') + (10, 'A') """ min_node = self.array[0][1] self.array[0] = self.array[self.cur_size - 1] self.cur_size -= 1 - self.min_heapify(1) + self.min_heapify(0) del self.pos[min_node] return min_node From 4394fd93d384cea5cc46ef61d31995ee28872cc9 Mon Sep 17 00:00:00 2001 From: __Aditya <149356523+aditya7balotra@users.noreply.github.com> Date: Fri, 29 Aug 2025 22:03:43 +0530 Subject: [PATCH 26/66] Weierstrass Method (#12877) * Add weierstrass_method for approximating complex roots - Implements Durand-Kerner (Weierstrass) method for polynomial root finding - Accepts user-defined polynomial function and degree - Uses random perturbation of complex roots of unity for initial guesses - Handles validation, overflow clipping, and includes doctest * Update weierstrass_method.py * add more tests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update weierstrass_method.py * Update weierstrass_method.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../numerical_analysis/weierstrass_method.py | 97 +++++++++++++++++++ 1 file changed, 97 insertions(+) create mode 100644 maths/numerical_analysis/weierstrass_method.py diff --git a/maths/numerical_analysis/weierstrass_method.py b/maths/numerical_analysis/weierstrass_method.py new file mode 100644 index 000000000000..b5a767af3a86 --- /dev/null +++ b/maths/numerical_analysis/weierstrass_method.py @@ -0,0 +1,97 @@ +from collections.abc import Callable + +import numpy as np + + +def weierstrass_method( + polynomial: Callable[[np.ndarray], np.ndarray], + degree: int, + roots: np.ndarray | None = None, + max_iter: int = 100, +) -> np.ndarray: + """ + Approximates all complex roots of a polynomial using the + Weierstrass (Durand-Kerner) method. + Args: + polynomial: A function that takes a NumPy array of complex numbers and returns + the polynomial values at those points. + degree: Degree of the polynomial (number of roots to find). Must be ≥ 1. + roots: Optional initial guess as a NumPy array of complex numbers. + Must have length equal to 'degree'. + If None, perturbed complex roots of unity are used. + max_iter: Number of iterations to perform (default: 100). + + Returns: + np.ndarray: Array of approximated complex roots. + + Raises: + ValueError: If degree < 1, or if initial roots length doesn't match the degree. + + Note: + - Root updates are clipped to prevent numerical overflow. + + Example: + >>> import numpy as np + >>> def check(poly, degree, expected): + ... roots = weierstrass_method(poly, degree) + ... return np.allclose(np.sort(roots), np.sort(expected)) + + >>> check( + ... lambda x: x**2 - 1, + ... 2, + ... np.array([-1, 1])) + True + + >>> check( + ... lambda x: x**3 - 4.5*x**2 + 5.75*x - 1.875, + ... 3, + ... np.array([1.5, 0.5, 2.5]) + ... 
) + True + + See Also: + https://en.wikipedia.org/wiki/Durand%E2%80%93Kerner_method + """ + + if degree < 1: + raise ValueError("Degree of the polynomial must be at least 1.") + + if roots is None: + # Use perturbed complex roots of unity as initial guesses + rng = np.random.default_rng() + roots = np.array( + [ + np.exp(2j * np.pi * i / degree) * (1 + 1e-3 * rng.random()) + for i in range(degree) + ], + dtype=np.complex128, + ) + + else: + roots = np.asarray(roots, dtype=np.complex128) + if roots.shape[0] != degree: + raise ValueError( + "Length of initial roots must match the degree of the polynomial." + ) + + for _ in range(max_iter): + # Construct the product denominator for each root + denominator = np.array([root - roots for root in roots], dtype=np.complex128) + np.fill_diagonal(denominator, 1.0) # Avoid zero in diagonal + denominator = np.prod(denominator, axis=1) + + # Evaluate polynomial at each root + numerator = polynomial(roots).astype(np.complex128) + + # Compute update and clip to prevent overflow + delta = numerator / denominator + delta = np.clip(delta, -1e10, 1e10) + roots -= delta + + return roots + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 5c5057247683d5851f4d34982ea69de0d648b665 Mon Sep 17 00:00:00 2001 From: Aasheesh <126905285+AasheeshLikePanner@users.noreply.github.com> Date: Sat, 30 Aug 2025 04:47:31 +0530 Subject: [PATCH 27/66] Fixing stock_span_problem.py (#10540) * Adding doctests in simpson_rule.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update stock_span_problem.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update subset_generation.py * Update subset_generation.py * Update data_structures/stacks/stock_span_problem.py Co-authored-by: Christian Clauss * Update stock_span_problem.py * Update data_structures/stacks/stock_span_problem.py Co-authored-by: Christian Clauss * Update stock_span_problem.py * Update stock_span_problem.py * updating DIRECTORY.md * Update stock_span_problem.py * Update stock_span_problem.py * Update stock_span_problem.py * Update stock_span_problem.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss Co-authored-by: Maxim Smolskiy Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 1 + data_structures/stacks/stock_span_problem.py | 34 ++++++++++++++++---- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/DIRECTORY.md b/DIRECTORY.md index 41a2d2e9af03..2df9d56e99b7 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -725,6 +725,7 @@ * [Secant Method](maths/numerical_analysis/secant_method.py) * [Simpson Rule](maths/numerical_analysis/simpson_rule.py) * [Square Root](maths/numerical_analysis/square_root.py) + * [Weierstrass Method](maths/numerical_analysis/weierstrass_method.py) * [Odd Sieve](maths/odd_sieve.py) * [Perfect Cube](maths/perfect_cube.py) * [Perfect Number](maths/perfect_number.py) diff --git a/data_structures/stacks/stock_span_problem.py b/data_structures/stacks/stock_span_problem.py index 5efe58d25798..74c2636784e2 100644 --- a/data_structures/stacks/stock_span_problem.py +++ b/data_structures/stacks/stock_span_problem.py @@ -8,8 +8,29 @@ """ -def calculation_span(price, s): +def calculate_span(price: list[int]) -> list[int]: + """ + Calculate the span values for a given list of stock prices. + Args: + price: List of stock prices. + Returns: + List of span values. 
+ + >>> calculate_span([10, 4, 5, 90, 120, 80]) + [1, 1, 2, 4, 5, 1] + >>> calculate_span([100, 50, 60, 70, 80, 90]) + [1, 1, 2, 3, 4, 5] + >>> calculate_span([5, 4, 3, 2, 1]) + [1, 1, 1, 1, 1] + >>> calculate_span([1, 2, 3, 4, 5]) + [1, 2, 3, 4, 5] + >>> calculate_span([10, 20, 30, 40, 50]) + [1, 2, 3, 4, 5] + >>> calculate_span([100, 80, 60, 70, 60, 75, 85]) + [1, 1, 1, 2, 1, 4, 6] + """ n = len(price) + s = [0] * n # Create a stack and push index of fist element to it st = [] st.append(0) @@ -21,18 +42,20 @@ def calculation_span(price, s): for i in range(1, n): # Pop elements from stack while stack is not # empty and top of stack is smaller than price[i] - while len(st) > 0 and price[st[0]] <= price[i]: + while len(st) > 0 and price[st[-1]] <= price[i]: st.pop() # If stack becomes empty, then price[i] is greater # than all elements on left of it, i.e. price[0], # price[1], ..price[i-1]. Else the price[i] is # greater than elements after top of stack - s[i] = i + 1 if len(st) <= 0 else (i - st[0]) + s[i] = i + 1 if len(st) <= 0 else (i - st[-1]) # Push this element to stack st.append(i) + return s + # A utility function to print elements of array def print_array(arr, n): @@ -42,10 +65,9 @@ def print_array(arr, n): # Driver program to test above function price = [10, 4, 5, 90, 120, 80] -S = [0 for i in range(len(price) + 1)] -# Fill the span values in array S[] -calculation_span(price, S) +# Calculate the span values +S = calculate_span(price) # Print the calculated span values print_array(S, len(price)) From 488f143b8c8b9d1528c2decdc9135d2c749a7a69 Mon Sep 17 00:00:00 2001 From: Venkata Ramana Menda <115451367+RamanaMenda@users.noreply.github.com> Date: Sat, 30 Aug 2025 05:46:21 +0530 Subject: [PATCH 28/66] Performance: 25% faster Project Euler 73 #10503 (#11553) * Seperate slow_solution and solution * Add performance benchmark * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix issues * Update sol1.py * Update sol1.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- project_euler/problem_073/sol1.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/project_euler/problem_073/sol1.py b/project_euler/problem_073/sol1.py index 2b66b7d8769b..c39110252ccd 100644 --- a/project_euler/problem_073/sol1.py +++ b/project_euler/problem_073/sol1.py @@ -36,7 +36,12 @@ def solution(max_d: int = 12_000) -> int: fractions_number = 0 for d in range(max_d + 1): - for n in range(d // 3 + 1, (d + 1) // 2): + n_start = d // 3 + 1 + n_step = 1 + if d % 2 == 0: + n_start += 1 - n_start % 2 + n_step = 2 + for n in range(n_start, (d + 1) // 2, n_step): if gcd(n, d) == 1: fractions_number += 1 return fractions_number From c5de3954ccefaeae76b9ebaca045a396619a6761 Mon Sep 17 00:00:00 2001 From: Sohail khan <62786136+byteninjaa0@users.noreply.github.com> Date: Sat, 30 Aug 2025 05:58:38 +0530 Subject: [PATCH 29/66] Add new test for bucket sort algorithm in sorts/bucket_sort.py (#11605) * Add new test for bucket sort algorithm in sorts/bucket_sort.py * Update fractional_knapsack.py * Update matrix_class.py * Update bucket_sort.py * Update bucket_sort.py --------- Co-authored-by: Maxim Smolskiy --- sorts/bucket_sort.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/sorts/bucket_sort.py b/sorts/bucket_sort.py index 1c1320a58a7d..893c7ff3a23a 100644 --- a/sorts/bucket_sort.py +++ b/sorts/bucket_sort.py @@ -51,12 +51,35 @@ def 
bucket_sort(my_list: list, bucket_count: int = 10) -> list: >>> collection = random.sample(range(-50, 50), 50) >>> bucket_sort(collection) == sorted(collection) True + >>> data = [1, 2, 2, 1, 1, 3] + >>> bucket_sort(data) == sorted(data) + True + >>> data = [5, 5, 5, 5, 5] + >>> bucket_sort(data) == sorted(data) + True + >>> data = [1000, -1000, 500, -500, 0] + >>> bucket_sort(data) == sorted(data) + True + >>> data = [5.5, 2.2, -1.1, 3.3, 0.0] + >>> bucket_sort(data) == sorted(data) + True + >>> bucket_sort([1]) == [1] + True + >>> data = [-1.1, -1.5, -3.4, 2.5, 3.6, -3.3] + >>> bucket_sort(data) == sorted(data) + True + >>> data = [9, 2, 7, 1, 5] + >>> bucket_sort(data) == sorted(data) + True """ if len(my_list) == 0 or bucket_count <= 0: return [] min_value, max_value = min(my_list), max(my_list) + if min_value == max_value: + return my_list + bucket_size = (max_value - min_value) / bucket_count buckets: list[list] = [[] for _ in range(bucket_count)] @@ -73,3 +96,6 @@ def bucket_sort(my_list: list, bucket_count: int = 10) -> list: testmod() assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5] assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15] + assert bucket_sort([1.1, 1.2, -1.2, 0, 2.4]) == [-1.2, 0, 1.1, 1.2, 2.4] + assert bucket_sort([5, 5, 5, 5, 5]) == [5, 5, 5, 5, 5] + assert bucket_sort([-5, -1, -6, -2]) == [-6, -5, -2, -1] From 1e0d3173fa4566bc03fbc207168225d948153bb6 Mon Sep 17 00:00:00 2001 From: Abhijit Kumar Singh Date: Sat, 30 Aug 2025 06:07:13 +0530 Subject: [PATCH 30/66] Specify space complexity for merge sort (#11749) * Added space complexity Space complexity of merge sort is a key factor, when compared to quick sort * Update merge_sort.py --------- Co-authored-by: Maxim Smolskiy --- sorts/merge_sort.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sorts/merge_sort.py b/sorts/merge_sort.py index 0628b848b794..11c202788035 100644 --- a/sorts/merge_sort.py +++ b/sorts/merge_sort.py @@ -18,6 +18,7 @@ def merge_sort(collection: list) -> list: :return: The same collection ordered in ascending order. 
Time Complexity: O(n log n) + Space Complexity: O(n) Examples: >>> merge_sort([0, 5, 3, 2, 2]) From b0920454ccae10414aed0cb7ed4ccc60930f341b Mon Sep 17 00:00:00 2001 From: Ronald Ngounou <74538524+ronaldngounou@users.noreply.github.com> Date: Sat, 30 Aug 2025 01:49:44 -0700 Subject: [PATCH 31/66] Add unit tests to binary_tree_path_sum.py (#11833) * test: Add unit tests * test: Add successful tests in binaree_tree_path_sum * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update binary_tree_path_sum.py * Update binary_tree_path_sum.py * Update binary_tree_path_sum.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- .../binary_tree/binary_tree_path_sum.py | 20 +++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/data_structures/binary_tree/binary_tree_path_sum.py b/data_structures/binary_tree/binary_tree_path_sum.py index a3fe9ca7a7e2..8477690c777a 100644 --- a/data_structures/binary_tree/binary_tree_path_sum.py +++ b/data_structures/binary_tree/binary_tree_path_sum.py @@ -50,6 +50,26 @@ class BinaryTreePathSum: >>> tree.right.right = Node(10) >>> BinaryTreePathSum().path_sum(tree, 8) 2 + >>> BinaryTreePathSum().path_sum(None, 0) + 0 + >>> BinaryTreePathSum().path_sum(tree, 0) + 0 + + The second tree looks like this + 0 + / \ + 5 5 + + >>> tree2 = Node(0) + >>> tree2.left = Node(5) + >>> tree2.right = Node(5) + + >>> BinaryTreePathSum().path_sum(tree2, 5) + 4 + >>> BinaryTreePathSum().path_sum(tree2, -1) + 0 + >>> BinaryTreePathSum().path_sum(tree2, 0) + 1 """ target: int From 501576f90e97de5eedffc7d7196a5fb1fa751028 Mon Sep 17 00:00:00 2001 From: ANIRUDDHA ADAK Date: Sat, 30 Aug 2025 15:41:57 +0530 Subject: [PATCH 32/66] Update README.md (#12345) * Update README.md Add emojis for enhanced visual appeal and readability in README . * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update README.md * Update README.md * Update README.md --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- README.md | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index fe65bb253360..182d36a8d905 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,7 @@

The Algorithms - Python

+ @@ -19,6 +20,7 @@ Gitter chat +
@@ -30,20 +32,21 @@ code style: black + -

All algorithms implemented in Python - for education

+

All algorithms implemented in Python - for education 📚

Implementations are for learning purposes only. They may be less efficient than the implementations in the Python standard library. Use them at your discretion. -## Getting Started +## 🚀 Getting Started -Read through our [Contribution Guidelines](CONTRIBUTING.md) before you contribute. +📋 Read through our [Contribution Guidelines](CONTRIBUTING.md) before you contribute. -## Community Channels +## 🌐 Community Channels We are on [Discord](https://the-algorithms.com/discord) and [Gitter](https://gitter.im/TheAlgorithms/community)! Community channels are a great way for you to ask questions and get help. Please join us! -## List of Algorithms +## 📜 List of Algorithms See our [directory](DIRECTORY.md) for easier navigation and a better overview of the project. From e3a263c1ed4ed1efdf5682714b1493f28d926760 Mon Sep 17 00:00:00 2001 From: lorenzo30salgado <79310171+lorenzo30salgado@users.noreply.github.com> Date: Sat, 30 Aug 2025 22:58:54 +0200 Subject: [PATCH 33/66] Adding a 3D plot to the k-means clustering algorithm (#12372) * Adding a 3D plot to the k-means clustering algorithm * Update k_means_clust.py * Update k_means_clust.py --------- Co-authored-by: Maxim Smolskiy --- machine_learning/k_means_clust.py | 22 +++++++++++++++++++++- 1 file changed, 21 insertions(+), 1 deletion(-) diff --git a/machine_learning/k_means_clust.py b/machine_learning/k_means_clust.py index a926362fc18b..a55153628f9c 100644 --- a/machine_learning/k_means_clust.py +++ b/machine_learning/k_means_clust.py @@ -37,7 +37,13 @@ heterogeneity, k ) - 5. Transfers Dataframe into excel format it must have feature called + 5. Plot the labeled 3D data points with centroids. + plot_kmeans( + X, + centroids, + cluster_assignment + ) + 6. Transfers Dataframe into excel format it must have feature called 'Clust' with k means clustering numbers in it. 
""" @@ -126,6 +132,19 @@ def plot_heterogeneity(heterogeneity, k): plt.show() +def plot_kmeans(data, centroids, cluster_assignment): + ax = plt.axes(projection="3d") + ax.scatter(data[:, 0], data[:, 1], data[:, 2], c=cluster_assignment, cmap="viridis") + ax.scatter( + centroids[:, 0], centroids[:, 1], centroids[:, 2], c="red", s=100, marker="x" + ) + ax.set_xlabel("X") + ax.set_ylabel("Y") + ax.set_zlabel("Z") + ax.set_title("3D K-Means Clustering Visualization") + plt.show() + + def kmeans( data, k, initial_centroids, maxiter=500, record_heterogeneity=None, verbose=False ): @@ -193,6 +212,7 @@ def kmeans( verbose=True, ) plot_heterogeneity(heterogeneity, k) + plot_kmeans(dataset["data"], centroids, cluster_assignment) def report_generator( From 9d52683ecbca6b8dcfa90a2e286d39b66ce5ffef Mon Sep 17 00:00:00 2001 From: ANANT JAIN <139585700+anant-jain01@users.noreply.github.com> Date: Sun, 31 Aug 2025 03:47:55 +0530 Subject: [PATCH 34/66] Create stalin_sort.py (#11989) * Create stalin_sort.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update stalin_sort.py * updating DIRECTORY.md * Update stalin_sort.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy Co-authored-by: MaximSmolskiy --- DIRECTORY.md | 1 + sorts/stalin_sort.py | 47 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 sorts/stalin_sort.py diff --git a/DIRECTORY.md b/DIRECTORY.md index 2df9d56e99b7..36acb3b97f1e 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -1301,6 +1301,7 @@ * [Shell Sort](sorts/shell_sort.py) * [Shrink Shell Sort](sorts/shrink_shell_sort.py) * [Slowsort](sorts/slowsort.py) + * [Stalin Sort](sorts/stalin_sort.py) * [Stooge Sort](sorts/stooge_sort.py) * [Strand Sort](sorts/strand_sort.py) * [Tim Sort](sorts/tim_sort.py) diff --git a/sorts/stalin_sort.py b/sorts/stalin_sort.py new file mode 100644 index 000000000000..6dd5708c7f01 --- /dev/null +++ b/sorts/stalin_sort.py @@ -0,0 +1,47 @@ +""" +Stalin Sort algorithm: Removes elements that are out of order. +Elements that are not greater than or equal to the previous element are discarded. +Reference: https://medium.com/@kaweendra/the-ultimate-sorting-algorithm-6513d6968420 +""" + + +def stalin_sort(sequence: list[int]) -> list[int]: + """ + Sorts a list using the Stalin sort algorithm. 
+ + >>> stalin_sort([4, 3, 5, 2, 1, 7]) + [4, 5, 7] + + >>> stalin_sort([1, 2, 3, 4]) + [1, 2, 3, 4] + + >>> stalin_sort([4, 5, 5, 2, 3]) + [4, 5, 5] + + >>> stalin_sort([6, 11, 12, 4, 1, 5]) + [6, 11, 12] + + >>> stalin_sort([5, 0, 4, 3]) + [5] + + >>> stalin_sort([5, 4, 3, 2, 1]) + [5] + + >>> stalin_sort([1, 2, 3, 4, 5]) + [1, 2, 3, 4, 5] + + >>> stalin_sort([1, 2, 8, 7, 6]) + [1, 2, 8] + """ + result = [sequence[0]] + for element in sequence[1:]: + if element >= result[-1]: + result.append(element) + + return result + + +if __name__ == "__main__": + import doctest + + doctest.testmod() From 0876a87186516f93c8d53ee69a600630905984a9 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 1 Sep 2025 20:33:56 +0200 Subject: [PATCH 35/66] [pre-commit.ci] pre-commit autoupdate (#12944) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.12.10 → v0.12.11](https://github.com/astral-sh/ruff-pre-commit/compare/v0.12.10...v0.12.11) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 6eb0906fb23a..f60913a743ad 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.12.10 + rev: v0.12.11 hooks: - id: ruff-check - id: ruff-format From 544f48ff34492eba4cf452fca3eba7bdff1cda5c Mon Sep 17 00:00:00 2001 From: hema_ameh <152301559+PYDIMARRI-HEMA-HARSHINI-23-586@users.noreply.github.com> Date: Thu, 4 Sep 2025 06:19:59 +0530 Subject: [PATCH 36/66] Fix is_palindrome_recursive logic in strings/palindrome.py (#12946) * Fix: is_palindrome_recursive logic for 2-char strings * Update palindrome.py * Update palindrome.py * Update palindrome.py --------- Co-authored-by: Maxim Smolskiy --- strings/palindrome.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/strings/palindrome.py b/strings/palindrome.py index bfdb3ddcf396..e765207e5942 100644 --- a/strings/palindrome.py +++ b/strings/palindrome.py @@ -11,6 +11,8 @@ "BB": True, "ABC": False, "amanaplanacanalpanama": True, # "a man a plan a canal panama" + "abcdba": False, + "AB": False, } # Ensure our test data is valid assert all((key == key[::-1]) is value for key, value in test_data.items()) @@ -61,7 +63,7 @@ def is_palindrome_recursive(s: str) -> bool: >>> all(is_palindrome_recursive(key) is value for key, value in test_data.items()) True """ - if len(s) <= 2: + if len(s) <= 1: return True if s[0] == s[len(s) - 1]: return is_palindrome_recursive(s[1:-1]) From 4ce1185f9e92ecea6805c8ee77404a56d8f70ea9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 5 Sep 2025 08:23:36 +0200 Subject: [PATCH 37/66] Bump actions/setup-python from 5 to 6 (#12952) Bumps [actions/setup-python](https://github.com/actions/setup-python) from 5 to 6. - [Release notes](https://github.com/actions/setup-python/releases) - [Commits](https://github.com/actions/setup-python/compare/v5...v6) --- updated-dependencies: - dependency-name: actions/setup-python dependency-version: '6' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- .github/workflows/directory_writer.yml | 2 +- .github/workflows/project_euler.yml | 4 ++-- .github/workflows/sphinx.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 69192db0c4c6..731e3fad3b85 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -14,7 +14,7 @@ jobs: with: enable-cache: true cache-dependency-glob: uv.lock - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: 3.x allow-prereleases: true diff --git a/.github/workflows/directory_writer.yml b/.github/workflows/directory_writer.yml index f5167f8d1a58..9a4682677c00 100644 --- a/.github/workflows/directory_writer.yml +++ b/.github/workflows/directory_writer.yml @@ -9,7 +9,7 @@ jobs: - uses: actions/checkout@v5 with: fetch-depth: 0 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: 3.x - name: Write DIRECTORY.md diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index 8b8cb2a1e68f..f52ff280b29a 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -16,7 +16,7 @@ jobs: steps: - uses: actions/checkout@v5 - uses: astral-sh/setup-uv@v6 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: 3.x - run: uv sync --group=euler-validate --group=test @@ -26,7 +26,7 @@ jobs: steps: - uses: actions/checkout@v5 - uses: astral-sh/setup-uv@v6 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: 3.x - run: uv sync --group=euler-validate --group=test diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml index e28fa04f3ab4..bd253dc3de65 100644 --- a/.github/workflows/sphinx.yml +++ b/.github/workflows/sphinx.yml @@ -27,7 +27,7 @@ jobs: steps: - uses: actions/checkout@v5 - uses: astral-sh/setup-uv@v6 - - uses: actions/setup-python@v5 + - uses: actions/setup-python@v6 with: python-version: 3.13 allow-prereleases: true From 8f1a6b0ca0cbe563ea3f48c43695a07959b923a1 Mon Sep 17 00:00:00 2001 From: Juan Dupierris Date: Mon, 8 Sep 2025 01:41:07 +0200 Subject: [PATCH 38/66] Adding the function is_proth_number (#12399) * Adding the function isProthNumber(n : int) which returns true if n is a Proth number * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixing the issues of the isprothnumber function * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * New fixes on isprothnumber() * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fixes on isprothnumber() * Fixes on isprothnumber * Fixes on isprothnumber() * Fixes on isprothnumber * Update proth_number.py * Update proth_number.py * Update proth_number.py * Update proth_number.py * Update proth_number.py * Update proth_number.py * Update proth_number.py * Update proth_number.py --------- Co-authored-by: Juanitoupipou Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- maths/special_numbers/proth_number.py | 50 +++++++++++++++++++++++++++ 1 file changed, 50 insertions(+) diff --git a/maths/special_numbers/proth_number.py b/maths/special_numbers/proth_number.py index 
47747ed260f7..b9b827b6a5a2 100644 --- a/maths/special_numbers/proth_number.py +++ b/maths/special_numbers/proth_number.py @@ -59,6 +59,50 @@ def proth(number: int) -> int: return proth_list[number - 1] +def is_proth_number(number: int) -> bool: + """ + :param number: positive integer number + :return: true if number is a Proth number, false otherwise + >>> is_proth_number(1) + False + >>> is_proth_number(2) + False + >>> is_proth_number(3) + True + >>> is_proth_number(4) + False + >>> is_proth_number(5) + True + >>> is_proth_number(34) + False + >>> is_proth_number(-1) + Traceback (most recent call last): + ... + ValueError: Input value of [number=-1] must be > 0 + >>> is_proth_number(6.0) + Traceback (most recent call last): + ... + TypeError: Input value of [number=6.0] must be an integer + """ + if not isinstance(number, int): + message = f"Input value of [{number=}] must be an integer" + raise TypeError(message) + + if number <= 0: + message = f"Input value of [{number=}] must be > 0" + raise ValueError(message) + + if number == 1: + return False + + number -= 1 + n = 0 + while number % 2 == 0: + n += 1 + number //= 2 + return number < 2**n + + if __name__ == "__main__": import doctest @@ -73,3 +117,9 @@ def proth(number: int) -> int: continue print(f"The {number}th Proth number: {value}") + + for number in [1, 2, 3, 4, 5, 9, 13, 49, 57, 193, 241, 163, 201]: + if is_proth_number(number): + print(f"{number} is a Proth number") + else: + print(f"{number} is not a Proth number") From 18c853d301eb03ca2ba829250c3d415485e49d8b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 8 Sep 2025 20:46:04 +0200 Subject: [PATCH 39/66] [pre-commit.ci] pre-commit autoupdate (#12961) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.12.11 → v0.12.12](https://github.com/astral-sh/ruff-pre-commit/compare/v0.12.11...v0.12.12) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f60913a743ad..c30442a2a6f6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.12.11 + rev: v0.12.12 hooks: - id: ruff-check - id: ruff-format From 63180d7e243a95ce28c2b52abcd7c81c452f623f Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Sat, 13 Sep 2025 00:56:14 +0200 Subject: [PATCH 40/66] pre-commit autoupdate 2025-09-11 (#12963) * pre-commit autoupdate 2025-09-11 * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- data_structures/arrays/sudoku_solver.py | 2 +- data_structures/trie/radix_tree.py | 4 ++-- graphs/graph_adjacency_list.py | 10 +++++----- graphs/graph_adjacency_matrix.py | 10 +++++----- knapsack/tests/test_greedy_knapsack.py | 12 +++++++----- linear_algebra/gaussian_elimination.py | 2 +- linear_algebra/jacobi_iteration_method.py | 4 ++-- machine_learning/polynomial_regression.py | 2 +- machine_learning/principle_component_analysis.py | 2 +- maths/chinese_remainder_theorem.py | 2 +- maths/modular_division.py | 4 ++-- neural_network/convolution_neural_network.py | 4 ++-- 
project_euler/problem_551/sol1.py | 2 +- pyproject.toml | 1 - scheduling/multi_level_feedback_queue.py | 2 +- 16 files changed, 33 insertions(+), 32 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c30442a2a6f6..4af51c08d8a4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.12.12 + rev: v0.13.0 hooks: - id: ruff-check - id: ruff-format diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index 4c722f12fd6e..07269e2a69cc 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -149,7 +149,7 @@ def search(values): if all(len(values[s]) == 1 for s in squares): return values ## Solved! ## Chose the unfilled square s with the fewest possibilities - n, s = min((len(values[s]), s) for s in squares if len(values[s]) > 1) + _n, s = min((len(values[s]), s) for s in squares if len(values[s]) > 1) return some(search(assign(values.copy(), s, d)) for d in values[s]) diff --git a/data_structures/trie/radix_tree.py b/data_structures/trie/radix_tree.py index caf566a6ce30..bd2306befa79 100644 --- a/data_structures/trie/radix_tree.py +++ b/data_structures/trie/radix_tree.py @@ -115,7 +115,7 @@ def find(self, word: str) -> bool: if not incoming_node: return False else: - matching_string, remaining_prefix, remaining_word = incoming_node.match( + _matching_string, remaining_prefix, remaining_word = incoming_node.match( word ) # If there is remaining prefix, the word can't be on the tree @@ -144,7 +144,7 @@ def delete(self, word: str) -> bool: if not incoming_node: return False else: - matching_string, remaining_prefix, remaining_word = incoming_node.match( + _matching_string, remaining_prefix, remaining_word = incoming_node.match( word ) # If there is remaining prefix, the word can't be on the tree diff --git a/graphs/graph_adjacency_list.py b/graphs/graph_adjacency_list.py index 244e59e0e1bf..c901e2cf3dac 100644 --- a/graphs/graph_adjacency_list.py +++ b/graphs/graph_adjacency_list.py @@ -448,7 +448,7 @@ def test_remove_edge(self) -> None: ( undirected_graph, directed_graph, - random_vertices, + _random_vertices, random_edges, ) = self.__generate_graphs(20, 0, 100, 4) @@ -502,7 +502,7 @@ def test_add_vertex_exception_check(self) -> None: undirected_graph, directed_graph, random_vertices, - random_edges, + _random_edges, ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: @@ -516,7 +516,7 @@ def test_remove_vertex_exception_check(self) -> None: undirected_graph, directed_graph, random_vertices, - random_edges, + _random_edges, ) = self.__generate_graphs(20, 0, 100, 4) for i in range(101): @@ -530,7 +530,7 @@ def test_add_edge_exception_check(self) -> None: ( undirected_graph, directed_graph, - random_vertices, + _random_vertices, random_edges, ) = self.__generate_graphs(20, 0, 100, 4) @@ -569,7 +569,7 @@ def test_contains_edge_exception_check(self) -> None: undirected_graph, directed_graph, random_vertices, - random_edges, + _random_edges, ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: diff --git a/graphs/graph_adjacency_matrix.py b/graphs/graph_adjacency_matrix.py index 8eeeae786513..6dca0fbbcf05 100644 --- a/graphs/graph_adjacency_matrix.py +++ b/graphs/graph_adjacency_matrix.py @@ -469,7 +469,7 @@ def test_remove_edge(self) -> None: ( undirected_graph, directed_graph, - random_vertices, + _random_vertices, random_edges, ) = 
self.__generate_graphs(20, 0, 100, 4) @@ -523,7 +523,7 @@ def test_add_vertex_exception_check(self) -> None: undirected_graph, directed_graph, random_vertices, - random_edges, + _random_edges, ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: @@ -537,7 +537,7 @@ def test_remove_vertex_exception_check(self) -> None: undirected_graph, directed_graph, random_vertices, - random_edges, + _random_edges, ) = self.__generate_graphs(20, 0, 100, 4) for i in range(101): @@ -551,7 +551,7 @@ def test_add_edge_exception_check(self) -> None: ( undirected_graph, directed_graph, - random_vertices, + _random_vertices, random_edges, ) = self.__generate_graphs(20, 0, 100, 4) @@ -590,7 +590,7 @@ def test_contains_edge_exception_check(self) -> None: undirected_graph, directed_graph, random_vertices, - random_edges, + _random_edges, ) = self.__generate_graphs(20, 0, 100, 4) for vertex in random_vertices: diff --git a/knapsack/tests/test_greedy_knapsack.py b/knapsack/tests/test_greedy_knapsack.py index e6a40084109e..7ebaddd3c99e 100644 --- a/knapsack/tests/test_greedy_knapsack.py +++ b/knapsack/tests/test_greedy_knapsack.py @@ -28,7 +28,7 @@ def test_negative_max_weight(self): # profit = [10, 20, 30, 40, 50, 60] # weight = [2, 4, 6, 8, 10, 12] # max_weight = -15 - pytest.raises(ValueError, match="max_weight must greater than zero.") + pytest.raises(ValueError, match=r"max_weight must greater than zero.") def test_negative_profit_value(self): """ @@ -38,7 +38,7 @@ def test_negative_profit_value(self): # profit = [10, -20, 30, 40, 50, 60] # weight = [2, 4, 6, 8, 10, 12] # max_weight = 15 - pytest.raises(ValueError, match="Weight can not be negative.") + pytest.raises(ValueError, match=r"Weight can not be negative.") def test_negative_weight_value(self): """ @@ -48,7 +48,7 @@ def test_negative_weight_value(self): # profit = [10, 20, 30, 40, 50, 60] # weight = [2, -4, 6, -8, 10, 12] # max_weight = 15 - pytest.raises(ValueError, match="Profit can not be negative.") + pytest.raises(ValueError, match=r"Profit can not be negative.") def test_null_max_weight(self): """ @@ -58,7 +58,7 @@ def test_null_max_weight(self): # profit = [10, 20, 30, 40, 50, 60] # weight = [2, 4, 6, 8, 10, 12] # max_weight = null - pytest.raises(ValueError, match="max_weight must greater than zero.") + pytest.raises(ValueError, match=r"max_weight must greater than zero.") def test_unequal_list_length(self): """ @@ -68,7 +68,9 @@ def test_unequal_list_length(self): # profit = [10, 20, 30, 40, 50] # weight = [2, 4, 6, 8, 10, 12] # max_weight = 100 - pytest.raises(IndexError, match="The length of profit and weight must be same.") + pytest.raises( + IndexError, match=r"The length of profit and weight must be same." 
+ ) if __name__ == "__main__": diff --git a/linear_algebra/gaussian_elimination.py b/linear_algebra/gaussian_elimination.py index 6f4075b710fd..cf816940b0d1 100644 --- a/linear_algebra/gaussian_elimination.py +++ b/linear_algebra/gaussian_elimination.py @@ -33,7 +33,7 @@ def retroactive_resolution( [ 0.5]]) """ - rows, columns = np.shape(coefficients) + rows, _columns = np.shape(coefficients) x: NDArray[float64] = np.zeros((rows, 1), dtype=float) for row in reversed(range(rows)): diff --git a/linear_algebra/jacobi_iteration_method.py b/linear_algebra/jacobi_iteration_method.py index 2cc9c103018b..0f9fcde7af6c 100644 --- a/linear_algebra/jacobi_iteration_method.py +++ b/linear_algebra/jacobi_iteration_method.py @@ -112,7 +112,7 @@ def jacobi_iteration_method( (coefficient_matrix, constant_matrix), axis=1 ) - rows, cols = table.shape + rows, _cols = table.shape strictly_diagonally_dominant(table) @@ -149,7 +149,7 @@ def jacobi_iteration_method( # Here we get 'i_col' - these are the column numbers, for each row # without diagonal elements, except for the last column. - i_row, i_col = np.where(masks) + _i_row, i_col = np.where(masks) ind = i_col.reshape(-1, rows - 1) #'i_col' is converted to a two-dimensional list 'ind', which will be diff --git a/machine_learning/polynomial_regression.py b/machine_learning/polynomial_regression.py index 212f40bea197..f52177df1292 100644 --- a/machine_learning/polynomial_regression.py +++ b/machine_learning/polynomial_regression.py @@ -93,7 +93,7 @@ def _design_matrix(data: np.ndarray, degree: int) -> np.ndarray: ... ValueError: Data must have dimensions N x 1 """ - rows, *remaining = data.shape + _rows, *remaining = data.shape if remaining: raise ValueError("Data must have dimensions N x 1") diff --git a/machine_learning/principle_component_analysis.py b/machine_learning/principle_component_analysis.py index 46ccdb968494..174500d89620 100644 --- a/machine_learning/principle_component_analysis.py +++ b/machine_learning/principle_component_analysis.py @@ -65,7 +65,7 @@ def main() -> None: """ Driver function to execute PCA and display results. 
""" - data_x, data_y = collect_dataset() + data_x, _data_y = collect_dataset() # Number of principal components to retain n_components = 2 diff --git a/maths/chinese_remainder_theorem.py b/maths/chinese_remainder_theorem.py index 18af63d106e8..b7a7712ae917 100644 --- a/maths/chinese_remainder_theorem.py +++ b/maths/chinese_remainder_theorem.py @@ -65,7 +65,7 @@ def invert_modulo(a: int, n: int) -> int: 1 """ - (b, x) = extended_euclid(a, n) + (b, _x) = extended_euclid(a, n) if b < 0: b = (b % n + n) % n return b diff --git a/maths/modular_division.py b/maths/modular_division.py index 2f8f4479b27d..94f12b3e096e 100644 --- a/maths/modular_division.py +++ b/maths/modular_division.py @@ -31,7 +31,7 @@ def modular_division(a: int, b: int, n: int) -> int: assert n > 1 assert a > 0 assert greatest_common_divisor(a, n) == 1 - (d, t, s) = extended_gcd(n, a) # Implemented below + (_d, _t, s) = extended_gcd(n, a) # Implemented below x = (b * s) % n return x @@ -47,7 +47,7 @@ def invert_modulo(a: int, n: int) -> int: 1 """ - (b, x) = extended_euclid(a, n) # Implemented below + (b, _x) = extended_euclid(a, n) # Implemented below if b < 0: b = (b % n + n) % n return b diff --git a/neural_network/convolution_neural_network.py b/neural_network/convolution_neural_network.py index d4ac360a98de..6b1aa50c7981 100644 --- a/neural_network/convolution_neural_network.py +++ b/neural_network/convolution_neural_network.py @@ -317,7 +317,7 @@ def predict(self, datas_test): print((" - - Shape: Test_Data ", np.shape(datas_test))) for p in range(len(datas_test)): data_test = np.asmatrix(datas_test[p]) - data_focus1, data_conved1 = self.convolute( + _data_focus1, data_conved1 = self.convolute( data_test, self.conv1, self.w_conv1, @@ -339,7 +339,7 @@ def predict(self, datas_test): def convolution(self, data): # return the data of image after convoluting process so we can check it out data_test = np.asmatrix(data) - data_focus1, data_conved1 = self.convolute( + _data_focus1, data_conved1 = self.convolute( data_test, self.conv1, self.w_conv1, diff --git a/project_euler/problem_551/sol1.py b/project_euler/problem_551/sol1.py index 100e9d41dd31..e13cf77a776d 100644 --- a/project_euler/problem_551/sol1.py +++ b/project_euler/problem_551/sol1.py @@ -185,7 +185,7 @@ def solution(n: int = 10**15) -> int: i = 1 dn = 0 while True: - diff, terms_jumped = next_term(digits, 20, i + dn, n) + _diff, terms_jumped = next_term(digits, 20, i + dn, n) dn += terms_jumped if dn == n - i: break diff --git a/pyproject.toml b/pyproject.toml index b680cc0d439e..71eb730f1329 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -124,7 +124,6 @@ lint.ignore = [ "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME "SIM905", # Consider using a list literal instead of `str.split` -- DO NOT FIX "SLF001", # Private member accessed: `_Iterator` -- FIX ME - "UP038", # Use `X | Y` in `{}` call instead of `(X, Y)` -- DO NOT FIX ] lint.per-file-ignores."data_structures/hashing/tests/test_hash_map.py" = [ diff --git a/scheduling/multi_level_feedback_queue.py b/scheduling/multi_level_feedback_queue.py index abee3c85c5a5..58ba2afa0e67 100644 --- a/scheduling/multi_level_feedback_queue.py +++ b/scheduling/multi_level_feedback_queue.py @@ -255,7 +255,7 @@ def multi_level_feedback_queue(self) -> deque[Process]: # all queues except last one have round_robin algorithm for i in range(self.number_of_queues - 1): - finished, self.ready_queue = self.round_robin( + _finished, self.ready_queue = self.round_robin( 
self.ready_queue, self.time_slices[i] ) # the last queue has first_come_first_served algorithm From 0ee534edde76462eeace007afdba3c6a4d43dbf0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 15 Sep 2025 20:17:29 +0200 Subject: [PATCH 41/66] [pre-commit.ci] pre-commit autoupdate (#12969) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/mirrors-mypy: v1.17.1 → v1.18.1](https://github.com/pre-commit/mirrors-mypy/compare/v1.17.1...v1.18.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4af51c08d8a4..9fbeb9a08682 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.17.1 + rev: v1.18.1 hooks: - id: mypy args: From 4ec71a303b4794d5c702c50e1c64b9175ed72b71 Mon Sep 17 00:00:00 2001 From: lighting9999 Date: Thu, 18 Sep 2025 21:55:59 +0800 Subject: [PATCH 42/66] fix covid_stats_via_xpath.py (#12975) * fix covid_stats_via_xpath.py Improve error handling. * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix covid_stats_via_xpath.py typo * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix ruff * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * upgrade covid_stats_via_xpath.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update and fix covid_stats_via_xpath.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- web_programming/covid_stats_via_xpath.py | 44 +++++++++++++++++------- 1 file changed, 32 insertions(+), 12 deletions(-) diff --git a/web_programming/covid_stats_via_xpath.py b/web_programming/covid_stats_via_xpath.py index 9c016ba414ea..88a248610441 100644 --- a/web_programming/covid_stats_via_xpath.py +++ b/web_programming/covid_stats_via_xpath.py @@ -1,7 +1,8 @@ """ -This is to show simple COVID19 info fetching from worldometers archive site using lxml -* The main motivation to use lxml in place of bs4 is that it is faster and therefore -more convenient to use in Python web projects (e.g. Django or Flask-based) +This script demonstrates fetching simple COVID-19 statistics from the +Worldometers archive site using lxml. lxml is chosen over BeautifulSoup +for its speed and convenience in Python web projects (such as Django or +Flask). """ # /// script @@ -25,15 +26,34 @@ class CovidData(NamedTuple): def covid_stats( - url: str = "https://web.archive.org/web/20250825095350/https://www.worldometers.info/coronavirus/", + url: str = ( + "https://web.archive.org/web/20250825095350/" + "https://www.worldometers.info/coronavirus/" + ), ) -> CovidData: xpath_str = '//div[@class = "maincounter-number"]/span/text()' - return CovidData( - *html.fromstring(httpx.get(url, timeout=10).content).xpath(xpath_str) + try: + response = httpx.get(url, timeout=10).raise_for_status() + except httpx.TimeoutException: + print( + "Request timed out. Please check your network connection " + "or try again later." 
+ ) + return CovidData("N/A", "N/A", "N/A") + except httpx.HTTPStatusError as e: + print(f"HTTP error occurred: {e}") + return CovidData("N/A", "N/A", "N/A") + data = html.fromstring(response.content).xpath(xpath_str) + if len(data) != 3: + print("Unexpected data format. The page structure may have changed.") + data = "N/A", "N/A", "N/A" + return CovidData(*data) + + +if __name__ == "__main__": + fmt = ( + "Total COVID-19 cases in the world: {}\n" + "Total deaths due to COVID-19 in the world: {}\n" + "Total COVID-19 patients recovered in the world: {}" ) - - -fmt = """Total COVID-19 cases in the world: {} -Total deaths due to COVID-19 in the world: {} -Total COVID-19 patients recovered in the world: {}""" -print(fmt.format(*covid_stats())) + print(fmt.format(*covid_stats())) From e696e4dc9007b56d9e14d40d203c36ef861ab1b2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 23 Sep 2025 02:18:01 +0200 Subject: [PATCH 43/66] [pre-commit.ci] pre-commit autoupdate (#12988) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.13.0 → v0.13.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.13.0...v0.13.1) - [github.com/pre-commit/mirrors-mypy: v1.18.1 → v1.18.2](https://github.com/pre-commit/mirrors-mypy/compare/v1.18.1...v1.18.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9fbeb9a08682..2f7c43b06af2 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -16,7 +16,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.13.0 + rev: v0.13.1 hooks: - id: ruff-check - id: ruff-format @@ -47,7 +47,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.18.1 + rev: v1.18.2 hooks: - id: mypy args: From 8d1fb262dab5d3f04c87f5433e8e2a6b9547ece4 Mon Sep 17 00:00:00 2001 From: kathrynpete <166650430+kathrynpete@users.noreply.github.com> Date: Tue, 23 Sep 2025 02:51:49 -0400 Subject: [PATCH 44/66] Added edit_distance test cases (#12984) * Added edit_distance test cases * Update edit_distance.py --------- Co-authored-by: Maxim Smolskiy --- strings/edit_distance.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/strings/edit_distance.py b/strings/edit_distance.py index e842c8555c8e..77ed23037937 100644 --- a/strings/edit_distance.py +++ b/strings/edit_distance.py @@ -14,6 +14,20 @@ def edit_distance(source: str, target: str) -> int: >>> edit_distance("GATTIC", "GALTIC") 1 + >>> edit_distance("NUM3", "HUM2") + 2 + >>> edit_distance("cap", "CAP") + 3 + >>> edit_distance("Cat", "") + 3 + >>> edit_distance("cat", "cat") + 0 + >>> edit_distance("", "123456789") + 9 + >>> edit_distance("Be@uty", "Beautyyyy!") + 5 + >>> edit_distance("lstring", "lsstring") + 1 """ if len(source) == 0: return len(target) From c0ad5bbde403b8db3097745e500e3f086607ee8f Mon Sep 17 00:00:00 2001 From: Dylanskyep <149001171+Dylanskyep@users.noreply.github.com> Date: Wed, 24 Sep 2025 15:18:44 -0400 Subject: [PATCH 45/66] Add doctests for cross function. Contributes to #9943 (#12991) * Add doctests for cross function. 
Contributes to #9943 * Update sudoku_solver.py --------- Co-authored-by: Maxim Smolskiy --- data_structures/arrays/sudoku_solver.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/data_structures/arrays/sudoku_solver.py b/data_structures/arrays/sudoku_solver.py index 07269e2a69cc..d2fa43bbf298 100644 --- a/data_structures/arrays/sudoku_solver.py +++ b/data_structures/arrays/sudoku_solver.py @@ -11,6 +11,19 @@ def cross(items_a, items_b): """ Cross product of elements in A and elements in B. + + >>> cross('AB', '12') + ['A1', 'A2', 'B1', 'B2'] + >>> cross('ABC', '123') + ['A1', 'A2', 'A3', 'B1', 'B2', 'B3', 'C1', 'C2', 'C3'] + >>> cross('ABC', '1234') + ['A1', 'A2', 'A3', 'A4', 'B1', 'B2', 'B3', 'B4', 'C1', 'C2', 'C3', 'C4'] + >>> cross('', '12') + [] + >>> cross('A', '') + [] + >>> cross('', '') + [] """ return [a + b for a in items_a for b in items_b] From a71618f891da36ae6a8f9b58273f586ca6acf2a4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 29 Sep 2025 20:30:14 +0200 Subject: [PATCH 46/66] [pre-commit.ci] pre-commit autoupdate (#13006) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.13.1 → v0.13.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.13.1...v0.13.2) * ci: autoupdate_schedule: monthly --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .pre-commit-config.yaml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2f7c43b06af2..5c66d306b0e0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,3 +1,6 @@ +ci: + autoupdate_schedule: monthly + repos: - repo: https://github.com/pre-commit/pre-commit-hooks rev: v6.0.0 @@ -16,7 +19,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.13.1 + rev: v0.13.2 hooks: - id: ruff-check - id: ruff-format From 7530a417e872040d35338d5e43ec176320dab462 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 6 Oct 2025 20:49:00 +0200 Subject: [PATCH 47/66] [pre-commit.ci] pre-commit autoupdate (#13286) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.13.2 → v0.13.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.13.2...v0.13.3) - [github.com/tox-dev/pyproject-fmt: v2.6.0 → v2.7.0](https://github.com/tox-dev/pyproject-fmt/compare/v2.6.0...v2.7.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5c66d306b0e0..82a669007945 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -19,7 +19,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.13.2 + rev: v0.13.3 hooks: - id: ruff-check - id: ruff-format @@ -32,7 +32,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: v2.6.0 + rev: v2.7.0 hooks: - id: pyproject-fmt From 9372040da93cf7f77fc4ec2fd9ce5f2761b8800b Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 7 Oct 2025 18:23:37 +0200 Subject: [PATCH 48/66] Test on Python 3.14 
(#12710) --- .github/workflows/build.yml | 9 ++++++++- .github/workflows/directory_writer.yml | 3 ++- .github/workflows/project_euler.yml | 20 ++++++++++++++++++-- .github/workflows/sphinx.yml | 9 ++++++++- ciphers/gronsfeld_cipher.py | 2 +- machine_learning/xgboost_classifier.py | 2 -- maths/largest_of_very_large_numbers.py | 2 +- pyproject.toml | 7 ++++--- 8 files changed, 42 insertions(+), 12 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 731e3fad3b85..666d45b13c1b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -9,6 +9,13 @@ jobs: build: runs-on: ubuntu-latest steps: + - run: + sudo apt-get update && sudo apt-get install -y libtiff5-dev libjpeg8-dev libopenjp2-7-dev + zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev python3-tk + libharfbuzz-dev libfribidi-dev libxcb1-dev + libxml2-dev libxslt-dev + libhdf5-dev + libopenblas-dev - uses: actions/checkout@v5 - uses: astral-sh/setup-uv@v6 with: @@ -16,7 +23,7 @@ jobs: cache-dependency-glob: uv.lock - uses: actions/setup-python@v6 with: - python-version: 3.x + python-version: 3.14 allow-prereleases: true - run: uv sync --group=test - name: Run tests diff --git a/.github/workflows/directory_writer.yml b/.github/workflows/directory_writer.yml index 9a4682677c00..866440a37b31 100644 --- a/.github/workflows/directory_writer.yml +++ b/.github/workflows/directory_writer.yml @@ -11,7 +11,8 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v6 with: - python-version: 3.x + python-version: 3.14 + allow-prereleases: true - name: Write DIRECTORY.md run: | scripts/build_directory_md.py 2>&1 | tee DIRECTORY.md diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index f52ff280b29a..dbea5aeeea02 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -14,21 +14,37 @@ jobs: project-euler: runs-on: ubuntu-latest steps: + - run: + sudo apt-get update && sudo apt-get install -y libtiff5-dev libjpeg8-dev libopenjp2-7-dev + zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev python3-tk + libharfbuzz-dev libfribidi-dev libxcb1-dev + libxml2-dev libxslt-dev + libhdf5-dev + libopenblas-dev - uses: actions/checkout@v5 - uses: astral-sh/setup-uv@v6 - uses: actions/setup-python@v6 with: - python-version: 3.x + python-version: 3.14 + allow-prereleases: true - run: uv sync --group=euler-validate --group=test - run: uv run pytest --doctest-modules --cov-report=term-missing:skip-covered --cov=project_euler/ project_euler/ validate-solutions: runs-on: ubuntu-latest steps: + - run: + sudo apt-get update && sudo apt-get install -y libtiff5-dev libjpeg8-dev libopenjp2-7-dev + zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev python3-tk + libharfbuzz-dev libfribidi-dev libxcb1-dev + libxml2-dev libxslt-dev + libhdf5-dev + libopenblas-dev - uses: actions/checkout@v5 - uses: astral-sh/setup-uv@v6 - uses: actions/setup-python@v6 with: - python-version: 3.x + python-version: 3.14 + allow-prereleases: true - run: uv sync --group=euler-validate --group=test - run: uv run pytest scripts/validate_solutions.py env: diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml index bd253dc3de65..c12ebb23ded3 100644 --- a/.github/workflows/sphinx.yml +++ b/.github/workflows/sphinx.yml @@ -25,11 +25,18 @@ jobs: build_docs: runs-on: ubuntu-24.04-arm steps: + - run: + sudo apt-get update && sudo apt-get install -y libtiff5-dev libjpeg8-dev libopenjp2-7-dev + 
zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev python3-tk + libharfbuzz-dev libfribidi-dev libxcb1-dev + libxml2-dev libxslt-dev + libhdf5-dev + libopenblas-dev - uses: actions/checkout@v5 - uses: astral-sh/setup-uv@v6 - uses: actions/setup-python@v6 with: - python-version: 3.13 + python-version: 3.14 allow-prereleases: true - run: uv sync --group=docs - uses: actions/configure-pages@v5 diff --git a/ciphers/gronsfeld_cipher.py b/ciphers/gronsfeld_cipher.py index 8fbeab4307fc..a72b141bd502 100644 --- a/ciphers/gronsfeld_cipher.py +++ b/ciphers/gronsfeld_cipher.py @@ -20,7 +20,7 @@ def gronsfeld(text: str, key: str) -> str: >>> gronsfeld('yes, ¥€$ - _!@#%?', '') Traceback (most recent call last): ... - ZeroDivisionError: integer modulo by zero + ZeroDivisionError: division by zero """ ascii_len = len(ascii_uppercase) key_len = len(key) diff --git a/machine_learning/xgboost_classifier.py b/machine_learning/xgboost_classifier.py index 1da933cf690f..e845480074b9 100644 --- a/machine_learning/xgboost_classifier.py +++ b/machine_learning/xgboost_classifier.py @@ -42,8 +42,6 @@ def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier: def main() -> None: """ - >>> main() - Url for the algorithm: https://xgboost.readthedocs.io/en/stable/ Iris type dataset is used to demonstrate algorithm. diff --git a/maths/largest_of_very_large_numbers.py b/maths/largest_of_very_large_numbers.py index edee50371e02..e38ab2edb932 100644 --- a/maths/largest_of_very_large_numbers.py +++ b/maths/largest_of_very_large_numbers.py @@ -15,7 +15,7 @@ def res(x, y): >>> res(-1, 5) Traceback (most recent call last): ... - ValueError: math domain error + ValueError: expected a positive input """ if 0 not in (x, y): # We use the relation x^y = y*log10(x), where 10 is the base. diff --git a/pyproject.toml b/pyproject.toml index 71eb730f1329..7e64ad6f150b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -10,16 +10,17 @@ classifiers = [ ] dependencies = [ "beautifulsoup4>=4.12.3", + "cython>=3.1.2", "fake-useragent>=1.5.1", "httpx>=0.28.1", "imageio>=2.36.1", "keras>=3.7", - "lxml>=5.3", + "lxml>=6", "matplotlib>=3.9.3", "numpy>=2.1.3", "opencv-python>=4.10.0.84", "pandas>=2.2.3", - "pillow>=11", + "pillow>=11.3", "rich>=13.9.4", "scikit-learn>=1.5.2", "sphinx-pyproject>=0.3", @@ -32,7 +33,7 @@ dependencies = [ [dependency-groups] test = [ - "pytest>=8.3.4", + "pytest>=8.4.1", "pytest-cov>=6", ] From f0d5949e5a86134b6a6172ea102281a142e9f69f Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 8 Oct 2025 07:41:20 +0200 Subject: [PATCH 49/66] Bump astral-sh/setup-uv from 6 to 7 (#13335) Bumps [astral-sh/setup-uv](https://github.com/astral-sh/setup-uv) from 6 to 7. - [Release notes](https://github.com/astral-sh/setup-uv/releases) - [Commits](https://github.com/astral-sh/setup-uv/compare/v6...v7) --- updated-dependencies: - dependency-name: astral-sh/setup-uv dependency-version: '7' dependency-type: direct:production update-type: version-update:semver-major ... 
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- .github/workflows/project_euler.yml | 4 ++-- .github/workflows/ruff.yml | 2 +- .github/workflows/sphinx.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 666d45b13c1b..43b3b3d9de1b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -17,7 +17,7 @@ jobs: libhdf5-dev libopenblas-dev - uses: actions/checkout@v5 - - uses: astral-sh/setup-uv@v6 + - uses: astral-sh/setup-uv@v7 with: enable-cache: true cache-dependency-glob: uv.lock diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index dbea5aeeea02..05adb43b0bd4 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -22,7 +22,7 @@ jobs: libhdf5-dev libopenblas-dev - uses: actions/checkout@v5 - - uses: astral-sh/setup-uv@v6 + - uses: astral-sh/setup-uv@v7 - uses: actions/setup-python@v6 with: python-version: 3.14 @@ -40,7 +40,7 @@ jobs: libhdf5-dev libopenblas-dev - uses: actions/checkout@v5 - - uses: astral-sh/setup-uv@v6 + - uses: astral-sh/setup-uv@v7 - uses: actions/setup-python@v6 with: python-version: 3.14 diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index 7bcc2850782f..b17236ccbff9 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -12,5 +12,5 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v5 - - uses: astral-sh/setup-uv@v6 + - uses: astral-sh/setup-uv@v7 - run: uvx ruff check --output-format=github . diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml index c12ebb23ded3..b945869e84a7 100644 --- a/.github/workflows/sphinx.yml +++ b/.github/workflows/sphinx.yml @@ -33,7 +33,7 @@ jobs: libhdf5-dev libopenblas-dev - uses: actions/checkout@v5 - - uses: astral-sh/setup-uv@v6 + - uses: astral-sh/setup-uv@v7 - uses: actions/setup-python@v6 with: python-version: 3.14 From 788d95b4101389617ca9f7fd043998089f98df78 Mon Sep 17 00:00:00 2001 From: Anuska Roy Date: Wed, 8 Oct 2025 17:41:28 +0530 Subject: [PATCH 50/66] added rotate_array.py (#13336) * added rotate_array.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed issues * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fixed reverse issue * added doctests * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * changed k to steps for a descriptive name * fixed non-pep --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- data_structures/arrays/rotate_array.py | 80 ++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 data_structures/arrays/rotate_array.py diff --git a/data_structures/arrays/rotate_array.py b/data_structures/arrays/rotate_array.py new file mode 100644 index 000000000000..d5ce4b4078b3 --- /dev/null +++ b/data_structures/arrays/rotate_array.py @@ -0,0 +1,80 @@ +def rotate_array(arr: list[int], steps: int) -> list[int]: + """ + Rotates a list to the right by steps positions. + + Parameters: + arr (List[int]): The list of integers to rotate. + steps (int): Number of positions to rotate. Can be negative for left rotation. + + Returns: + List[int]: Rotated list. 
+ + Examples: + >>> rotate_array([1, 2, 3, 4, 5], 2) + [4, 5, 1, 2, 3] + >>> rotate_array([1, 2, 3, 4, 5], -2) + [3, 4, 5, 1, 2] + >>> rotate_array([1, 2, 3, 4, 5], 7) + [4, 5, 1, 2, 3] + >>> rotate_array([], 3) + [] + """ + + n = len(arr) + if n == 0: + return arr + + steps = steps % n + + if steps < 0: + steps += n + + def reverse(start: int, end: int) -> None: + """ + Reverses a portion of the list in place from index start to end. + + Parameters: + start (int): Starting index of the portion to reverse. + end (int): Ending index of the portion to reverse. + + Returns: + None + + Examples: + >>> example = [1, 2, 3, 4, 5] + >>> def reverse_test(arr, start, end): + ... while start < end: + ... arr[start], arr[end] = arr[end], arr[start] + ... start += 1 + ... end -= 1 + >>> reverse_test(example, 0, 2) + >>> example + [3, 2, 1, 4, 5] + >>> reverse_test(example, 2, 4) + >>> example + [3, 2, 5, 4, 1] + """ + + while start < end: + arr[start], arr[end] = arr[end], arr[start] + start += 1 + end -= 1 + + reverse(0, n - 1) + reverse(0, steps - 1) + reverse(steps, n - 1) + + return arr + + +if __name__ == "__main__": + examples = [ + ([1, 2, 3, 4, 5], 2), + ([1, 2, 3, 4, 5], -2), + ([1, 2, 3, 4, 5], 7), + ([], 3), + ] + + for arr, steps in examples: + rotated = rotate_array(arr.copy(), steps) + print(f"Rotate {arr} by {steps}: {rotated}") From 1562ae1ec39bed716fc3f9da873844747dc0686c Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Tue, 14 Oct 2025 09:33:01 +0200 Subject: [PATCH 51/66] Add a README.md file to the scripts directory (#13480) * Add a README.md file to the scripts directory * updating DIRECTORY.md --------- Co-authored-by: cclauss --- DIRECTORY.md | 1 + scripts/README.md | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 scripts/README.md diff --git a/DIRECTORY.md b/DIRECTORY.md index 36acb3b97f1e..6249b75c4231 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -195,6 +195,7 @@ * [Permutations](data_structures/arrays/permutations.py) * [Prefix Sum](data_structures/arrays/prefix_sum.py) * [Product Sum](data_structures/arrays/product_sum.py) + * [Rotate Array](data_structures/arrays/rotate_array.py) * [Sparse Table](data_structures/arrays/sparse_table.py) * [Sudoku Solver](data_structures/arrays/sudoku_solver.py) * Binary Tree diff --git a/scripts/README.md b/scripts/README.md new file mode 100644 index 000000000000..92ebf3a7e8ba --- /dev/null +++ b/scripts/README.md @@ -0,0 +1,27 @@ +Dealing with the onslaught of Hacktoberfest +* https://hacktoberfest.com + +Each year, October brings a swarm of new contributors participating in Hacktoberfest. This event has its pros and cons, but it presents a monumental workload for the few active maintainers of this repo. The maintainer workload is further impacted by a new version of CPython being released in the first week of each October. + +To help make our algorithms more valuable to visitors, our CONTRIBUTING.md file outlines several strict requirements, such as tests, type hints, descriptive names, functions, and/or classes. Maintainers reviewing pull requests should try to encourage improvements to meet these goals, but when the workload becomes overwhelming (esp. in October), pull requests that do not meet these goals should be closed. + +Below are a few [`gh`](https://cli.github.com) scripts that should close pull requests that do not match the definition of an acceptable algorithm as defined in CONTRIBUTING.md. I tend to run these scripts in the following order. 
+ +* close_pull_requests_with_require_descriptive_names.sh +* close_pull_requests_with_require_tests.sh +* close_pull_requests_with_require_type_hints.sh +* close_pull_requests_with_failing_tests.sh +* close_pull_requests_with_awaiting_changes.sh +* find_git_conflicts.sh + +### Run on 14 Oct 2025: 107 of 541 (19.77%) pull requests closed. + +Script run | Open pull requests | Pull requests closed +--- | --- | --- +None | 541 | 0 +require_descriptive_names | 515 | 26 +require_tests | 498 | 17 +require_type_hints | 496 | 2 +failing_tests | 438 | ___58___ +awaiting_changes | 434 | 4 +git_conflicts | [ broken ] | 0 From 709c18ee9f1a19659a2187bb3f022037328de09a Mon Sep 17 00:00:00 2001 From: Khansa435 Date: Tue, 14 Oct 2025 16:14:22 +0500 Subject: [PATCH 52/66] Add t stochastic neighbour embedding using Iris dataset (#13476) * Added t-SNE with Iris dataset example * Added t-SNE with Iris dataset example * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Updated with descriptive variables * Add descriptive variable names * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Add Descriptive Variable names * Adding Descriptive variable names * Update machine_learning/t_stochastic_neighbour_embedding.py Co-authored-by: Christian Clauss * Update machine_learning/t_stochastic_neighbour_embedding.py Co-authored-by: Christian Clauss * Improved line formatting * Adding URL for t-SNE Wikipedia * Apply suggestion from @cclauss --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Christian Clauss --- .../t_stochastic_neighbour_embedding.py | 178 ++++++++++++++++++ 1 file changed, 178 insertions(+) create mode 100644 machine_learning/t_stochastic_neighbour_embedding.py diff --git a/machine_learning/t_stochastic_neighbour_embedding.py b/machine_learning/t_stochastic_neighbour_embedding.py new file mode 100644 index 000000000000..d6f630149087 --- /dev/null +++ b/machine_learning/t_stochastic_neighbour_embedding.py @@ -0,0 +1,178 @@ +""" +t-distributed stochastic neighbor embedding (t-SNE) + +For more details, see: +https://en.wikipedia.org/wiki/T-distributed_stochastic_neighbor_embedding +""" + +import doctest + +import numpy as np +from numpy import ndarray +from sklearn.datasets import load_iris + + +def collect_dataset() -> tuple[ndarray, ndarray]: + """ + Load the Iris dataset and return features and labels. + + Returns: + tuple[ndarray, ndarray]: Feature matrix and target labels. + + >>> features, targets = collect_dataset() + >>> features.shape + (150, 4) + >>> targets.shape + (150,) + """ + iris_dataset = load_iris() + return np.array(iris_dataset.data), np.array(iris_dataset.target) + + +def compute_pairwise_affinities(data_matrix: ndarray, sigma: float = 1.0) -> ndarray: + """ + Compute high-dimensional affinities (P matrix) using a Gaussian kernel. + + Args: + data_matrix: Input data of shape (n_samples, n_features). + sigma: Gaussian kernel bandwidth. + + Returns: + ndarray: Symmetrized probability matrix. 
+ + >>> x = np.array([[0.0, 0.0], [1.0, 0.0]]) + >>> probabilities = compute_pairwise_affinities(x) + >>> float(round(probabilities[0, 1], 3)) + 0.25 + """ + n_samples = data_matrix.shape[0] + squared_sum = np.sum(np.square(data_matrix), axis=1) + squared_distance = np.add( + np.add(-2 * np.dot(data_matrix, data_matrix.T), squared_sum).T, squared_sum + ) + + affinity_matrix = np.exp(-squared_distance / (2 * sigma**2)) + np.fill_diagonal(affinity_matrix, 0) + + affinity_matrix /= np.sum(affinity_matrix) + return (affinity_matrix + affinity_matrix.T) / (2 * n_samples) + + +def compute_low_dim_affinities(embedding_matrix: ndarray) -> tuple[ndarray, ndarray]: + """ + Compute low-dimensional affinities (Q matrix) using a Student-t distribution. + + Args: + embedding_matrix: Low-dimensional embedding of shape (n_samples, n_components). + + Returns: + tuple[ndarray, ndarray]: (Q probability matrix, numerator matrix). + + >>> y = np.array([[0.0, 0.0], [1.0, 0.0]]) + >>> q_matrix, numerators = compute_low_dim_affinities(y) + >>> q_matrix.shape + (2, 2) + """ + squared_sum = np.sum(np.square(embedding_matrix), axis=1) + numerator_matrix = 1 / ( + 1 + + np.add( + np.add(-2 * np.dot(embedding_matrix, embedding_matrix.T), squared_sum).T, + squared_sum, + ) + ) + np.fill_diagonal(numerator_matrix, 0) + + q_matrix = numerator_matrix / np.sum(numerator_matrix) + return q_matrix, numerator_matrix + + +def apply_tsne( + data_matrix: ndarray, + n_components: int = 2, + learning_rate: float = 200.0, + n_iter: int = 500, +) -> ndarray: + """ + Apply t-SNE for dimensionality reduction. + + Args: + data_matrix: Original dataset (features). + n_components: Target dimension (2D or 3D). + learning_rate: Step size for gradient descent. + n_iter: Number of iterations. + + Returns: + ndarray: Low-dimensional embedding of the data. + + >>> features, _ = collect_dataset() + >>> embedding = apply_tsne(features, n_components=2, n_iter=50) + >>> embedding.shape + (150, 2) + """ + if n_components < 1 or n_iter < 1: + raise ValueError("n_components and n_iter must be >= 1") + + n_samples = data_matrix.shape[0] + rng = np.random.default_rng() + embedding = rng.standard_normal((n_samples, n_components)) * 1e-4 + + high_dim_affinities = compute_pairwise_affinities(data_matrix) + high_dim_affinities = np.maximum(high_dim_affinities, 1e-12) + + embedding_increment = np.zeros_like(embedding) + momentum = 0.5 + + for iteration in range(n_iter): + low_dim_affinities, numerator_matrix = compute_low_dim_affinities(embedding) + low_dim_affinities = np.maximum(low_dim_affinities, 1e-12) + + affinity_diff = high_dim_affinities - low_dim_affinities + + gradient = 4 * ( + np.dot((affinity_diff * numerator_matrix), embedding) + - np.multiply( + np.sum(affinity_diff * numerator_matrix, axis=1)[:, np.newaxis], + embedding, + ) + ) + + embedding_increment = momentum * embedding_increment - learning_rate * gradient + embedding += embedding_increment + + if iteration == int(n_iter / 4): + momentum = 0.8 + + return embedding + + +def main() -> None: + """ + Run t-SNE on the Iris dataset and display the first 5 embeddings. + + >>> main() # doctest: +ELLIPSIS + t-SNE embedding (first 5 points): + [[... 
+ """ + features, _labels = collect_dataset() + embedding = apply_tsne(features, n_components=2, n_iter=300) + + if not isinstance(embedding, np.ndarray): + raise TypeError("t-SNE embedding must be an ndarray") + + print("t-SNE embedding (first 5 points):") + print(embedding[:5]) + + # Optional visualization (Ruff/mypy compliant) + + # import matplotlib.pyplot as plt + # plt.scatter(embedding[:, 0], embedding[:, 1], c=labels, cmap="viridis") + # plt.title("t-SNE Visualization of the Iris Dataset") + # plt.xlabel("Dimension 1") + # plt.ylabel("Dimension 2") + # plt.show() + + +if __name__ == "__main__": + doctest.testmod() + main() From e731514bd5f6111c1859895c6b19fae0de551513 Mon Sep 17 00:00:00 2001 From: iddu <127777022+1drie5@users.noreply.github.com> Date: Wed, 15 Oct 2025 19:33:33 +0530 Subject: [PATCH 53/66] Fix typo and function call in maths module (#13515) --- maths/factorial.py | 2 +- maths/fibonacci.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/maths/factorial.py b/maths/factorial.py index aaf90f384bb9..ba61447c7564 100644 --- a/maths/factorial.py +++ b/maths/factorial.py @@ -56,7 +56,7 @@ def factorial_recursive(n: int) -> int: raise ValueError("factorial() only accepts integral values") if n < 0: raise ValueError("factorial() not defined for negative values") - return 1 if n in {0, 1} else n * factorial(n - 1) + return 1 if n in {0, 1} else n * factorial_recursive(n - 1) if __name__ == "__main__": diff --git a/maths/fibonacci.py b/maths/fibonacci.py index 24b2d7ae449e..71ff479f9cc2 100644 --- a/maths/fibonacci.py +++ b/maths/fibonacci.py @@ -183,7 +183,7 @@ def fib_memoization(n: int) -> list[int]: """ if n < 0: raise ValueError("n is negative") - # Cache must be outside recursuive function + # Cache must be outside recursive function # other it will reset every time it calls itself. 
cache: dict[int, int] = {0: 0, 1: 1, 2: 1} # Prefilled cache From 9902c23e14fd90f163ee95d557d374fc44793cd9 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Wed, 15 Oct 2025 19:06:25 +0200 Subject: [PATCH 54/66] Delete requirements.txt because dependencies are in pyproject.toml (#13486) --- .github/workflows/build.yml | 9 ++------- DIRECTORY.md | 1 + pyproject.toml | 1 + requirements.txt | 19 ------------------- 4 files changed, 4 insertions(+), 26 deletions(-) delete mode 100644 requirements.txt diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 43b3b3d9de1b..9a97424c56c4 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -9,13 +9,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - run: - sudo apt-get update && sudo apt-get install -y libtiff5-dev libjpeg8-dev libopenjp2-7-dev - zlib1g-dev libfreetype6-dev liblcms2-dev libwebp-dev tcl8.6-dev tk8.6-dev python3-tk - libharfbuzz-dev libfribidi-dev libxcb1-dev - libxml2-dev libxslt-dev - libhdf5-dev - libopenblas-dev + - run: sudo apt-get update && sudo apt-get install -y libhdf5-dev - uses: actions/checkout@v5 - uses: astral-sh/setup-uv@v7 with: @@ -32,6 +26,7 @@ jobs: --ignore=computer_vision/cnn_classification.py --ignore=docs/conf.py --ignore=dynamic_programming/k_means_clustering_tensorflow.py + --ignore=machine_learning/local_weighted_learning/local_weighted_learning.py --ignore=machine_learning/lstm/lstm_prediction.py --ignore=neural_network/input_data.py --ignore=project_euler/ diff --git a/DIRECTORY.md b/DIRECTORY.md index 6249b75c4231..0f9859577493 100644 --- a/DIRECTORY.md +++ b/DIRECTORY.md @@ -624,6 +624,7 @@ * [Sequential Minimum Optimization](machine_learning/sequential_minimum_optimization.py) * [Similarity Search](machine_learning/similarity_search.py) * [Support Vector Machines](machine_learning/support_vector_machines.py) + * [T Stochastic Neighbour Embedding](machine_learning/t_stochastic_neighbour_embedding.py) * [Word Frequency Functions](machine_learning/word_frequency_functions.py) * [Xgboost Classifier](machine_learning/xgboost_classifier.py) * [Xgboost Regressor](machine_learning/xgboost_regressor.py) diff --git a/pyproject.toml b/pyproject.toml index 7e64ad6f150b..537ba79bd5f1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -23,6 +23,7 @@ dependencies = [ "pillow>=11.3", "rich>=13.9.4", "scikit-learn>=1.5.2", + "scipy>=1.16.2", "sphinx-pyproject>=0.3", "statsmodels>=0.14.4", "sympy>=1.13.3", diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 66b5d8a6b94e..000000000000 --- a/requirements.txt +++ /dev/null @@ -1,19 +0,0 @@ -beautifulsoup4 -fake-useragent -httpx -imageio -keras -lxml -matplotlib -numpy -opencv-python -pandas -pillow -rich -scikit-learn -sphinx-pyproject -statsmodels -sympy -tweepy -typing_extensions -xgboost From 85e67302d8ad4e813b939e18cfd734824d8f966a Mon Sep 17 00:00:00 2001 From: Matt Ryan <44824894+mattryanmtl@users.noreply.github.com> Date: Wed, 15 Oct 2025 13:22:51 -0400 Subject: [PATCH 55/66] Test on 3.14 (#13473) Tested on 3.14. 
--- sorts/binary_insertion_sort.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sorts/binary_insertion_sort.py b/sorts/binary_insertion_sort.py index 50653a99e7ce..b928316a849d 100644 --- a/sorts/binary_insertion_sort.py +++ b/sorts/binary_insertion_sort.py @@ -56,7 +56,7 @@ def binary_insertion_sort(collection: list) -> list: return collection -if __name__ == "__main": +if __name__ == "__main__": user_input = input("Enter numbers separated by a comma:\n").strip() try: unsorted = [int(item) for item in user_input.split(",")] From 3cea94179d40bc80e94fc4191808de86eadf6642 Mon Sep 17 00:00:00 2001 From: Omkaar <79257339+Ombucha@users.noreply.github.com> Date: Wed, 15 Oct 2025 23:05:55 +0530 Subject: [PATCH 56/66] Fix a few typos (#13346) * Fix typo in spheres intersection print statement * Fix typo in CONTRIBUTING.md * Improve comments in comb_sort.py * pyproject.toml: tool.ruff.target-version = "py314" * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Fix comment formatting in lint.ignore section --------- Co-authored-by: Christian Clauss Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- CONTRIBUTING.md | 2 +- maths/volume.py | 2 +- pyproject.toml | 8 ++++---- sorts/comb_sort.py | 3 +-- 4 files changed, 7 insertions(+), 8 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3df39f95b784..35de0bf75ed5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -99,7 +99,7 @@ We want your work to be readable by others; therefore, we encourage you to note ruff check ``` -- Original code submission require docstrings or comments to describe your work. +- Original code submissions require docstrings or comments to describe your work. - More on docstrings and comments: diff --git a/maths/volume.py b/maths/volume.py index 08bdf72b013b..1715c9c300d5 100644 --- a/maths/volume.py +++ b/maths/volume.py @@ -555,7 +555,7 @@ def main(): print(f"Torus: {vol_torus(2, 2) = }") # ~= 157.9 print(f"Conical Frustum: {vol_conical_frustum(2, 2, 4) = }") # ~= 58.6 print(f"Spherical cap: {vol_spherical_cap(1, 2) = }") # ~= 5.24 - print(f"Spheres intersetion: {vol_spheres_intersect(2, 2, 1) = }") # ~= 21.21 + print(f"Spheres intersection: {vol_spheres_intersect(2, 2, 1) = }") # ~= 21.21 print(f"Spheres union: {vol_spheres_union(2, 2, 1) = }") # ~= 45.81 print( f"Hollow Circular Cylinder: {vol_hollow_circular_cylinder(1, 2, 3) = }" diff --git a/pyproject.toml b/pyproject.toml index 537ba79bd5f1..60ba0d3b65d9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -3,10 +3,9 @@ name = "thealgorithms-python" version = "0.0.1" description = "TheAlgorithms in Python" authors = [ { name = "TheAlgorithms Contributors" } ] -requires-python = ">=3.13" +requires-python = ">=3.14" classifiers = [ "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.13", ] dependencies = [ "beautifulsoup4>=4.12.3", @@ -49,7 +48,7 @@ euler-validate = [ ] [tool.ruff] -target-version = "py313" +target-version = "py314" output-format = "full" lint.select = [ @@ -110,7 +109,7 @@ lint.ignore = [ # `ruff rule S101` for a description of that rule "B904", # Within an `except` clause, raise exceptions with `raise ... 
from err` -- FIX ME "B905", # `zip()` without an explicit `strict=` parameter -- FIX ME - "EM101", # Exception must not use a string literal, assign to variable first + "EM101", # Exception must not use a string literal, assign to a variable first "EXE001", # Shebang is present but file is not executable -- DO NOT FIX "G004", # Logging statement uses f-string "ISC001", # Conflicts with ruff format -- DO NOT FIX @@ -126,6 +125,7 @@ lint.ignore = [ "S311", # Standard pseudo-random generators are not suitable for cryptographic purposes -- FIX ME "SIM905", # Consider using a list literal instead of `str.split` -- DO NOT FIX "SLF001", # Private member accessed: `_Iterator` -- FIX ME + "UP037", # FIX ME ] lint.per-file-ignores."data_structures/hashing/tests/test_hash_map.py" = [ diff --git a/sorts/comb_sort.py b/sorts/comb_sort.py index 3c8b1e99a454..94ad8f533328 100644 --- a/sorts/comb_sort.py +++ b/sorts/comb_sort.py @@ -5,8 +5,7 @@ Comb sort improves on bubble sort algorithm. In bubble sort, distance (or gap) between two compared elements is always one. Comb sort improvement is that gap can be much more than 1, in order to prevent slowing -down by small values -at the end of a list. +down by small values at the end of a list. More info on: https://en.wikipedia.org/wiki/Comb_sort From 8edc478a19dda14a63300fc86b3520cc037621f9 Mon Sep 17 00:00:00 2001 From: Gunish Mukherji Date: Fri, 17 Oct 2025 06:06:38 +0530 Subject: [PATCH 57/66] Adding missing return type to pi_estimator function (#13427) - Add -> None return type annotation to pi_estimator function - Improves code clarity and follows Python type hinting best practices - Function already had proper type hints for parameters Co-authored-by: Gunish Mukherji Co-authored-by: Maxim Smolskiy --- maths/monte_carlo.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maths/monte_carlo.py b/maths/monte_carlo.py index d174a0b188a2..5eb176238ffb 100644 --- a/maths/monte_carlo.py +++ b/maths/monte_carlo.py @@ -8,7 +8,7 @@ from statistics import mean -def pi_estimator(iterations: int): +def pi_estimator(iterations: int) -> None: """ An implementation of the Monte Carlo method used to find pi. 1. Draw a 2x2 square centred at (0,0). 
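The `pi_estimator` docstring shown in the hunk above describes the classic dartboard argument: draw points uniformly in the 2x2 square centred at (0, 0), count how many land inside the unit circle, and the inside/total ratio approaches pi / 4. A minimal, self-contained sketch of that idea (an illustration only, not the repository's maths/monte_carlo.py code; the function name and default sample count below are made up):

    import math
    from random import uniform


    def estimate_pi(samples: int = 100_000) -> float:
        """Estimate pi by uniform sampling in the 2x2 square centred at (0, 0)."""
        inside = 0
        for _ in range(samples):
            x, y = uniform(-1.0, 1.0), uniform(-1.0, 1.0)
            # A sample lies inside the unit circle when x**2 + y**2 <= 1.
            if x * x + y * y <= 1.0:
                inside += 1
        # Circle area (pi * 1**2) over square area (2 * 2) equals pi / 4.
        return 4 * inside / samples


    if __name__ == "__main__":
        print(f"estimate: {estimate_pi():.5f}  math.pi: {math.pi:.5f}")

The error of such an estimate shrinks roughly as 1 / sqrt(samples), which is why an iteration count is the natural single parameter for a function of this kind.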
From c79034ca2114e56ede887a473c2853b8c6d49257 Mon Sep 17 00:00:00 2001 From: Harsh Pathak <156679457+HarshPathak310@users.noreply.github.com> Date: Fri, 17 Oct 2025 06:30:44 +0530 Subject: [PATCH 58/66] Update logical issue in decision_tree.py (#13303) Co-authored-by: Maxim Smolskiy --- machine_learning/decision_tree.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/machine_learning/decision_tree.py b/machine_learning/decision_tree.py index 72970431c3fc..b4df64796bb1 100644 --- a/machine_learning/decision_tree.py +++ b/machine_learning/decision_tree.py @@ -146,14 +146,13 @@ def predict(self, x): """ if self.prediction is not None: return self.prediction - elif self.left or self.right is not None: + elif self.left is not None and self.right is not None: if x >= self.decision_boundary: return self.right.predict(x) else: return self.left.predict(x) else: - print("Error: Decision tree not yet trained") - return None + raise ValueError("Decision tree not yet trained") class TestDecisionTree: @@ -201,4 +200,4 @@ def main(): main() import doctest - doctest.testmod(name="mean_squarred_error", verbose=True) + doctest.testmod(name="mean_squared_error", verbose=True) From 3b08413ab32bb5526c6043681db6e3ae9df4fd4a Mon Sep 17 00:00:00 2001 From: Tejasrahane <161036451+Tejasrahane@users.noreply.github.com> Date: Mon, 20 Oct 2025 02:21:11 +0530 Subject: [PATCH 59/66] Add doctest for circular queue overflow condition (#13590) * Add doctest for circular queue overflow condition Added a doctest to test the QUEUE IS FULL exception when attempting to enqueue an element into a full circular queue. This improves test coverage for line 67 in data_structures/queues/circular_queue.py. Fixes #9943 * Update circular_queue.py * Update circular_queue.py * Update circular_queue.py * Update circular_queue.py --------- Co-authored-by: Maxim Smolskiy --- data_structures/queues/circular_queue.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/data_structures/queues/circular_queue.py b/data_structures/queues/circular_queue.py index efbf1efdc42d..e9cb2cac4fd8 100644 --- a/data_structures/queues/circular_queue.py +++ b/data_structures/queues/circular_queue.py @@ -17,7 +17,7 @@ def __len__(self) -> int: >>> len(cq) 0 >>> cq.enqueue("A") # doctest: +ELLIPSIS - >>> cq.array ['A', None, None, None, None] >>> len(cq) @@ -51,17 +51,24 @@ def enqueue(self, data): """ This function inserts an element at the end of the queue using self.rear value as an index. + >>> cq = CircularQueue(5) >>> cq.enqueue("A") # doctest: +ELLIPSIS - >>> (cq.size, cq.first()) (1, 'A') >>> cq.enqueue("B") # doctest: +ELLIPSIS - >>> cq.array ['A', 'B', None, None, None] >>> (cq.size, cq.first()) (2, 'A') + >>> cq.enqueue("C").enqueue("D").enqueue("E") # doctest: +ELLIPSIS + + >>> cq.enqueue("F") + Traceback (most recent call last): + ... 
+ Exception: QUEUE IS FULL """ if self.size >= self.n: raise Exception("QUEUE IS FULL") @@ -75,6 +82,7 @@ def dequeue(self): """ This function removes an element from the queue using on self.front value as an index and returns it + >>> cq = CircularQueue(5) >>> cq.dequeue() Traceback (most recent call last): From 154cd3e4002d22756cf192c76db0a9ac8a918867 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jo=C3=A3o=20Neto?= Date: Sun, 19 Oct 2025 19:21:00 -0300 Subject: [PATCH 60/66] feat: optimizing the prune function at the apriori_algorithm.py archive (#12992) * feat: optimizing the prune function at the apriori_algorithm.py archive * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: fixing the unsorted importing statment * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * fix: fixing the key structure to a tuple that can be an hashable structure * Update apriori_algorithm.py * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update apriori_algorithm.py --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Maxim Smolskiy --- machine_learning/apriori_algorithm.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/machine_learning/apriori_algorithm.py b/machine_learning/apriori_algorithm.py index 09a89ac236bd..5c3e2baba2c2 100644 --- a/machine_learning/apriori_algorithm.py +++ b/machine_learning/apriori_algorithm.py @@ -11,6 +11,7 @@ Examples: https://www.kaggle.com/code/earthian/apriori-association-rules-mining """ +from collections import Counter from itertools import combinations @@ -44,11 +45,16 @@ def prune(itemset: list, candidates: list, length: int) -> list: >>> prune(itemset, candidates, 3) [] """ + itemset_counter = Counter(tuple(item) for item in itemset) pruned = [] for candidate in candidates: is_subsequence = True for item in candidate: - if item not in itemset or itemset.count(item) < length - 1: + item_tuple = tuple(item) + if ( + item_tuple not in itemset_counter + or itemset_counter[item_tuple] < length - 1 + ): is_subsequence = False break if is_subsequence: From 1b0bd167290bbdd5cb56972f1c6fb8d18698c839 Mon Sep 17 00:00:00 2001 From: michaelmccamy <149010657+michaelmccamy@users.noreply.github.com> Date: Sun, 19 Oct 2025 20:29:59 -0400 Subject: [PATCH 61/66] Add doctest for add_vertex in GraphAdjacencyList. Contributes to #9943 (#13143) * Add doctest for add_vertex in GraphAdjacencyList. Contributes to #9943 * Update graph_adjacency_list.py --------- Co-authored-by: Maxim Smolskiy --- graphs/graph_adjacency_list.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/graphs/graph_adjacency_list.py b/graphs/graph_adjacency_list.py index c901e2cf3dac..34014d69dfb8 100644 --- a/graphs/graph_adjacency_list.py +++ b/graphs/graph_adjacency_list.py @@ -61,6 +61,15 @@ def add_vertex(self, vertex: T) -> None: """ Adds a vertex to the graph. If the given vertex already exists, a ValueError will be thrown. + + >>> g = GraphAdjacencyList(vertices=[], edges=[], directed=False) + >>> g.add_vertex("A") + >>> g.adj_list + {'A': []} + >>> g.add_vertex("A") + Traceback (most recent call last): + ... + ValueError: Incorrect input: A is already in the graph. """ if self.contains_vertex(vertex): msg = f"Incorrect input: {vertex} is already in the graph." 
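The doctest added above exercises the duplicate-vertex guard in `GraphAdjacencyList.add_vertex`: the graph is a dictionary from each vertex to its neighbour list, and inserting an existing key raises the ValueError shown. A stripped-down sketch of just that behaviour (a toy class for illustration; the repository class also handles edges, directedness and removal, all omitted here):

    class TinyAdjacencyList:
        """Toy adjacency list, only enough to mirror the add_vertex doctest."""

        def __init__(self) -> None:
            self.adj_list: dict[str, list[str]] = {}

        def add_vertex(self, vertex: str) -> None:
            # Refuse duplicates so every key maps to exactly one neighbour list.
            if vertex in self.adj_list:
                raise ValueError(f"Incorrect input: {vertex} is already in the graph.")
            self.adj_list[vertex] = []


    graph = TinyAdjacencyList()
    graph.add_vertex("A")
    print(graph.adj_list)  # {'A': []}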
From e2a78d4e76adbce1f6b93f649820982165d6092d Mon Sep 17 00:00:00 2001 From: Md Mahiuddin <68785084+mahiuddin-dev@users.noreply.github.com> Date: Mon, 20 Oct 2025 06:59:36 +0600 Subject: [PATCH 62/66] Add test for non-integer input to factorial function (#13024) * Add test for non-integer input to factorial function * Update test_factorial.py --------- Co-authored-by: Maxim Smolskiy --- maths/test_factorial.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/maths/test_factorial.py b/maths/test_factorial.py index d80d88add745..1795ebba194f 100644 --- a/maths/test_factorial.py +++ b/maths/test_factorial.py @@ -33,5 +33,11 @@ def test_negative_number(function): function(-3) +@pytest.mark.parametrize("function", [factorial, factorial_recursive]) +def test_float_number(function): + with pytest.raises(ValueError): + function(1.5) + + if __name__ == "__main__": pytest.main(["-v", __file__]) From af17867f409ec80f4d0cc499943c455da4307cd4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 3 Nov 2025 21:02:38 +0100 Subject: [PATCH 63/66] [pre-commit.ci] pre-commit autoupdate (#13860) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [pre-commit.ci] pre-commit autoupdate updates: - [github.com/astral-sh/ruff-pre-commit: v0.13.3 → v0.14.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.13.3...v0.14.3) - [github.com/tox-dev/pyproject-fmt: v2.7.0 → v2.11.0](https://github.com/tox-dev/pyproject-fmt/compare/v2.7.0...v2.11.0) * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- pyproject.toml | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 82a669007945..e305772298d3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -19,7 +19,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.13.3 + rev: v0.14.3 hooks: - id: ruff-check - id: ruff-format @@ -32,7 +32,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: v2.7.0 + rev: v2.11.0 hooks: - id: pyproject-fmt diff --git a/pyproject.toml b/pyproject.toml index 60ba0d3b65d9..f1559d6bc1b1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -6,6 +6,7 @@ authors = [ { name = "TheAlgorithms Contributors" } ] requires-python = ">=3.14" classifiers = [ "Programming Language :: Python :: 3 :: Only", + "Programming Language :: Python :: 3.14", ] dependencies = [ "beautifulsoup4>=4.12.3", From ae68a7800883021b4bb5f40ccfb3773a189a8d09 Mon Sep 17 00:00:00 2001 From: Christian Clauss Date: Thu, 6 Nov 2025 13:11:49 +0100 Subject: [PATCH 64/66] uv run --with=pytest-run-parallel --iterations=8 --parallel-threads=auto (#13863) https://github.com/Quansight-Labs/pytest-run-parallel * https://py-free-threading.github.io * https://www.python.org/downloads/release/python-3140/ --- .github/workflows/build.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9a97424c56c4..bbe4b782a00a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -22,7 +22,8 @@ jobs: - run: uv sync --group=test - name: Run tests # TODO: #8818 Re-enable quantum tests - run: uv run pytest + run: uv run --with=pytest-run-parallel pytest + 
--iterations=8 --parallel-threads=auto --ignore=computer_vision/cnn_classification.py --ignore=docs/conf.py --ignore=dynamic_programming/k_means_clustering_tensorflow.py From a051ab5b0957c9a42559cc4bad30463af5708771 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Sat, 22 Nov 2025 08:11:22 +0100 Subject: [PATCH 65/66] Bump actions/checkout from 5 to 6 (#13937) Bumps [actions/checkout](https://github.com/actions/checkout) from 5 to 6. - [Release notes](https://github.com/actions/checkout/releases) - [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md) - [Commits](https://github.com/actions/checkout/compare/v5...v6) --- updated-dependencies: - dependency-name: actions/checkout dependency-version: '6' dependency-type: direct:production update-type: version-update:semver-major ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/build.yml | 2 +- .github/workflows/devcontainer_ci.yml | 2 +- .github/workflows/directory_writer.yml | 2 +- .github/workflows/project_euler.yml | 4 ++-- .github/workflows/ruff.yml | 2 +- .github/workflows/sphinx.yml | 2 +- 6 files changed, 7 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index bbe4b782a00a..2bb8e1d69217 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,7 @@ jobs: runs-on: ubuntu-latest steps: - run: sudo apt-get update && sudo apt-get install -y libhdf5-dev - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: astral-sh/setup-uv@v7 with: enable-cache: true diff --git a/.github/workflows/devcontainer_ci.yml b/.github/workflows/devcontainer_ci.yml index 71623e5e6e69..d1b81593866f 100644 --- a/.github/workflows/devcontainer_ci.yml +++ b/.github/workflows/devcontainer_ci.yml @@ -12,7 +12,7 @@ jobs: build: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: devcontainers/ci@v0.3 with: push: never diff --git a/.github/workflows/directory_writer.yml b/.github/workflows/directory_writer.yml index 866440a37b31..deffbe9e364f 100644 --- a/.github/workflows/directory_writer.yml +++ b/.github/workflows/directory_writer.yml @@ -6,7 +6,7 @@ jobs: directory_writer: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 with: fetch-depth: 0 - uses: actions/setup-python@v6 diff --git a/.github/workflows/project_euler.yml b/.github/workflows/project_euler.yml index 05adb43b0bd4..591b2163cc1a 100644 --- a/.github/workflows/project_euler.yml +++ b/.github/workflows/project_euler.yml @@ -21,7 +21,7 @@ jobs: libxml2-dev libxslt-dev libhdf5-dev libopenblas-dev - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: astral-sh/setup-uv@v7 - uses: actions/setup-python@v6 with: @@ -39,7 +39,7 @@ jobs: libxml2-dev libxslt-dev libhdf5-dev libopenblas-dev - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: astral-sh/setup-uv@v7 - uses: actions/setup-python@v6 with: diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index b17236ccbff9..13df19c8d743 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -11,6 +11,6 @@ jobs: ruff: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: astral-sh/setup-uv@v7 - run: uvx ruff check --output-format=github . 
diff --git a/.github/workflows/sphinx.yml b/.github/workflows/sphinx.yml index b945869e84a7..bf0a74a239c8 100644 --- a/.github/workflows/sphinx.yml +++ b/.github/workflows/sphinx.yml @@ -32,7 +32,7 @@ jobs: libxml2-dev libxslt-dev libhdf5-dev libopenblas-dev - - uses: actions/checkout@v5 + - uses: actions/checkout@v6 - uses: astral-sh/setup-uv@v7 - uses: actions/setup-python@v6 with: From 8934babb34b60d94c1a3b6a0409e2942d791c35a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 22:08:41 +0100 Subject: [PATCH 66/66] [pre-commit.ci] pre-commit autoupdate (#13979) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.14.3 → v0.14.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.14.3...v0.14.7) - [github.com/tox-dev/pyproject-fmt: v2.11.0 → v2.11.1](https://github.com/tox-dev/pyproject-fmt/compare/v2.11.0...v2.11.1) - [github.com/pre-commit/mirrors-mypy: v1.18.2 → v1.19.0](https://github.com/pre-commit/mirrors-mypy/compare/v1.18.2...v1.19.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e305772298d3..57f92ce941d9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -19,7 +19,7 @@ repos: - id: auto-walrus - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.14.3 + rev: v0.14.7 hooks: - id: ruff-check - id: ruff-format @@ -32,7 +32,7 @@ repos: - tomli - repo: https://github.com/tox-dev/pyproject-fmt - rev: v2.11.0 + rev: v2.11.1 hooks: - id: pyproject-fmt @@ -50,7 +50,7 @@ repos: - id: validate-pyproject - repo: https://github.com/pre-commit/mirrors-mypy - rev: v1.18.2 + rev: v1.19.0 hooks: - id: mypy args:
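A recurring pattern across this series (for example [PATCH 41], [PATCH 43], [PATCH 47] and this one) is a pre-commit autoupdate whose diff just moves the `rev` pins in .pre-commit-config.yaml. A quick way to see what such an update changed is to print the pinned revisions before and after running `pre-commit autoupdate`; a small sketch (it assumes PyYAML is available and is run from the repository root; it is not part of the repository or of these patches):

    from pathlib import Path

    import yaml

    # Each entry under `repos:` pins a remote hook repository to a tag via `rev`.
    config = yaml.safe_load(Path(".pre-commit-config.yaml").read_text())
    for repo in config["repos"]:
        print(f"{repo.get('rev', '<unpinned>'):>12}  {repo['repo']}")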