Skip to content

Commit

Permalink
don't run extras on 3.13
Browse files Browse the repository at this point in the history
  • Loading branch information
epinzur committed Feb 5, 2025
1 parent f05141d commit 4e2422e
Show file tree
Hide file tree
Showing 7 changed files with 83 additions and 49 deletions.
23 changes: 17 additions & 6 deletions .github/workflows/main.yml
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,13 @@ jobs:
with:
python-version: ${{ matrix.python-version }}

- name: Install Python dependencies
- name: (non-3.12) Install Python dependencies (without extras)
if: ${{ success() && matrix.python-version != '3.12' }}
run: uv sync --frozen --all-packages
shell: bash

- name: (3.12 only) Install Python dependencies (including all extras)
if: ${{ success() && matrix.python-version == '3.12' }}
run: uv sync --frozen --all-packages --all-extras
shell: bash

Expand All @@ -78,21 +84,26 @@ jobs:
uv run coverage run -m pytest -vs packages/graph-retriever
--junitxml=junit/test-results-gr-${{ matrix.python-version }}.xml

- name: (non-3.12) Test langchain-graph-retriever (In-Memory Stores)
if: ${{ success() && matrix.python-version != '3.12' }}
run: uv run pytest -vs packages/langchain-graph-retriever
- name: (3.10, 3.11) Test langchain-graph-retriever (In-Memory Stores) with extras
if: ${{ success() && (matrix.python-version == '3.10' || matrix.python-version == '3.11') }}
run: uv run pytest -vs --runextras packages/langchain-graph-retriever
--junitxml=junit/test-results-lgr-${{ matrix.python-version }}.xml

- name: (3.12 only) Test langchain-graph-retriever (All Stores)
- name: (3.12) Test langchain-graph-retriever (All Stores) with extras
if: ${{ success() && matrix.python-version == '3.12' }}
id: test
run: uv run coverage run -a -m pytest -vs packages/langchain-graph-retriever --stores=all
run: uv run coverage run -a -m pytest -vs --runextras packages/langchain-graph-retriever --stores=all
--junitxml=junit/test-results-lgr-${{ matrix.python-version }}.xml
env:
ASTRA_DB_APPLICATION_TOKEN: ${{ secrets.ASTRA_DB_APPLICATION_TOKEN }}
ASTRA_DB_API_ENDPOINT: ${{ secrets.ASTRA_DB_API_ENDPOINT }}
ASTRA_DB_KEYSPACE: ci_${{ github.run_id }}_${{ strategy.job-index }}_${{ github.run_attempt }}

- name: (3.13) Test langchain-graph-retriever (In-Memory Stores) without extras
if: ${{ success() && matrix.python-version == '3.13' }}
run: uv run pytest -vs packages/langchain-graph-retriever
--junitxml=junit/test-results-lgr-${{ matrix.python-version }}.xml

- name: Drop Astra Keyspace
# Even though it seems redundant, the `always() &&` is necessary to signal to
# GitHub actions that we want this to run even if the job is cancelled.
Expand Down
19 changes: 19 additions & 0 deletions packages/langchain-graph-retriever/tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,25 @@ def pytest_addoption(parser: Parser):
choices=TESTCONTAINER_STORES + ["none"],
help="which stores to run testcontainers for (default: 'all')",
)
parser.addoption(
"--runextras", action="store_true", default=False, help="run tests for extras"
)


def pytest_configure(config):
    """Register the custom ``extra`` marker so pytest recognizes it.

    Without this registration, ``@pytest.mark.extra`` would trigger an
    unknown-marker warning under ``--strict-markers``.
    """
    marker_spec = "extra: mark test as requiring an `extra` package"
    config.addinivalue_line("markers", marker_spec)


def pytest_collection_modifyitems(config, items):
    """Auto-skip tests marked ``extra`` unless ``--runextras`` was passed.

    When the ``--runextras`` CLI flag is present the collection is left
    untouched; otherwise every collected item carrying the ``extra``
    marker gets a skip marker attached.
    """
    if config.getoption("--runextras"):
        # Extras explicitly enabled on the command line: run everything.
        return
    skip_marker = pytest.mark.skip(reason="need --runextras option to run")
    extra_items = [it for it in items if "extra" in it.keywords]
    for it in extra_items:
        it.add_marker(skip_marker)


@pytest.fixture(scope="session")
Expand Down
Original file line number Diff line number Diff line change
@@ -1,22 +1,23 @@
from typing import Any

import pytest
from gliner import GLiNER # type: ignore
from langchain_core.documents import Document
from langchain_graph_retriever.transformers.gliner import GLiNERTransformer


class FakeGLiNER(GLiNER):
def __init__(self):
pass
@pytest.mark.extra
def test_transform_documents(animal_docs: list[Document]):
from gliner import GLiNER # type: ignore

def batch_predict_entities(
self, texts: list[str], **kwargs: Any
) -> list[list[dict[str, str]]]:
return [[{"text": text.split()[0], "label": "first"}] for text in texts]
class FakeGLiNER(GLiNER):
def __init__(self):
pass

def batch_predict_entities(
self, texts: list[str], **kwargs: Any
) -> list[list[dict[str, str]]]:
return [[{"text": text.split()[0], "label": "first"}] for text in texts]

def test_transform_documents(animal_docs: list[Document]):
fake_model = FakeGLiNER()
transformer = GLiNERTransformer(
["first"], model=fake_model, metadata_key_prefix="prefix_"
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
"""


@pytest.mark.extra
def test_transform_documents():
doc = Document(
id="animal_html",
Expand Down
Original file line number Diff line number Diff line change
@@ -1,25 +1,26 @@
from typing import Any

import pytest
from keybert import KeyBERT # type: ignore
from langchain_core.documents import Document
from langchain_graph_retriever.transformers.keybert import KeyBERTTransformer


class FakeKeyBERT(KeyBERT):
def __init__(self):
pass
@pytest.mark.extra
def test_transform_documents(animal_docs: list[Document]):
from keybert import KeyBERT # type: ignore

def extract_keywords(
self, docs: list[str], **kwargs: Any
) -> list[list[tuple[str, float]]]:
return [
[(word, len(word)) for word in set(doc.split()) if len(word) > 5]
for doc in docs
]
class FakeKeyBERT(KeyBERT):
def __init__(self):
pass

def extract_keywords(
self, docs: list[str], **kwargs: Any
) -> list[list[tuple[str, float]]]:
return [
[(word, len(word)) for word in set(doc.split()) if len(word) > 5]
for doc in docs
]

def test_transform_documents(animal_docs: list[Document]):
fake_model = FakeKeyBERT()
transformer = KeyBERTTransformer(model=fake_model, metadata_key="keybert")

Expand Down
39 changes: 20 additions & 19 deletions packages/langchain-graph-retriever/tests/transformers/test_spacy.py
Original file line number Diff line number Diff line change
@@ -1,28 +1,29 @@
import pytest
from langchain_core.documents import Document
from langchain_graph_retriever.transformers.spacy import SpacyNERTransformer
from spacy.language import Language
from spacy.tokens import Doc, Span
from spacy.vocab import Vocab


class FakeLanguage(Language):
def __init__(self):
pass

def __call__(self, text: str | Doc, **kwargs) -> Doc:
vocab = Vocab()
assert isinstance(text, str)
doc = Doc(vocab=vocab, words=text.split())
doc.ents = [
Span(doc, start=0, end=1, label="first"),
Span(doc, start=1, end=2, label="second"),
Span(doc, start=2, end=3, label="third"),
]
return doc


@pytest.mark.extra
def test_transform_documents(animal_docs: list[Document]):
from spacy.language import Language
from spacy.tokens import Doc, Span
from spacy.vocab import Vocab

class FakeLanguage(Language):
def __init__(self):
pass

def __call__(self, text: str | Doc, **kwargs) -> Doc:
vocab = Vocab()
assert isinstance(text, str)
doc = Doc(vocab=vocab, words=text.split())
doc.ents = [
Span(doc, start=0, end=1, label="first"),
Span(doc, start=1, end=2, label="second"),
Span(doc, start=2, end=3, label="third"),
]
return doc

fake_model = FakeLanguage()

transformer = SpacyNERTransformer(model=fake_model, metadata_key="spacey")
Expand Down
6 changes: 3 additions & 3 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -139,14 +139,14 @@ help = "Runs all formatting, lints, and checks (fixing where possible)"
sequence = [ "lock-fix", "fmt-fix", "lint-fix", "type-check", "dep-check"]

[tool.poe.tasks.test-gr]
help = "Runs all tests (against in-memory stores)"
help = "Runs graph-retriever tests (against in-memory stores)"
cwd = "packages/graph-retriever"
cmd = "uv run pytest -vs ."

[tool.poe.tasks.test-lgr]
help = "Runs all tests (against in-memory stores)"
help = "Runs langchain-graph-retriever tests (against in-memory stores)"
cwd = "packages/langchain-graph-retriever"
cmd = "uv run pytest -vs ."
cmd = "uv run pytest -vs --runextras ."

[tool.poe.tasks.test]
help = "Runs all tests (against in-memory stores)"
Expand Down

0 comments on commit 4e2422e

Please sign in to comment.