
Python SDK

The quell.sdk.Quell class provides a clean Python API for all of Quell's functionality. Use it to integrate Quell into scripts, notebooks, CI tools, or AI agents.

Install

pip install quelltest

Basic usage

from quell import Quell

q = Quell()

# Find requirement gaps
result = q.check("src/")
print(f"Score: {result.score:.0%} | Gaps: {len(result.uncovered)}")

# Reproduce a bug
q.reproduce("payment accepts zero amount silently")

# Project score
score = q.score()
print(f"Project: {score.percentage}%")

Constructor

Quell(
    llm: str = "anthropic",
    model: str | None = None,
    project_root: Path = Path("."),
)
Parameter      Default        Description
llm            "anthropic"    LLM provider: "anthropic", "openai", or "ollama"
model          None           Model name override (e.g. "claude-sonnet-4-6")
project_root   Path(".")      Root directory of the project
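
To target a different provider, pass llm (and, if needed, model). A minimal sketch using a local Ollama model; the model name here is illustrative, not a documented Quell default:

from quell import Quell

# "llama3.1" is an example model name, not a Quell default
q = Quell(llm="ollama", model="llama3.1")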

Methods

check

def check(
    target: str | Path,
    sources: list[str] = ["docstring", "type"],
    fix: bool = False,
) -> CheckResult

Scan specs, find requirement gaps, and optionally generate verified tests.

result = q.check("src/payments.py")
print(f"Score: {result.score:.0%}")
print(f"Covered: {len(result.covered)}/{len(result.requirements)}")

for req in result.uncovered:
    print(f"  Gap: {req.target_function} — {req.description}")

With fix=True, Quell generates a verified test for each gap and writes it to the test file:

result = q.check("src/", fix=True)
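
The sources parameter controls which spec sources are mined for requirements; judging from the defaults, it accepts "docstring" and "type". A sketch restricting the scan to docstrings only (an assumption based on the signature):

# Assumption: narrowing sources limits where requirements are extracted from
result = q.check("src/", sources=["docstring"])
print(f"Score: {result.score:.0%}")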

reproduce

def reproduce(
    description: str,
    file: str | None = None,
) -> bool

Convert a bug description into a verified failing test.

written = q.reproduce("payment accepts zero amount silently")
if written:
    print("Bug reproduction test written — fix the code, then run tests.")

Returns True if a verified test was written, False otherwise.
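
The optional file argument presumably scopes the reproduction to a specific module; this is an inference from the signature, and the exact semantics may differ:

written = q.reproduce(
    "payment accepts zero amount silently",
    file="src/payments.py",  # assumed to scope the search to this module
)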

prove

def prove(
    file: str | Path,
    function: str | None = None,
) -> float

Return the requirement coverage score (0.0–1.0) for a file or function.

score = q.prove("src/payments.py")
print(f"Coverage: {score:.0%}")

# For a specific function
score = q.prove("src/payments.py", function="process_payment")
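
Because prove returns a plain float, it drops straight into a threshold check, for example in a pre-commit hook. The 0.8 cutoff below is arbitrary:

import sys

if q.prove("src/payments.py") < 0.8:  # 0.8 is an arbitrary example threshold
    sys.exit("Requirement coverage below 80%; add tests before committing.")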

score

def score() -> ProjectScore

Return the project-wide requirement coverage score.

project_score = q.score()
print(f"Project: {project_score.percentage}%")

for fs in project_score.files:
    print(f"  {fs.file_path.name}: {fs.percentage}% ({fs.grade})")

Return types

CheckResult

class CheckResult(BaseModel):
    requirements: list[Requirement]
    covered: list[Requirement]
    uncovered: list[Requirement]
    score: float                    # 0.0–1.0

ProjectScore

class ProjectScore(BaseModel):
    files: list[FileScore]
    generated_at: datetime

    @property
    def total_score(self) -> float: ...   # 0.0–1.0
    @property
    def percentage(self) -> int: ...      # 0–100

FileScore

class FileScore(BaseModel):
    file_path: Path
    total_requirements: int
    covered_requirements: int
    quell_score: float              # 0.0–1.0

    @property
    def percentage(self) -> int: ...
    @property
    def grade(self) -> str: ...     # A/B/C/F
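
These models make it straightforward to gate CI on requirement coverage. A minimal sketch that fails the build below a chosen threshold (90 here is arbitrary), using only the documented fields and properties:

import sys

from quell import Quell

q = Quell()
ps = q.score()
if ps.percentage < 90:  # arbitrary threshold for illustration
    for fs in ps.files:
        if fs.grade == "F":  # surface the worst-scoring files
            print(f"  {fs.file_path.name}: {fs.percentage}%")
    sys.exit(f"Project coverage {ps.percentage}% is below the 90% gate.")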

Complete example

import os
from pathlib import Path
from quell import Quell

os.environ["ANTHROPIC_API_KEY"] = "sk-ant-..."

q = Quell(
    llm="anthropic",
    model="claude-sonnet-4-6",
    project_root=Path("/my/project"),
)

# Check current coverage
result = q.check("src/")
print(f"Score: {result.score:.0%} ({len(result.uncovered)} gaps)")

# Auto-fix all gaps
result = q.check("src/", fix=True)

# Check score after fix
project_score = q.score()
print(f"Project: {project_score.percentage}%")
for fs in project_score.files:
    print(f"  {fs.file_path.name}: {fs.percentage}% {fs.grade}")