Complete Python tutorial with code

Screenshot API in Python:
The Complete 2026 Guide

From first screenshot to production-grade batch processing — everything you need to capture web pages programmatically in Python without managing a headless browser.

Get Free API Key View Docs →

Why Use an API Instead of Playwright/Selenium?

Playwright and Selenium are powerful but they require installing a browser binary, managing browser instances, handling crashes, and dealing with 300-500MB Docker images. For the most common use case — taking a screenshot of a URL — a REST API is faster to integrate, cheaper to run, and requires zero ongoing maintenance.

This guide covers SnapAPI — a screenshot API with a Python SDK, free tier, and four endpoints (screenshot, scrape, extract, PDF). Everything runs from standard Python, no browser install required.

1. Your First Screenshot in 60 Seconds

pip install requests
import requests

API_KEY = 'YOUR_API_KEY'  # Get free at snapapi.pics/register

# Request a full-page PNG screenshot of the target URL.
response = requests.get(
    'https://api.snapapi.pics/v1/screenshot',
    headers={'X-API-Key': API_KEY},
    params={
        'url': 'https://example.com',
        'format': 'png',
        'full_page': True,
        'width': 1280
    },
    timeout=60  # bound the wait — a stalled connection would otherwise hang forever
)
# Fail loudly on 4xx/5xx instead of silently saving a JSON error body as a .png.
response.raise_for_status()

with open('screenshot.png', 'wb') as f:
    f.write(response.content)

print(f'Saved {len(response.content):,} bytes to screenshot.png')

That is the complete working script. No browser install, no async context manager, no process management. The response is raw PNG bytes — save directly, upload to S3, or encode as base64 for an API response.

2. Common Screenshot Parameters

import requests

def screenshot(url, **kwargs):
    """Return raw screenshot bytes for *url*.

    Extra keyword arguments are forwarded verbatim as API query
    parameters (format, full_page, width, quality, delay, ...).
    Raises requests.HTTPError on a non-2xx response.
    """
    r = requests.get(
        'https://api.snapapi.pics/v1/screenshot',
        headers={'X-API-Key': 'YOUR_KEY'},
        params={'url': url, **kwargs},
        timeout=30,  # don't hang indefinitely on a stalled network connection
    )
    r.raise_for_status()
    return r.content

# Full-page PNG at 1280px wide (captures the entire scroll height of the page)
img = screenshot('https://example.com', format='png', full_page=True, width=1280)

# Viewport screenshot (above-the-fold only); width/height define the viewport
img = screenshot('https://example.com', format='png', full_page=False, width=1200, height=630)

# JPEG at 80% quality — smaller file size than PNG, slightly lossy
img = screenshot('https://example.com', format='jpeg', quality=80, full_page=True)

# Wait 2000 ms before capture so lazy-loaded images and animations settle
img = screenshot('https://example.com', format='png', delay=2000, full_page=True)

# Hide cookie-consent banners automatically before capturing
img = screenshot('https://example.com', format='png', block_cookie_banners=True)

# Mobile viewport (375x812, common phone dimensions) with mobile emulation
img = screenshot('https://example.com', format='png', width=375, height=812, mobile=True)

3. Async Batch Processing with aiohttp

import asyncio, aiohttp
from pathlib import Path

API_KEY = 'YOUR_KEY'  # SnapAPI key — replace with your own
CONCURRENCY = 5  # Parallel requests — keep within your plan's concurrency allowance

# Targets to capture; the batch runner below handles any number of URLs.
URLS = [
    'https://example.com',
    'https://github.com',
    'https://python.org',
    # ... up to hundreds of URLs
]

async def screenshot_one(session, semaphore, url):
    """Capture one URL and save the PNG under screenshots/; prints OK/ERR per URL."""
    async with semaphore:  # caps in-flight requests at CONCURRENCY
        async with session.get(
            'https://api.snapapi.pics/v1/screenshot',
            headers={'X-API-Key': API_KEY},
            # NOTE: aiohttp rejects bool query-param values with a TypeError
            # ("value should be str, int or float"), so full_page is passed
            # as the string 'true' rather than Python's True.
            params={'url': url, 'format': 'png', 'full_page': 'true', 'width': 1280}
        ) as r:
            if r.status == 200:
                content = await r.read()
                # Derive a filesystem-safe name from the URL, truncated to 50 chars.
                slug = url.replace('https://', '').replace('/', '_')[:50]
                Path(f'screenshots/{slug}.png').write_bytes(content)
                print(f'OK  {url}')
            else:
                print(f'ERR {url}: {r.status}')

async def batch_screenshot(urls):
    """Screenshot every URL in *urls* concurrently, saving results into screenshots/."""
    Path('screenshots').mkdir(exist_ok=True)
    limiter = asyncio.Semaphore(CONCURRENCY)
    async with aiohttp.ClientSession() as session:
        await asyncio.gather(
            *(screenshot_one(session, limiter, target) for target in urls)
        )

if __name__ == '__main__':
    # Guard so importing this module doesn't immediately fire off the whole batch.
    asyncio.run(batch_screenshot(URLS))
    print(f'Done: {len(URLS)} screenshots')

4. PDF Generation from URL or HTML

import requests

API_KEY = 'YOUR_KEY'

# PDF from URL
def url_to_pdf(url, output_path='output.pdf'):
    """Render *url* as an A4 PDF and save it to *output_path*.

    Returns the output path so calls can be chained or logged.
    Raises requests.HTTPError on a non-2xx API response.
    """
    r = requests.post(
        'https://api.snapapi.pics/v1/pdf',
        headers={'X-API-Key': API_KEY},
        json={
            'url': url,
            'format': 'A4',
            'print_background': True,  # include CSS backgrounds in the PDF
            'margin': {'top': '20mm', 'bottom': '20mm', 'left': '15mm', 'right': '15mm'}
        },
        timeout=60,  # PDF rendering can be slow; bound the wait instead of hanging
    )
    r.raise_for_status()
    with open(output_path, 'wb') as f:
        f.write(r.content)
    return output_path

# PDF from raw HTML (no URL needed)
def html_to_pdf(html_string, output_path='invoice.pdf'):
    """Render a raw HTML string as an A4 PDF and save it to *output_path*.

    Returns the output path. Raises requests.HTTPError on a non-2xx response.
    """
    r = requests.post(
        'https://api.snapapi.pics/v1/pdf',
        headers={'X-API-Key': API_KEY},
        json={'html': html_string, 'format': 'A4', 'print_background': True},
        timeout=60,  # bound the wait — rendering large documents takes time
    )
    r.raise_for_status()
    with open(output_path, 'wb') as f:
        f.write(r.content)
    return output_path

# Example usage: PDF from a live URL...
url_to_pdf('https://docs.python.org/3/tutorial/', 'python_tutorial.pdf')

# ...and PDF from an inline HTML document (e.g. a generated invoice).
invoice_html = """<html><head><style>
  body { font-family: Inter, sans-serif; padding: 40px; }
  h1 { color: #1e293b; }
  table { width: 100%; border-collapse: collapse; }
  td, th { padding: 12px; border: 1px solid #e2e8f0; }
</style></head>
<body><h1>Invoice #1001</h1>...</body></html>"""
html_to_pdf(invoice_html, 'invoice_1001.pdf')

5. Web Scraping and Text Extraction

import requests

API_KEY = 'YOUR_KEY'

def extract_text(url, fmt='markdown'):
    """Extract clean text from any URL (strips ads, nav, boilerplate).

    fmt selects the output format (e.g. 'markdown'). Returns the response
    body as text. Raises requests.HTTPError on a non-2xx response.
    """
    r = requests.get(
        'https://api.snapapi.pics/v1/extract',
        headers={'X-API-Key': API_KEY},
        params={'url': url, 'format': fmt},
        timeout=30,  # bound the wait so callers don't block on a stalled request
    )
    r.raise_for_status()
    return r.text

def scrape_html(url, selector=None):
    """Get fully-rendered HTML after JavaScript execution.

    If *selector* is given, it is forwarded to the API so only the matching
    element's HTML is returned. Raises requests.HTTPError on a non-2xx response.
    """
    params = {'url': url}
    if selector:
        params['selector'] = selector
    r = requests.get(
        'https://api.snapapi.pics/v1/scrape',
        headers={'X-API-Key': API_KEY},
        params=params,
        timeout=30,  # bound the wait so callers don't block on a stalled request
    )
    r.raise_for_status()
    return r.text

# Extract article text for LLM processing (returns clean markdown)
article = extract_text('https://techcrunch.com/2026/04/01/ai-agents-news/')
print(article[:500])  # preview the first 500 chars; full text is ready for embedding

# Scrape a specific element by CSS selector (e.g. a pricing table)
price_html = scrape_html('https://competitor.com/pricing', selector='.pricing-table')
print(price_html)

6. Production-Ready: Retry and Error Handling

import requests, time, logging
from functools import wraps

# Module-level logger; basicConfig is a no-op if the root logger is already configured.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

def with_retry(max_attempts=3, base_delay=1.0):
    """Decorator: retry on HTTP 429/5xx and timeouts with exponential backoff.

    4xx errors other than 429 are raised immediately (retrying won't help).
    After *max_attempts* failures the LAST exception is re-raised, so the
    caller sees the real HTTP error or timeout rather than a generic one.
    """
    def decorator(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            last_exc = None
            for attempt in range(max_attempts):
                try:
                    return fn(*args, **kwargs)
                except requests.HTTPError as e:
                    status = e.response.status_code
                    if status != 429 and status < 500:
                        raise  # non-retryable 4xx: fail fast
                    last_exc = e
                    if attempt < max_attempts - 1:  # no pointless sleep after the final try
                        wait = base_delay * (2 ** attempt)
                        label = 'Rate limited' if status == 429 else f'Server error {status}'
                        logger.warning(f'{label}. Retry {attempt + 1}/{max_attempts} in {wait}s...')
                        time.sleep(wait)
                except requests.Timeout as e:
                    last_exc = e
                    logger.warning(f'Timeout on attempt {attempt + 1}')
            # All attempts exhausted — surface the real underlying failure.
            raise last_exc
        return wrapper
    return decorator

@with_retry(max_attempts=3)
def screenshot(url, **params):
    """Fetch screenshot bytes for *url*; transient failures are retried automatically."""
    query = {'url': url}
    query.update(params)
    resp = requests.get(
        'https://api.snapapi.pics/v1/screenshot',
        params=query,
        headers={'X-API-Key': 'YOUR_KEY'},
        timeout=30
    )
    resp.raise_for_status()
    return resp.content

# Now safe for production use: 429s/5xx/timeouts are retried with backoff
img = screenshot('https://example.com', format='png', full_page=True)

7. Using the Official Python SDK

pip install snapapi-python
from snapapi import SnapAPI

client = SnapAPI(api_key='YOUR_KEY')  # one client handles all four endpoints

# All methods return bytes (screenshot/pdf) or str (extract/scrape)
png = client.screenshot(url='https://example.com', format='png', full_page=True)
pdf = client.pdf(url='https://example.com', format='A4')
text = client.extract(url='https://example.com', format='markdown')
html = client.scrape(url='https://example.com')

# Save screenshot bytes to disk
with open('out.png', 'wb') as f:
    f.write(png)

FAQ

Do I need to install Chromium on my machine?

No. SnapAPI runs Chromium on its servers. Your Python script just makes HTTP requests. The only pip install you need is requests or aiohttp.

How do I screenshot pages that need authentication?

Pass a cookies list or headers dict in the request params. SnapAPI forwards them to Chromium before navigating, enabling screenshots of logged-in dashboards.

What is the rate limit?

No per-minute rate limit — only a monthly quota (200 free, 5K Starter, 50K Growth). Concurrency allowances vary by plan, so use a Semaphore in async code to stay within your plan's limits.

Can I use this with Django or Flask?

Yes. The requests library works in any Python framework. For async Flask (via ASGI) or FastAPI, use aiohttp or httpx. The SDK supports both sync and async usage.

Start Screenshotting in Python — Free

200 free API calls. No browser install. No Docker bloat. pip install requests and you are done.

Get Free API Key Read the Docs →

Advanced Pattern: Webhook-Based Async Screenshots

For high-volume pipelines, blocking a request handler while a screenshot renders is inefficient. A better pattern is to queue the capture as a background job and expose a webhook-style endpoint for completion notifications. The example below uses FastAPI's BackgroundTasks to fetch and save the screenshot after responding to the client, alongside a simple POST /webhook endpoint stub for receiving results.

from fastapi import FastAPI, BackgroundTasks, Request
import httpx, asyncio

app = FastAPI()
API_KEY = "your_snap_api_key"  # replace with your real SnapAPI key

@app.post("/trigger-screenshot")
async def trigger_screenshot(url: str, background_tasks: BackgroundTasks):
    """Queue a screenshot capture as a FastAPI background task.

    Responds immediately with a "queued" status; request_screenshot runs
    after the HTTP response has been sent and saves the result itself.
    """
    background_tasks.add_task(request_screenshot, url)
    return {"status": "queued", "url": url}

async def request_screenshot(url: str):
    """Fetch a screenshot of *url* and persist it to /tmp.

    Runs as a FastAPI background task after /trigger-screenshot responds.
    Raises httpx.HTTPStatusError on a non-2xx API response.
    """
    import hashlib  # stdlib; local import keeps the snippet self-contained

    async with httpx.AsyncClient() as client:
        resp = await client.get(
            "https://api.snapapi.pics/v1/screenshot",
            headers={"X-API-Key": API_KEY},
            params={"url": url, "format": "png", "width": 1280},
            timeout=30
        )
        resp.raise_for_status()
        # Use a stable digest for the filename: the builtin hash() is salted
        # per process (PYTHONHASHSEED) and can be negative, so it produces a
        # different, unreproducible name on every run.
        digest = hashlib.sha256(url.encode("utf-8")).hexdigest()[:16]
        # save to disk / upload to S3 / notify downstream
        with open(f"/tmp/shot_{digest}.png", "wb") as f:
            f.write(resp.content)

@app.post("/webhook")
async def receive_result(request: Request):
    payload = await request.json()
    # process incoming screenshot notification
    return {"ok": True}

Structured Data Extraction in Python

The extract endpoint is designed for AI pipelines, price monitoring, and competitive intelligence. Pass a JSON Schema describing the fields you want and get back typed JSON. Here's a practical example for extracting product data:

import requests

API_KEY = "your_snap_api_key"

def extract_product(url: str) -> dict:
    """Extract structured product data from *url* via the /v1/extract endpoint.

    The JSON Schema below tells the API which typed fields to return.
    Returns the parsed JSON response as a dict.
    Raises requests.HTTPError on a non-2xx response.
    """
    schema = {
        "type": "object",
        "properties": {
            "name":          {"type": "string"},
            "price":         {"type": "number"},
            "currency":      {"type": "string"},
            "in_stock":      {"type": "boolean"},
            "rating":        {"type": "number"},
            "review_count":  {"type": "integer"},
            "description":   {"type": "string"}
        }
    }

    resp = requests.post(
        "https://api.snapapi.pics/v1/extract",
        headers={"X-API-Key": API_KEY},
        json={"url": url, "schema": schema},
        timeout=60,  # extraction renders the page server-side; bound the wait
    )
    resp.raise_for_status()
    return resp.json()

# Example: pull typed product data from a single product page
product = extract_product("https://www.amazon.com/dp/B0EXAMPLE")
print(f"{product['name']} — ${product['price']} ({'In stock' if product['in_stock'] else 'Out of stock'})")

Common Python Integration Patterns

The most common Python integration patterns for SnapAPI in production systems include: scheduled screenshot monitoring jobs using APScheduler or Celery that compare visual diffs over time; Django management commands that generate PDF exports of reports on a cron schedule; FastAPI endpoints that proxy screenshot requests and cache results in Redis for 24 hours; and data pipelines using Pandas that extract structured data from hundreds of product pages and load results into a database.

All of these patterns work with the same API key and the same base URL. The SnapAPI Python SDK (pip install snapapi-python) wraps all four endpoints with type hints, automatic retries with exponential backoff, and streaming support for large PDFs or high-resolution screenshots.

Python Screenshot API FAQ

Do I need to install a browser to use SnapAPI from Python?

No. SnapAPI is a REST API — you call it with requests or httpx like any other HTTP service. No Playwright, no Chromium, no browser drivers required.

Does the Python SDK work with async frameworks like FastAPI?

Yes. Use httpx.AsyncClient directly or the async methods in the SDK. Both return proper awaitables that work with asyncio, FastAPI, and Starlette.

What's the rate limit?

All plans support concurrent requests. The free tier allows up to 2 concurrent calls; paid plans allow up to 10. For higher concurrency, contact support for a custom plan.