Company Surge Report

Identify companies with increased website activity over 7 days

Use Case

  • Identify companies with increased website activity over the past 7 days
  • Surface accounts showing buying signals for sales prioritization
  • Weekly report of companies visiting your site, ranked by visit frequency

Prerequisites

  • A delivr.ai API JWT token and organization ID
  • The pixel ID for the website you are tracking
  • Python 3 with the requests library installed

Steps

1. Get Daily Company Counts

Query event_counts with distinct=company_domain for each of the last 7 days to see the trend.

import time
import requests

TOKEN = "your_jwt_token"
ORG_ID = "your_organization_id"
PIXEL_ID = "your_pixel_id"

HEADERS = {
    "Authorization": f"Bearer {TOKEN}",
    "x-organization-id": ORG_ID,
}

ONE_DAY_MS = 24 * 60 * 60 * 1000
now_ms = int(time.time() * 1000)

print("Unique companies per day:")
for day in range(7):
    end_ms = now_ms - (day * ONE_DAY_MS)
    start_ms = end_ms - ONE_DAY_MS

    resp = requests.get(
        "https://apiv3.delivr.ai/api/v1/event_counts",
        headers=HEADERS,
        params={
            "pixel_id": PIXEL_ID,
            "start_ms": start_ms,
            "end_ms": end_ms,
            "filter": "resolved:eq:true",
            "distinct": "company_domain",
        },
    )
    count = resp.json().get("count", 0)
    print(f"  Day -{day}: {count} companies")
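
To turn these daily counts into a simple surge signal, store them in a list and compare the most recent days against the earlier ones. A minimal sketch, using the same query as above (the 20% threshold is an arbitrary choice, not an API feature):

daily_counts = []
for day in range(7):
    end_ms = now_ms - (day * ONE_DAY_MS)
    start_ms = end_ms - ONE_DAY_MS
    resp = requests.get(
        "https://apiv3.delivr.ai/api/v1/event_counts",
        headers=HEADERS,
        params={
            "pixel_id": PIXEL_ID,
            "start_ms": start_ms,
            "end_ms": end_ms,
            "filter": "resolved:eq:true",
            "distinct": "company_domain",
        },
    )
    daily_counts.append(resp.json().get("count", 0))

# daily_counts[0] is the most recent day; compare the last 3 days
# against the prior 4 to flag an uptick
recent = sum(daily_counts[:3]) / 3
earlier = sum(daily_counts[3:]) / 4
if earlier and recent > earlier * 1.2:
    print(f"Surge: averaging {recent:.0f} companies/day vs {earlier:.0f} before")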

2. Get Company Details for the Week

Fetch resolved events grouped by company across the full 7-day window. The API allows at most a 25-hour time window per request, so loop through one day at a time and aggregate.

from collections import Counter

company_events = Counter()
company_info = {}

for day in range(7):
    end_ms = now_ms - (day * ONE_DAY_MS)
    start_ms = end_ms - ONE_DAY_MS
    offset = 0

    while True:
        resp = requests.get(
            "https://apiv3.delivr.ai/api/v1/events",
            headers=HEADERS,
            params={
                "pixel_id": PIXEL_ID,
                "start_ms": start_ms,
                "end_ms": end_ms,
                "limit": 100,
                "offset": offset,
                "filter": "resolved:eq:true,company_domain:not_null",
                "select": "company_name,company_domain,company_industry,company_employee_count_range,first_name,last_name,job_title",
            },
        )
        rows = resp.json().get("rows", [])
        for row in rows:
            domain = row.get("company_domain", "")
            if domain:
                company_events[domain] += 1
                # Days are processed newest-first, so the first row
                # kept for each company is also the most recent
                if domain not in company_info:
                    company_info[domain] = row

        if len(rows) < 100:
            break
        offset += 100

3. Rank Companies by Activity

print("\nTop companies by event count (7 days):")
print(f"{'Company':<30} {'Domain':<25} {'Events':>6}  {'Industry'}")
print("-" * 90)

for domain, count in company_events.most_common(20):
    info = company_info.get(domain, {})
    name = info.get("company_name", domain)
    industry = info.get("company_industry", "")
    print(f"{name:<30} {domain:<25} {count:>6}  {industry}")

Example output:

Company                        Domain                    Events  Industry
------------------------------------------------------------------------------------------
Bank of America                bankofamerica.com             12  Banking
Prudential Financial           prudential.com                 8  Financial Services
Houston ISD                    houstonisd.org                 5  Government Administration

Complete Script

import csv
import time
from collections import Counter
import requests

TOKEN = "your_jwt_token"
ORG_ID = "your_organization_id"
PIXEL_ID = "your_pixel_id"

HEADERS = {
    "Authorization": f"Bearer {TOKEN}",
    "x-organization-id": ORG_ID,
}

ONE_DAY_MS = 24 * 60 * 60 * 1000
DAYS_BACK = 7
now_ms = int(time.time() * 1000)

# Collect all resolved events with company data
company_events = Counter()
company_info = {}
company_contacts = {}  # domain -> set of contact names

for day in range(DAYS_BACK):
    end_ms = now_ms - (day * ONE_DAY_MS)
    start_ms = end_ms - ONE_DAY_MS
    offset = 0

    while True:
        resp = requests.get(
            "https://apiv3.delivr.ai/api/v1/events",
            headers=HEADERS,
            params={
                "pixel_id": PIXEL_ID,
                "start_ms": start_ms,
                "end_ms": end_ms,
                "limit": 100,
                "offset": offset,
                "filter": "resolved:eq:true,company_domain:not_null",
                "select": "company_name,company_domain,company_industry,"
                          "company_employee_count_range,first_name,"
                          "last_name,job_title,email",
            },
        )
        rows = resp.json().get("rows", [])
        for row in rows:
            domain = row.get("company_domain", "")
            if domain:
                company_events[domain] += 1
                if domain not in company_info:
                    company_info[domain] = row
                # Track unique contacts per company
                name = f"{row.get('first_name') or ''} {row.get('last_name') or ''}".strip()
                if name:
                    company_contacts.setdefault(domain, set()).add(name)

        if len(rows) < 100:
            break
        offset += 100

# Write surge report CSV
with open("company_surge.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerow([
        "Company", "Domain", "Industry", "Size",
        "Events (7d)", "Unique Contacts", "Sample Contacts",
    ])
    for domain, count in company_events.most_common():
        info = company_info.get(domain, {})
        contacts = company_contacts.get(domain, set())
        writer.writerow([
            info.get("company_name", domain),
            domain,
            info.get("company_industry", ""),
            info.get("company_employee_count_range", ""),
            count,
            len(contacts),
            "; ".join(list(contacts)[:3]),
        ])

print(f"Exported {len(company_events)} companies to company_surge.csv")
print("\nTop 10:")
for domain, count in company_events.most_common(10):
    info = company_info.get(domain, {})
    contacts = len(company_contacts.get(domain, set()))
    print(f"  {info.get('company_name', domain)} ({domain}): {count} events, {contacts} contacts")

Variations

Filter by Company Size

Add a filter so only events with known company size are returned, then narrow to enterprise ranges client-side (see the sketch after the filter):

"filter": "resolved:eq:true,company_domain:not_null,company_employee_count_range:not_null"

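A minimal sketch of the client-side narrowing, reusing company_events and company_info from the complete script. The range labels here are hypothetical; inspect the company_employee_count_range values your data actually returns:

# Hypothetical labels; check the values present in your own data
ENTERPRISE_RANGES = {"1001-5000", "5001-10000", "10001+"}

enterprise = {
    domain: count
    for domain, count in company_events.items()
    if company_info.get(domain, {}).get("company_employee_count_range")
    in ENTERPRISE_RANGES
}

for domain, count in sorted(enterprise.items(), key=lambda x: -x[1]):
    print(f"{company_info[domain].get('company_name', domain)}: {count} events")
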
High-Intent Pages Only

Combine with URL filtering to find companies visiting pricing or demo pages (%25 is the URL-encoded % wildcard for the like operator):

"filter": "resolved:eq:true,company_domain:not_null,event_url:like:%25pricing%25"

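As a quick check, the same filter can be combined with the event_counts endpoint from step 1, assuming it accepts compound filters the same way the events endpoint does. A sketch that counts distinct companies hitting a pricing page in the last 24 hours; swap %25pricing%25 for %25demo%25 to cover demo pages:

resp = requests.get(
    "https://apiv3.delivr.ai/api/v1/event_counts",
    headers=HEADERS,
    params={
        "pixel_id": PIXEL_ID,
        "start_ms": now_ms - ONE_DAY_MS,
        "end_ms": now_ms,
        "filter": "resolved:eq:true,company_domain:not_null,event_url:like:%25pricing%25",
        "distinct": "company_domain",
    },
)
print(f"Companies on pricing pages (24h): {resp.json().get('count', 0)}")
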
Unique Companies Only (Quick Count)

Use distinct=company_domain with has_valuable_data=true to get one row per company:

resp = requests.get(
    "https://apiv3.delivr.ai/api/v1/events",
    headers=HEADERS,
    params={
        "pixel_id": PIXEL_ID,
        "start_ms": start_ms,
        "end_ms": end_ms,
        "limit": 100,
        "filter": "resolved:eq:true",
        "distinct": "company_domain",
        "has_valuable_data": "true",
        "select": "company_name,company_domain,company_industry,"
                  "company_employee_count_range,first_name,last_name,job_title",
    },
)

Notes

  • The Events API has a 25-hour maximum time window per request. The script loops through daily windows to cover 7 days.
  • company_domain:not_null excludes events where the company wasn't identified. Without it, you'll get rows with empty company fields.
  • The surge report shows raw event counts, not unique visitors. Multiple page views from the same person at the same company will inflate the count. Use distinct=hem if you want unique people per company instead (see the sketch below).
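
A minimal sketch of the unique-people variant, assuming distinct=hem returns one row per hashed email and accepts the same pagination and filter parameters as the step 2 query. Note that a person active on multiple days is counted once per daily window here:

unique_people = Counter()

for day in range(7):
    end_ms = now_ms - (day * ONE_DAY_MS)
    start_ms = end_ms - ONE_DAY_MS
    offset = 0

    while True:
        resp = requests.get(
            "https://apiv3.delivr.ai/api/v1/events",
            headers=HEADERS,
            params={
                "pixel_id": PIXEL_ID,
                "start_ms": start_ms,
                "end_ms": end_ms,
                "limit": 100,
                "offset": offset,
                "filter": "resolved:eq:true,company_domain:not_null",
                "distinct": "hem",
                "select": "company_domain",
            },
        )
        rows = resp.json().get("rows", [])
        # One row per unique person, so counting rows per domain
        # gives unique visitors per company
        for row in rows:
            unique_people[row.get("company_domain", "")] += 1
        if len(rows) < 100:
            break
        offset += 100

for domain, people in unique_people.most_common(10):
    print(f"{domain}: {people} unique people")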

Next Steps