jonathanagustin committed on
Commit b6ac854 · verified · 1 Parent(s): f9eba6a

Clone from jonathanagustin/courtlistener-bulk

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. .claude/hooks/session-start.sh +71 -0
  2. .gitattributes +2 -58
  3. CLAUDE.md +225 -0
  4. README.md +58 -63
  5. data/citation-map/shard_00000.parquet +3 -0
  6. data/citation-map/shard_00001.parquet +3 -0
  7. data/citation-map/shard_00002.parquet +3 -0
  8. data/citation-map/shard_00003.parquet +3 -0
  9. data/citation-map/shard_00004.parquet +3 -0
  10. data/citation-map/shard_00005.parquet +3 -0
  11. data/citation-map/shard_00006.parquet +3 -0
  12. data/citation-map/shard_00007.parquet +3 -0
  13. data/citation-map/shard_00008.parquet +3 -0
  14. data/citation-map/shard_00009.parquet +3 -0
  15. data/citation-map/shard_00010.parquet +3 -0
  16. data/citation-map/shard_00011.parquet +3 -0
  17. data/citation-map/shard_00012.parquet +3 -0
  18. data/citation-map/shard_00013.parquet +3 -0
  19. data/citation-map/shard_00014.parquet +3 -0
  20. data/citation-map/shard_00015.parquet +3 -0
  21. data/citation-map/shard_00016.parquet +3 -0
  22. data/citation-map/shard_00017.parquet +3 -0
  23. data/citation-map/shard_00018.parquet +3 -0
  24. data/citation-map/shard_00019.parquet +3 -0
  25. data/citation-map/shard_00020.parquet +3 -0
  26. data/citation-map/shard_00021.parquet +3 -0
  27. data/citation-map/shard_00022.parquet +3 -0
  28. data/citation-map/shard_00023.parquet +3 -0
  29. data/citation-map/shard_00024.parquet +3 -0
  30. data/citation-map/shard_00025.parquet +3 -0
  31. data/citation-map/shard_00026.parquet +3 -0
  32. data/citation-map/shard_00027.parquet +3 -0
  33. data/citation-map/shard_00028.parquet +3 -0
  34. data/citation-map/shard_00029.parquet +3 -0
  35. data/citation-map/shard_00030.parquet +3 -0
  36. data/citation-map/shard_00031.parquet +3 -0
  37. data/citation-map/shard_00032.parquet +3 -0
  38. data/citation-map/shard_00033.parquet +3 -0
  39. data/citation-map/shard_00034.parquet +3 -0
  40. data/citation-map/shard_00035.parquet +3 -0
  41. data/citation-map/shard_00036.parquet +3 -0
  42. data/citation-map/shard_00037.parquet +3 -0
  43. data/citation-map/shard_00038.parquet +3 -0
  44. data/citation-map/shard_00039.parquet +3 -0
  45. data/citation-map/shard_00040.parquet +3 -0
  46. data/citation-map/shard_00041.parquet +3 -0
  47. data/citation-map/shard_00042.parquet +3 -0
  48. data/citation-map/shard_00043.parquet +3 -0
  49. data/citation-map/shard_00044.parquet +3 -0
  50. data/citation-map/shard_00045.parquet +3 -0
.claude/hooks/session-start.sh ADDED
@@ -0,0 +1,71 @@
+ #!/bin/bash
+ set -euo pipefail
+
+ # Only run in Claude Code on the Web (remote environment)
+ if [ "${CLAUDE_CODE_REMOTE:-}" != "true" ]; then
+     echo "Skipping session-start hook (not running in Claude Code on the Web)"
+     exit 0
+ fi
+
+ echo "========================================"
+ echo "CourtListener Bulk Sync - Session Start"
+ echo "========================================"
+ echo ""
+
+ # Navigate to project root
+ cd "${CLAUDE_PROJECT_DIR}"
+
+ # Install dependencies with uv
+ echo "Installing dependencies..."
+ if command -v uv >/dev/null 2>&1; then
+     uv sync 2>&1 || echo "Warning: uv sync had issues"
+ else
+     echo "Warning: uv not found. Install with: curl -LsSf https://astral.sh/uv/install.sh | sh"
+ fi
+
+ # Check for HF_TOKEN
+ if [ -z "${HF_TOKEN:-}" ]; then
+     echo ""
+     echo "WARNING: HF_TOKEN not set!"
+     echo "Set it with: export HF_TOKEN=<your-token>"
+     echo ""
+ fi
+
+ # Show sync status
+ echo ""
+ echo "Checking sync state..."
+ if [ -f "sync_state.json" ]; then
+     echo "Sync state found. Currently synced tables:"
+     python3 -c "
+ import json
+ try:
+     with open('sync_state.json') as f:
+         state = json.load(f)
+     for table, info in state.get('tables', {}).items():
+         rows = info.get('rows', 'unknown')
+         synced = info.get('synced_at', 'unknown')[:10] if info.get('synced_at') else 'unknown'
+         print(f'  - {table}: {rows} rows (synced {synced})')
+ except Exception as e:
+     print(f'  Error reading state: {e}')
+ " 2>/dev/null || echo "  (unable to parse sync_state.json)"
+ else
+     echo "No sync_state.json found - fresh start."
+ fi
+
+ # Show available tables
+ echo ""
+ echo "Available tables (by size):"
+ echo "  Small:  courts, people, financial-disclosures"
+ echo "  Medium: citations, parentheticals, citation-map"
+ echo "  Large:  clusters, dockets, opinions"
+
+ echo ""
+ echo "========================================"
+ echo "Ready to sync!"
+ echo "========================================"
+ echo ""
+ echo "Commands:"
+ echo "  uv run python sync.py --dry-run           # See what will sync"
+ echo "  uv run python sync.py --tables courts     # Sync one table"
+ echo "  uv run python sync.py --xet-high-perf     # Sync all (fast mode)"
+ echo ""
.gitattributes CHANGED
@@ -1,59 +1,3 @@
- *.7z filter=lfs diff=lfs merge=lfs -text
- *.arrow filter=lfs diff=lfs merge=lfs -text
- *.bin filter=lfs diff=lfs merge=lfs -text
- *.bz2 filter=lfs diff=lfs merge=lfs -text
- *.ckpt filter=lfs diff=lfs merge=lfs -text
- *.ftz filter=lfs diff=lfs merge=lfs -text
- *.gz filter=lfs diff=lfs merge=lfs -text
- *.h5 filter=lfs diff=lfs merge=lfs -text
- *.joblib filter=lfs diff=lfs merge=lfs -text
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
- *.lz4 filter=lfs diff=lfs merge=lfs -text
- *.mds filter=lfs diff=lfs merge=lfs -text
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
- *.model filter=lfs diff=lfs merge=lfs -text
- *.msgpack filter=lfs diff=lfs merge=lfs -text
- *.npy filter=lfs diff=lfs merge=lfs -text
- *.npz filter=lfs diff=lfs merge=lfs -text
- *.onnx filter=lfs diff=lfs merge=lfs -text
- *.ot filter=lfs diff=lfs merge=lfs -text
+ # Xet storage for large data files
  *.parquet filter=lfs diff=lfs merge=lfs -text
- *.pb filter=lfs diff=lfs merge=lfs -text
- *.pickle filter=lfs diff=lfs merge=lfs -text
- *.pkl filter=lfs diff=lfs merge=lfs -text
- *.pt filter=lfs diff=lfs merge=lfs -text
- *.pth filter=lfs diff=lfs merge=lfs -text
- *.rar filter=lfs diff=lfs merge=lfs -text
- *.safetensors filter=lfs diff=lfs merge=lfs -text
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
- *.tar.* filter=lfs diff=lfs merge=lfs -text
- *.tar filter=lfs diff=lfs merge=lfs -text
- *.tflite filter=lfs diff=lfs merge=lfs -text
- *.tgz filter=lfs diff=lfs merge=lfs -text
- *.wasm filter=lfs diff=lfs merge=lfs -text
- *.xz filter=lfs diff=lfs merge=lfs -text
- *.zip filter=lfs diff=lfs merge=lfs -text
- *.zst filter=lfs diff=lfs merge=lfs -text
- *tfevents* filter=lfs diff=lfs merge=lfs -text
- # Audio files - uncompressed
- *.pcm filter=lfs diff=lfs merge=lfs -text
- *.sam filter=lfs diff=lfs merge=lfs -text
- *.raw filter=lfs diff=lfs merge=lfs -text
- # Audio files - compressed
- *.aac filter=lfs diff=lfs merge=lfs -text
- *.flac filter=lfs diff=lfs merge=lfs -text
- *.mp3 filter=lfs diff=lfs merge=lfs -text
- *.ogg filter=lfs diff=lfs merge=lfs -text
- *.wav filter=lfs diff=lfs merge=lfs -text
- # Image files - uncompressed
- *.bmp filter=lfs diff=lfs merge=lfs -text
- *.gif filter=lfs diff=lfs merge=lfs -text
- *.png filter=lfs diff=lfs merge=lfs -text
- *.tiff filter=lfs diff=lfs merge=lfs -text
- # Image files - compressed
- *.jpg filter=lfs diff=lfs merge=lfs -text
- *.jpeg filter=lfs diff=lfs merge=lfs -text
- *.webp filter=lfs diff=lfs merge=lfs -text
- # Video files - compressed
- *.mp4 filter=lfs diff=lfs merge=lfs -text
- *.webm filter=lfs diff=lfs merge=lfs -text
+ data/**/*.parquet filter=lfs diff=lfs merge=lfs -text
CLAUDE.md ADDED
@@ -0,0 +1,225 @@
+ # CLAUDE.md
+
+ Instructions for Claude Code to continue syncing CourtListener bulk data.
+
+ ## Project Overview
+
+ This is a HuggingFace dataset repository that mirrors CourtListener's bulk legal data as Parquet files. The project has two main scripts:
+
+ 1. **sync.py** - Downloads CSV data from CourtListener's S3 bucket, converts to Parquet with zstd compression, and uploads to HuggingFace with Xet storage.
+
+ 2. **embed.py** - Generates embeddings for legal documents using BGE-large via HuggingFace Inference API. Stores embeddings as Parquet files in `embeddings/`.
+
+ ## Quick Start
+
+ ```bash
+ # Install dependencies
+ uv sync
+
+ # Set HuggingFace token
+ export HF_TOKEN=<your-token>
+
+ # Run sync (dry-run first to see what will sync)
+ uv run python sync.py --dry-run
+
+ # Sync specific tables (start with smaller ones)
+ uv run python sync.py --tables courts,people,financial-disclosures
+
+ # Sync with high-performance Xet mode
+ uv run python sync.py --xet-high-perf --tables citations
+ ```
+
+ ## Tables by Size (smallest first)
+
+ | Table | Compressed Size | Priority |
+ |-------|----------------|----------|
+ | courts | ~81 KB | 1 |
+ | people | ~455 KB | 2 |
+ | financial-disclosures | ~516 KB | 3 |
+ | citations | ~127 MB | 4 |
+ | parentheticals | ~271 MB | 5 |
+ | citation-map | ~515 MB | 6 |
+ | clusters | ~2.4 GB | 7 |
+ | dockets | ~4.9 GB | 8 |
+ | opinions | ~53.6 GB | 9 |
+
+ ## Sync State
+
+ The script maintains `sync_state.json` to track what has been synced. Each table entry includes:
+ - `source_key`: S3 key of the synced file
+ - `etag`: S3 ETag for change detection
+ - `synced_at`: Timestamp of last sync
+ - `rows`: Number of rows in the table
+
+ The script is fully idempotent - running it again will skip already-synced tables unless `--force` is used.
+
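For example, a minimal sketch of reading that state file (field names are from the list above; the `{"tables": {...}}` layout is the one the session-start hook above assumes):

```python
import json
from pathlib import Path

# Illustrative reader for sync_state.json, mirroring the session-start hook.
state_path = Path("sync_state.json")
if state_path.exists():
    state = json.loads(state_path.read_text())
    for table, info in state.get("tables", {}).items():
        print(
            f"{table}: {info.get('rows', '?')} rows, "
            f"etag={info.get('etag', '?')}, synced_at={info.get('synced_at', '?')}"
        )
else:
    print("No sync_state.json yet - nothing has been synced.")
```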
+ ## Disk Space Requirements
+
+ The script processes one table at a time and cleans up after each:
+ 1. Downloads compressed CSV to temp dir
+ 2. Decompresses and converts to Parquet
+ 3. Uploads to HuggingFace
+ 4. Deletes local files
+
+ Minimum disk space needed: ~2x the largest table you're syncing.
+ - For citations (127 MB): ~300 MB free
+ - For opinions (53.6 GB): ~120 GB free
+
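A quick pre-flight check along these lines can save an aborted run (the 2x rule of thumb and the sizes are from above; the dictionary and threshold here are illustrative, not part of sync.py):

```python
import shutil

# Approximate compressed sizes from the table above.
APPROX_BYTES = {"citations": 127 * 1024**2, "opinions": int(53.6 * 1024**3)}

def enough_space(table: str, path: str = ".") -> bool:
    """Check that ~2x the compressed table size is free before syncing."""
    free = shutil.disk_usage(path).free
    needed = 2 * APPROX_BYTES[table]
    print(f"{table}: need ~{needed / 1024**3:.1f} GiB, have {free / 1024**3:.1f} GiB free")
    return free >= needed

enough_space("citations")
```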
+ ## Environment Variables
+
+ | Variable | Required | Default | Description |
+ |----------|----------|---------|-------------|
+ | `HF_TOKEN` | Yes | - | HuggingFace token with write access |
+ | `HF_XET_HIGH_PERFORMANCE` | No | off | Saturate network + all CPU cores |
+ | `HF_XET_NUM_CONCURRENT_RANGE_GETS` | No | 16 | Parallel downloads per file |
+ | `HF_XET_CHUNK_CACHE_SIZE_BYTES` | No | 0 | Chunk cache for cross-file dedup |
+ | `HF_XET_SHARD_CACHE_SIZE_LIMIT` | No | 4GB | Shard cache to avoid re-uploads |
+ | `HF_XET_RECONSTRUCT_WRITE_SEQUENTIALLY` | No | off | Sequential writes for HDD |
+ | `HF_XET_CACHE` | No | ~/.cache/huggingface/xet | Xet cache directory |
+
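If you prefer configuring Xet through the environment rather than CLI flags, a small wrapper sketch (variable names are from the table above; the specific values and the idea of wrapping sync.py in a subprocess are assumptions, not how the script requires it to be run):

```python
import os
import subprocess

# Xet tuning knobs from the table above; "1" is assumed to mean "on".
os.environ["HF_XET_HIGH_PERFORMANCE"] = "1"
os.environ["HF_XET_NUM_CONCURRENT_RANGE_GETS"] = "32"
os.environ["HF_XET_CHUNK_CACHE_SIZE_BYTES"] = str(10 * 1024**3)  # 10 GB chunk cache

# Then invoke the sync script with the environment applied.
subprocess.run(["uv", "run", "python", "sync.py", "--tables", "citations"], check=True)
```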
+ ## CLI Options
+
+ ```
+ --tables TABLE1,TABLE2   Sync specific tables (comma-separated)
+ --dry-run                Show what would sync without doing it
+ --force                  Re-sync even if already complete
+ --list                   List available tables and exit
+ --work-dir PATH          Custom temp directory
+
+ Xet Performance Options:
+ --xet-high-perf          Max network/CPU utilization
+ --xet-concurrent N       Parallel downloads (default 16, try 32-64)
+ --xet-chunk-cache SIZE   Enable chunk cache (e.g., "10GB") for dedup
+ --xet-hdd                Sequential writes for spinning disks
+ ```
+
+ ## Performance Tuning Examples
+
+ ```bash
+ # Fast network, SSD storage (recommended for cloud VMs)
+ uv run python sync.py --xet-high-perf --xet-concurrent 64
+
+ # Enable chunk cache for better dedup across legal datasets
+ uv run python sync.py --xet-chunk-cache 10GB --tables citations,parentheticals
+
+ # Slow network or HDD storage
+ uv run python sync.py --xet-hdd --xet-concurrent 8
+
+ # Maximum performance (fast network + SSD + lots of RAM for cache)
+ uv run python sync.py --xet-high-perf --xet-concurrent 64 --xet-chunk-cache 20GB
+ ```
+
+ ## Resuming a Sync
+
+ If a sync is interrupted:
+ 1. The script automatically resumes from where it left off
+ 2. Completed tables are tracked in `sync_state.json`
+ 3. Partially uploaded files are not tracked (will re-sync)
+
+ To check current state:
+ ```bash
+ cat sync_state.json
+ ```
+
+ ## Source Data
+
+ - **S3 Bucket**: `com-courtlistener-storage`
+ - **URL Pattern**: `https://com-courtlistener-storage.s3.amazonaws.com/bulk-data/{table}-{date}.csv.bz2`
+ - **Update Frequency**: Daily (files are dated)
+
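As an illustration of that URL pattern only (the date string is hypothetical, and whether a given dated file exists is not guaranteed; sync.py is what actually discovers the latest export):

```python
import urllib.request

# Build a bulk-export URL from the pattern above. The date is a placeholder;
# the real script looks up the current dated file in the bucket.
table = "courts"
date = "2024-01-01"
url = f"https://com-courtlistener-storage.s3.amazonaws.com/bulk-data/{table}-{date}.csv.bz2"

with urllib.request.urlopen(url) as resp, open(f"{table}.csv.bz2", "wb") as out:
    out.write(resp.read())
```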
+ ## HuggingFace Repository
+
+ - **Repo ID**: `jonathanagustin/courtlistener-bulk`
+ - **Visibility**: Private
+ - **Storage**: Xet (chunk-level deduplication)
+
+ ---
+
+ ## Embeddings
+
+ ### Embedding Model
+
+ The project uses **BAAI/bge-large-en-v1.5** for generating embeddings:
+ - **Dimensions**: 1024
+ - **Context window**: 512 tokens
+ - **License**: MIT (commercially usable)
+ - **Provider**: BAAI (Beijing Academy of Artificial Intelligence)
+
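A minimal sketch of calling this model through the Inference API with `huggingface_hub` (embed.py batches, retries, and checkpoints differently; this only shows the model name and the 1024-dim output):

```python
import numpy as np
from huggingface_hub import InferenceClient

# Uses HF_TOKEN from the environment; model name from the section above.
client = InferenceClient(model="BAAI/bge-large-en-v1.5")

vec = np.asarray(client.feature_extraction(
    "The court held that the search violated the Fourth Amendment."
))
vec = np.squeeze(vec)  # some deployments wrap the vector in an extra batch dim
print(vec.shape)       # expect (1024,)
```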
+ ### Quick Start (Embeddings)
+
+ ```bash
+ # First sync the source data
+ uv run python sync.py --tables opinions
+
+ # Generate embeddings (dry-run first)
+ uv run python embed.py --dry-run
+
+ # Embed specific tables
+ uv run python embed.py --tables opinions,clusters
+
+ # Custom batch size (lower if hitting rate limits)
+ uv run python embed.py --batch-size 16 --tables parentheticals
+ ```
+
+ ### Embedding Tables
+
+ | Table | Source | Text Columns | Priority |
+ |-------|--------|--------------|----------|
+ | opinions | data/opinions.parquet | plain_text, html_lawbox, html_columbia, html | 1 |
+ | clusters | data/clusters.parquet | case_name, syllabus, attorneys, posture | 2 |
+ | parentheticals | data/parentheticals.parquet | text | 3 |
+ | dockets | data/dockets.parquet | case_name, cause, nature_of_suit | 4 |
+
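One plausible way to pick the text to embed from those columns (column names and the opinions source path are from the table above; the `id` column and the first-non-empty fallback order are assumptions, and embed.py's actual handling may differ):

```python
import pyarrow.parquet as pq

# Illustrative: choose the first non-empty text column per opinion row,
# following the column priority listed in the table above.
TEXT_COLUMNS = ["plain_text", "html_lawbox", "html_columbia", "html"]

pf = pq.ParquetFile("data/opinions.parquet")
for batch in pf.iter_batches(batch_size=256, columns=["id"] + TEXT_COLUMNS):
    for row in batch.to_pylist():
        text = next((row[c] for c in TEXT_COLUMNS if row.get(c)), "")
        if text:
            print(row["id"], text[:80].replace("\n", " "))
    break  # just the first batch for this sketch
```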
+ ### Embedding State
+
+ The script maintains `embed_state.json` to track progress:
+ - `rows_embedded`: Number of rows processed
+ - `source_rows`: Total rows in source table
+ - `model`: Model used for embeddings
+ - `status`: complete/in_progress/failed
+
+ Embedding is resumable - if interrupted, it picks up from the last checkpoint.
+
+ ### Output Format
+
+ Embeddings are stored as Parquet files in `embeddings/`:
+ ```
+ embeddings/
+ ├── opinions.parquet        # id (int), embedding (list<float32>)
+ ├── clusters.parquet
+ ├── parentheticals.parquet
+ └── dockets.parquet
+ ```
+
+ Each row contains:
+ - `id`: The source document ID
+ - `embedding`: 1024-dimensional float32 vector
+
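With that layout, a toy semantic-search sketch over one embeddings file (file path and columns from this section; the query text and the use of the Inference API for the query vector are illustrative assumptions):

```python
import numpy as np
import pyarrow.parquet as pq
from huggingface_hub import InferenceClient

# Load the id/embedding columns produced by embed.py.
table = pq.read_table("embeddings/parentheticals.parquet")
ids = table.column("id").to_numpy()
embs = np.array(table.column("embedding").to_pylist(), dtype=np.float32)  # (n, 1024)

# Embed the query with the same model, then rank rows by cosine similarity.
client = InferenceClient(model="BAAI/bge-large-en-v1.5")
query = np.squeeze(np.asarray(
    client.feature_extraction("qualified immunity for police officers"),
    dtype=np.float32,
))

scores = embs @ query / (np.linalg.norm(embs, axis=1) * np.linalg.norm(query))
for i in np.argsort(-scores)[:5]:
    print(ids[i], float(scores[i]))
```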
+ ### Embedding CLI Options
+
+ ```
+ --tables TABLE1,TABLE2   Embed specific tables (comma-separated)
+ --dry-run                Show what would embed without doing it
+ --force                  Re-embed even if already complete
+ --batch-size N           Texts per API call (default: 32)
+ --model MODEL            Override embedding model
+ --rate-limit N           Max requests per minute (default: 30, 0=unlimited)
+ --min-interval N         Minimum seconds between requests (default: 0)
+ --list                   List available tables and exit
+ ```
+
+ ### Rate Limits
+
+ The HF Inference API has rate limits. If you hit them:
+ 1. Reduce batch size: `--batch-size 16` or `--batch-size 8`
+ 2. The script automatically retries with exponential backoff
+ 3. Progress is saved periodically, so interruptions are safe
+
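The retry behaviour described above roughly corresponds to a loop like this (a generic sketch, not embed.py's actual code; the retry count and base delay are arbitrary):

```python
import time

def with_backoff(call, max_retries: int = 5, base_delay: float = 2.0):
    """Retry `call` with exponential backoff, as described above."""
    for attempt in range(max_retries):
        try:
            return call()
        except Exception as exc:  # e.g. HTTP 429 from the Inference API
            if attempt == max_retries - 1:
                raise
            delay = base_delay * (2 ** attempt)
            print(f"Request failed ({exc}); retrying in {delay:.0f}s")
            time.sleep(delay)
```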
+ ### Use Cases
+
+ Once embeddings are generated, they enable:
+ - **Semantic search**: Find cases by meaning, not just keywords
+ - **Similar case discovery**: Find related precedents
+ - **Citation recommendation**: Suggest relevant citations
+ - **Clustering**: Group similar legal documents
+ - **Classification**: Categorize documents by topic
README.md CHANGED
@@ -1,50 +1,39 @@
  ---
- license: cc-by-4.0
  task_categories:
- - text-classification
  - question-answering
  language:
  - en
  tags:
  - legal
- - law
  - court-opinions
  - courtlistener
- - free-law-project
  size_categories:
- - 10M<n<100M
- configs:
- - config_name: courts
-   data_files:
-   - split: train
-     path: data/courts/*.parquet
  ---

  # CourtListener Bulk Data

- This dataset contains bulk data from [CourtListener](https://www.courtlistener.com/),
- maintained by the [Free Law Project](https://free.law/).

- ## Dataset Description

- CourtListener is a free legal research website containing millions of legal opinions
- from federal and state courts. This dataset provides structured access to:

- - **Courts**: Metadata about courts in the US legal system
- - **Clusters**: Opinion clusters (cases) with metadata
- - **Dockets**: Case docket information
- - **Opinions**: Full text of judicial opinions
- - **Citations**: Citation relationships between opinions
- - **People**: Judge and attorney information
- - **Financial Disclosures**: Judicial financial disclosure data
-
- ## Statistics
-
- | Table | Rows | Files | Size (MB) |
- |-------|------|-------|-----------|
- | courts | 3,355 | 1 | 0.2 | ✅
-
- **Total**: 3,355 rows, 0.2 MB

  ## Usage

@@ -52,51 +41,57 @@ from federal and state courts. This dataset provides structured access to:
  from datasets import load_dataset

  # Load a specific table
- courts = load_dataset("drengskapur/courtlistener", "courts", split="train")
- clusters = load_dataset("drengskapur/courtlistener", "clusters", split="train")
-
- # Query via DuckDB (recommended for large tables)
- import duckdb
- conn = duckdb.connect()
- conn.execute("INSTALL httpfs; LOAD httpfs;")
- result = conn.execute("""
-     SELECT * FROM 'hf://datasets/drengskapur/courtlistener/data/clusters/*.parquet'
-     WHERE case_name ILIKE '%miranda%'
-     LIMIT 10
- """).fetchdf()
  ```

- ## HuggingFace Datasets Server API

- Query directly via HTTP without downloading:

  ```bash
- # Get rows
- curl "https://datasets-server.huggingface.co/rows?dataset=drengskapur/courtlistener&config=courts&split=train&length=10"

- # Search
- curl "https://datasets-server.huggingface.co/search?dataset=drengskapur/courtlistener&config=clusters&split=train&query=miranda"
  ```

- ## Data Source

- Data is sourced from CourtListener's public S3 bulk data exports:
- `s3://com-courtlistener-storage/bulk-data/`

- Updated daily via automated sync.

  ## License

- This data is provided under the [CC-BY 4.0](https://creativecommons.org/licenses/by/4.0/) license.
- Original data is from CourtListener / Free Law Project.

- ## Citation

- ```bibtex
- @misc{courtlistener,
-   title={CourtListener},
-   author={Free Law Project},
-   year={2024},
-   url={https://www.courtlistener.com/}
- }
- ```
  ---
+ license: pddl
  task_categories:
+ - text-generation
  - question-answering
  language:
  - en
  tags:
  - legal
  - court-opinions
+ - case-law
  - courtlistener
+ - pacer
+ - recap
  size_categories:
+ - 1M<n<10M
  ---

  # CourtListener Bulk Data

+ Private mirror of [CourtListener](https://www.courtlistener.com/) bulk data, converted to Parquet format for efficient querying and ML workflows.

+ ## Data Sources

+ This dataset syncs from CourtListener's official bulk data exports:

+ | Table | Description | Source |
+ |-------|-------------|--------|
+ | `courts` | Court metadata (700+ courts) | Core reference table |
+ | `clusters` | Opinion clusters with case metadata | Case law groupings |
+ | `opinions` | Full opinion text | Largest table (~9M decisions) |
+ | `citations` | Citation graph between opinions | Network analysis |
+ | `parentheticals` | Court-written case summaries | Compact case descriptions |
+ | `people` | Judge information | 16,000+ judges |
+ | `financial_disclosures` | Judge financial disclosures | 1.7M investment records |
+ | `dockets` | RECAP docket metadata | Federal court filings |

  ## Usage

  from datasets import load_dataset

  # Load a specific table
+ courts = load_dataset("jonathanagustin/courtlistener-bulk", data_files="data/courts.parquet")
+ opinions = load_dataset("jonathanagustin/courtlistener-bulk", data_files="data/opinions/*.parquet")
+
+ # Stream large tables
+ from datasets import load_dataset
+ opinions = load_dataset(
+     "jonathanagustin/courtlistener-bulk",
+     data_files="data/opinions/*.parquet",
+     streaming=True
+ )
+ for opinion in opinions["train"]:
+     print(opinion["case_name"])
  ```
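For ad-hoc SQL over the shards without downloading whole tables, the DuckDB approach from the previous README still applies; a sketch updated for this repo id (the `data/citation-map/*.parquet` path exists in this commit, but since the repo is private you will first need to register a Hugging Face token with DuckDB):

```python
import duckdb

con = duckdb.connect()
con.execute("INSTALL httpfs; LOAD httpfs;")
# For a private repo, configure a Hugging Face token in DuckDB before querying
# (see DuckDB's documentation on Hugging Face secrets).
df = con.execute(
    "SELECT * "
    "FROM 'hf://datasets/jonathanagustin/courtlistener-bulk/data/citation-map/*.parquet' "
    "LIMIT 5"
).fetchdf()
print(df)
```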

+ ## Sync Script

+ This dataset includes an idempotent sync script that can be run anywhere:

  ```bash
+ # Sync all tables
+ python sync.py

+ # Sync specific tables only
+ python sync.py --tables courts,citations
+
+ # Check what would be synced (dry run)
+ python sync.py --dry-run
  ```

+ The script:
+ - Downloads from the CourtListener bulk data API
+ - Converts CSV to Parquet with optimal compression
+ - Uploads to HuggingFace with Xet deduplication
+ - Tracks sync state to resume interrupted runs
+ - Cleans up local files after upload to minimize disk usage
+
+ ## Storage

+ This repository uses [Xet storage](https://huggingface.co/docs/hub/en/xet) for:
+ - 10x faster uploads/downloads
+ - Chunk-level deduplication (efficient incremental updates)
+ - Support for files >1TB

+ ## Update Frequency
+
+ CourtListener updates its bulk data regularly. Run `sync.py` to pull the latest changes.

  ## License

+ Public Domain Dedication and License (PDDL) - same as the CourtListener source data.

+ ## Attribution

+ Data provided by [Free Law Project](https://free.law/) via [CourtListener](https://www.courtlistener.com/).
data/citation-map/shard_00000.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c7d347e7fb4a888d32fa52ae94dfb3b34e8cf4f14c2f1d2b0660e9587c9fa9da
+ size 4031405
data/citation-map/shard_00001.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fd1f7aa8109c020bb9430b5d1ca6d3e650234050fce1c0bb0396018f4ae8664b
+ size 4049755
data/citation-map/shard_00002.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ce2d769eae89c75b53e9bcdfe57171e94cbb9523c8afac5e783981d206bae660
+ size 4003163
data/citation-map/shard_00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:20bc53522040bd4945574953e20c5a8553bf6186c013f16def62fbde35f0f317
+ size 4039647
data/citation-map/shard_00004.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:153c33d7272ae91352e09df0404928358e3de104cbe6c4876ae4feabcad5b02c
+ size 4056931
data/citation-map/shard_00005.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d58271348aabe1895a0fe2af3b174d2b3fc7f4da9fefbdff0ab0635a6e9821f
+ size 4046229
data/citation-map/shard_00006.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90469c630b5e238190cd872fd2027fdd5ab5868d4a9ca8cc3bce18613f3b796f
+ size 3990822
data/citation-map/shard_00007.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff11c6c402fe2f6a0ea2214ee45723fcd6c7de85c22bb975d5231f4116f9ed5c
+ size 3955459
data/citation-map/shard_00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a769ce334b85e9a33499e3d7ab1a13fe9e87ffbdb9204df8b881a295bcc117cf
+ size 4020589
data/citation-map/shard_00009.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8b5020497ab004f71d8c1986196d1fc097408452ea292c48bfd42dafc6f0abac
+ size 4014322
data/citation-map/shard_00010.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:016273e56734fc457599b45aff868818e1aaddadd84f7e0d49e5e84fe02c9ac2
+ size 4023483
data/citation-map/shard_00011.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b8d46286054a0258bd8c30b57357312467a542ca4590dd9bdf0de3c3c8c81001
+ size 4006521
data/citation-map/shard_00012.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b22658fa0f21a045b9ffe45ac068140b0d0936943dd9e2db657815004a0f128
+ size 4048528
data/citation-map/shard_00013.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:efd96d023b58f550ffba635b427cd86f0e8684afb3fc49ffdcaf9ad8b4cef0cb
+ size 4011946
data/citation-map/shard_00014.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:168d9a1d43fd96645b113d69a8d36a5be0c80391891741f4f5a20cc897edddc6
+ size 3990585
data/citation-map/shard_00015.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edc4975e5d3c65d7088ed4f459fe13458b2ba9f68e02afdcee5aaedf0ad6ceeb
+ size 3956004
data/citation-map/shard_00016.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c3553176758593ce938e462b0107cfec6ef8e5e7628ef45a7ebe58bace156890
+ size 4019564
data/citation-map/shard_00017.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba32e44adb7d13d4dedfda8f3806dc276d26250532c28b34c9c188b789ff4a85
+ size 4006936
data/citation-map/shard_00018.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d87ebd034ea65d93717d5dec1de50d674ec0fc2e2bf0b111b244e96e85863484
+ size 4002442
data/citation-map/shard_00019.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1cce0458a41c4fc04d5136395380189b0da7956801d95449d32ccbc6894592f2
+ size 3954754
data/citation-map/shard_00020.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:78dd3c1b4e546e2cdb3a0aecf1958cf19dd0a2a6b5849652ee10f0e199cc03fb
+ size 3923510
data/citation-map/shard_00021.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7101661b52dc6987a083205cdb47f290e5bf9980d597d1d7dbe291aecfacc988
+ size 3883211
data/citation-map/shard_00022.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ba7b9fbbc5966818c45fc61e8b4c311e42624aa4185416cd2917024da9afe40d
+ size 3846976
data/citation-map/shard_00023.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:526521600841b720a5dda6185da0e9f870b7b5f4efe43ce0c329b4b46741167a
+ size 3736283
data/citation-map/shard_00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3e75af009db598f8d6fb63571b44fded3c7381cf1f2e46f0b20d2300f4a6cefa
+ size 3742760
data/citation-map/shard_00025.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fce57a7a7946b3a376a3458e3e1a7ce5240d3975f072c5f1879a4dd3c36625a
+ size 3825918
data/citation-map/shard_00026.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f03185ab848bacb4a3470f401601e4a357405d78d03034de69d1faa3abf1af6
+ size 3806457
data/citation-map/shard_00027.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a2b7ac6f5db73e1df0a2a3995cd4d8fbf4bd3de1a01618b2f79a10540a21f3fd
+ size 3818699
data/citation-map/shard_00028.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f5a1d7cba4f8a2cd14b7025043be4bad421db1beadd39255b0980217a9b239cd
+ size 3729960
data/citation-map/shard_00029.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97a3e1b192ea45c2e22bb0a60000c4084424cb1da733601aa6eaa9c6dd9ff84f
+ size 4055647
data/citation-map/shard_00030.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:31b044421f4f52281efddc0f91834799b0a21831e39017622af76c595080c16a
+ size 3666082
data/citation-map/shard_00031.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ebc2350fef433e5333608949d97f78e600a3077955319d848056fdc8826a99cf
+ size 4179088
data/citation-map/shard_00032.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22af90507e2c40656ace3c05ef74d14cf219c6dc153d9b32f68a22193fc00a0e
+ size 3875198
data/citation-map/shard_00033.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7692282c0e6808c130ee4e25ef8d4267451dd9ac60ba66e198fccb82b0517922
+ size 3851605
data/citation-map/shard_00034.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:424f29c25b3e7ae510ca18a5c1658691cfba6a23df3f7238d0ab695a44d0175a
+ size 3851584
data/citation-map/shard_00035.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:447d348231d865982230f1e2227ff031c62064cf77724ac9c33f9c977deed459
+ size 3815527
data/citation-map/shard_00036.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cdf4134d1cf61289ea2636b9d22e01cb61e1d340ae0b0536f1a942f94d76fec9
+ size 3811038
data/citation-map/shard_00037.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b3497158d2afae75573941470ae5dc6315c8ec7f398a56fa2a07b08b7dd54b46
+ size 3872711
data/citation-map/shard_00038.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d35c19f02db16e5f4ec52d7d9aff173d8deff1caf7d0e90a25fbb3aab21c3347
+ size 3874715
data/citation-map/shard_00039.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5f0c37960bddd6b0f64da08a235b1e4aeba1afb1826f87edf6527bf0f1870070
+ size 3961661
data/citation-map/shard_00040.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:da705471e7f6d8852fc46056bddd2ef3b2a72669d7f31913b92e5461d16723eb
+ size 3992810
data/citation-map/shard_00041.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c593139a3920683708f453e0a9b97594af18805dd2c9b58a43a67e3fb7cc6ca5
+ size 3969148
data/citation-map/shard_00042.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:30556304cc5f72991e4aae5f9f81d9abd9fa557b0a74cee48ea0633975a7dcba
+ size 3917610
data/citation-map/shard_00043.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dfdf7cd639e113d61ee8013a41ebd9335c528bf1da2efebe322e2cea2d184417
+ size 3864201
data/citation-map/shard_00044.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d08b6683394990bb13dc74bcaea50da166be455573226bc289a3d42e0a26da7d
+ size 3722499
data/citation-map/shard_00045.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d27e22eb06d29c9c31c2e5df7f8f90547bf8bd59e568281e659871d547137a0e
+ size 3873227