davanstrien (HF Staff) committed
Commit a8d2ecd · 1 Parent(s): 7d840d3

fix: Use sink_parquet for true streaming (minimal memory)
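In outline, the change replaces the row-generator / IterableDataset.from_generator path with Polars' streaming sink: the lazy query is written directly to a temporary Parquet file via sink_parquet, which datasets then loads and pushes to the Hub. A minimal standalone sketch of that pattern follows; the eng_Latn subset and the output repo id are placeholders for illustration, not values taken from the script.

import tempfile
from pathlib import Path

import polars as pl
from datasets import Dataset

# Lazy scan of the FinePDFs parquet shards; "eng_Latn" is only an example subset.
lf = pl.scan_parquet("hf://datasets/HuggingFaceFW/finepdfs/data/eng_Latn/train/*.parquet")

with tempfile.TemporaryDirectory() as tmpdir:
    out = Path(tmpdir) / "data.parquet"
    # sink_parquet runs the lazy query on Polars' streaming engine and writes
    # batches straight to disk, so the full result never materialises in RAM.
    lf.sink_parquet(out)
    # Load the finished parquet with datasets and push it to the Hub
    # ("your-username/your-dataset" is a placeholder repo id).
    ds = Dataset.from_parquet(str(out))
    ds.push_to_hub("your-username/your-dataset")

The trade-off is disk space for the temporary file in exchange for bounded memory, and it removes the need for the explicit Features schema and the chunk-size tuning of the previous generator-based approach.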

Files changed (1)
  1. long-context-pdfs.py +15 -41
long-context-pdfs.py CHANGED
@@ -29,31 +29,11 @@ Examples:
 """
 
 import argparse
-import polars as pl
-from datasets import Features, IterableDataset, Value
-
-FEATURES = Features(
-    {
-        "id": Value("string"),
-        "url": Value("string"),
-        "text": Value("string"),
-        "language": Value("string"),
-        "token_count": Value("int64"),
-        "dump": Value("string"),
-        "page_average_lid_score": Value("float64"),
-    }
-)
-
+import tempfile
+from pathlib import Path
 
-def polars_to_generator(lf: pl.LazyFrame, chunk_size: int = 100):
-    """Stream LazyFrame as row generator with controlled memory.
-
-    Args:
-        lf: LazyFrame to stream
-        chunk_size: Rows per batch (lower = less memory). Default 100 for large text docs.
-    """
-    for batch_df in lf.collect_batches(chunk_size=chunk_size):
-        yield from batch_df.iter_rows(named=True)
+import polars as pl
+from datasets import Dataset
 
 
 def main():
@@ -81,18 +61,9 @@ def main():
     parser.add_argument("--limit", type=int, help="Limit rows")
     parser.add_argument("--output", type=str, help="Output dataset repo")
     parser.add_argument("--private", action="store_true")
-    parser.add_argument(
-        "--chunk-size",
-        type=int,
-        default=100,
-        help="Rows per batch (lower = less memory, default: 100)",
-    )
 
     args = parser.parse_args()
 
-    # Reduce streaming chunk size for large text documents (default can cause OOM)
-    pl.Config.set_streaming_chunk_size(1000)
-
     source = f"hf://datasets/HuggingFaceFW/finepdfs/data/{args.lang}/train/*.parquet"
 
     print("=" * 60)
@@ -145,7 +116,7 @@
         print("\nNo --output specified. Use --output to push to Hub.")
         return
 
-    # Rebuild query for streaming
+    # Rebuild query for streaming to parquet
     lf = (
         pl.scan_parquet(source)
         .filter(
@@ -168,14 +139,17 @@
     if args.limit:
         lf = lf.limit(args.limit)
 
-    print(f"\nStreaming to dataset (chunk_size={args.chunk_size})...")
-    ds = IterableDataset.from_generator(
-        lambda: polars_to_generator(lf, chunk_size=args.chunk_size),
-        features=FEATURES,
-    )
+    # Use sink_parquet for true streaming (minimal memory)
+    with tempfile.TemporaryDirectory() as tmpdir:
+        output_path = Path(tmpdir) / "data.parquet"
+        print("\nStreaming to parquet (sink_parquet)...")
+        lf.sink_parquet(output_path)
+
+        print(f"\nLoading parquet and pushing to {args.output}...")
+        ds = Dataset.from_parquet(str(output_path))
+        print(f"Dataset: {len(ds)} rows")
+        ds.push_to_hub(args.output, private=args.private)
 
-    print(f"\nPushing to {args.output} (streaming)...")
-    ds.push_to_hub(args.output, private=args.private)
     print(f"\nDone! https://huggingface.co/datasets/{args.output}")
 
 