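# Pack Common Crawl (CC-MAIN) parquet shards into one .jsonl.lzma archive per
# bucket, resumable across runs.
#   argv[1]: shard index; selects which window of buckets this job handles.
#   argv[2]: optional "fast" flag; trust mtimes instead of re-scanning archives.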
import glob, pandas, sys, os, json, lzma

fast_mode = len(sys.argv) > 2 and sys.argv[2] == "fast"

# Each run handles a contiguous window of `offset` buckets per dump.
offset = 10
index = int(sys.argv[1]) * offset

dumps = glob.glob("CC-MAIN-*")
for dump in dumps:
	all_buckets = glob.glob(f"{dump}/CC-MAIN-*")
	buckets = all_buckets[index : index + offset]
	print("\n\n- - - - -\n", dump, "has", len(all_buckets), flush=True)
	print(fast_mode, index, offset, buckets, flush=True) #DEBUG
	for bucket in buckets:
		files = glob.glob(f"{bucket}/*.parquet")
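		# Turn "CC-MAIN-<dump>/CC-MAIN-<bucket>" into "<dump>_<bucket>" for a flat output name.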
		bucket = bucket.replace(f"{dump}/", f"{dump}_")
		bucket = bucket.replace("CC-MAIN-", "")
		output_file = f"jsonl/{bucket}.jsonl.lzma"
		print(output_file, flush=True)

		# Resume support: skip inputs that are already in the archive.
		added_file_ids = set()
		if os.path.exists(output_file):
			ti_mcur = os.path.getmtime(output_file)
			ti_mmax = 0
			remains = []
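			# Find the newest input mtime; in fast mode, also collect the inputs
			# modified after the archive was last written.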
			for file in files:
				ti_m = os.path.getmtime(file)
				if ti_m > ti_mmax: ti_mmax = ti_m
				if fast_mode and ti_m > ti_mcur:
					remains.append(file)

			if ti_mcur > ti_mmax: continue  # archive newer than every input: nothing to add

			if fast_mode:
				files = remains
			else:
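				# Full resume: scan the archive once and collect every file id
				# already written, so finished inputs are never appended twice.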
				skip_token = '"id": "000000000."'  # dummy token, assumed not to match any real id
				with lzma.open(output_file, "rt") as fin:
					for line in fin:
						if skip_token not in line:
							file_id = line.split('"id": "')[1].split(".")[0] # "id": "{file_id}.{line_count}"
							skip_token = f'"id": "{file_id}.'
							assert file_id not in added_file_ids
							added_file_ids.add(file_id)

		# Append every input shard not already present in the archive.
		with lzma.open(output_file, "at") as fout:
			for file in files:
				file_id = file.split("/")[-1].replace(".parquet", "")
				if file_id in added_file_ids: continue
				print(f"Adding {file_id} to {output_file}...", flush=True)
				df = pandas.read_parquet(file)
				for line_count, row in df.iterrows():
					idd = f"{file_id}.{line_count}"
					ss = json.dumps({"text": row.iloc[0], "id": idd}, ensure_ascii=False)  # .iloc: first column positionally
					fout.write(ss + "\n")

print("fast_mode", fast_mode)