import os

import pandas as pd

from buster.documents_manager import DeepLakeDocumentsManager
from deeplake.core.vectorstore import VectorStore

# Deep Lake destination; override via environment variables to point the
# script at a different org/dataset (e.g. dev vs. prod).
DEEPLAKE_DATASET = os.getenv("DEEPLAKE_DATASET", "dev_vector_store")
DEEPLAKE_ORG = os.getenv("DEEPLAKE_ORG", "towards_ai")

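# Source document chunks, one CSV per knowledge source.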
df1 = pd.read_csv("./data/llm_course.csv")
df2 = pd.read_csv("./data/hf_transformers.csv")
df3 = pd.read_csv("./data/langchain_course.csv")
df4 = pd.read_csv("./data/filtered_tai_v2.csv")
df5 = pd.read_csv("./data/wiki.csv")  # if this file fails to decode, try encoding="ISO-8859-1"


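# Deep Lake datasets hosted on Activeloop are addressed as hub://<org>/<dataset>.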
dataset_path = f"hub://{DEEPLAKE_ORG}/{DEEPLAKE_DATASET}"

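# The documents manager checks that each dataframe has the required columns,
# computes embeddings, and writes everything to the vector store.
# overwrite=True rebuilds the dataset from scratch on every run.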
dm = DeepLakeDocumentsManager(
    vector_store_path=dataset_path,
    overwrite=True,
    required_columns=["url", "content", "source", "title"],
)

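# Embed and upload every dataframe with the same settings: batch_size bounds
# each upload chunk, min_time_interval throttles between batches (useful for
# embedding-API rate limits), and csv_filename keeps a local CSV copy of the
# computed embeddings (csv_overwrite=False so successive calls append to it
# rather than clobber it).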
for df in (df1, df2, df3, df4, df5):
    dm.batch_add(
        df=df,
        batch_size=3000,
        min_time_interval=60,
        num_workers=32,
        csv_filename="embeddings.csv",
        csv_overwrite=False,
    )
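
# Optional sanity check: a minimal sketch assuming deeplake's VectorStore API
# (the read_only flag and summary() method, as of deeplake 3.x). Reopens the
# dataset and prints a summary of what was ingested.
vs = VectorStore(path=dataset_path, read_only=True)
vs.summary()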