import os

import pandas as pd
from buster.documents_manager import DeepLakeDocumentsManager
from deeplake.core.vectorstore import VectorStore

# Target Deep Lake dataset; both values can be overridden via environment variables.
DEEPLAKE_DATASET = os.getenv("DEEPLAKE_DATASET", "dev_vector_store")
DEEPLAKE_ORG = os.getenv("DEEPLAKE_ORG", "towards_ai")

# Load the pre-scraped document sources.
dataframes = [
    pd.read_csv("./data/llm_course.csv"),
    pd.read_csv("./data/hf_transformers.csv"),
    pd.read_csv("./data/langchain_course.csv"),
    pd.read_csv("./data/filtered_tai_v2.csv"),
    pd.read_csv("./data/wiki.csv"),  # add encoding="ISO-8859-1" if parsing fails
]

dataset_path = f"hub://{DEEPLAKE_ORG}/{DEEPLAKE_DATASET}"

# overwrite=True recreates the dataset from scratch on every run.
dm = DeepLakeDocumentsManager(
    vector_store_path=dataset_path,
    overwrite=True,
    required_columns=["url", "content", "source", "title"],
)

# Embed and upload each source in batches, throttled with min_time_interval to
# stay within embedding-API rate limits. Computed embeddings are also appended
# to embeddings.csv (csv_overwrite=False) as a local backup across sources.
for df in dataframes:
    dm.batch_add(
        df=df,
        batch_size=3000,
        min_time_interval=60,
        num_workers=32,
        csv_filename="embeddings.csv",
        csv_overwrite=False,
    )
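
# --- Optional sanity check: query the populated store (a minimal sketch, ---
# --- not part of the original ingestion script). ---------------------------
# Assumptions: the documents above were embedded with OpenAI's
# text-embedding-ada-002 (Buster's default at the time of writing),
# OPENAI_API_KEY is set, and Buster stores page text under the "text" tensor
# with url/source/title under "metadata". Inspect results.keys() if your
# schema differs.
from openai import OpenAI

client = OpenAI()


def embedding_fn(texts):
    # Embed queries with the same model used at ingestion time.
    if isinstance(texts, str):
        texts = [texts]
    response = client.embeddings.create(model="text-embedding-ada-002", input=texts)
    return [record.embedding for record in response.data]


vector_store = VectorStore(path=dataset_path, read_only=True)
results = vector_store.search(
    embedding_data="What is a transformer?",
    embedding_function=embedding_fn,
    k=3,
)
# Print the metadata (url, source, title) of the top matches.
for meta in results["metadata"]:
    print(meta)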