# my-phy-dataset / translate.py
# Uploaded via huggingface_hub (commit 8c23886).
import os

import numpy as np
import openai
import pandas as pd

from utils.openai import load_client
# Project helper client; expects the path to your own key file.
client = load_client('<path-to-your-key-file>')

# SECURITY FIX: an OpenAI API key was hard-coded here in plain text and is
# therefore compromised — revoke it. Secrets must come from the environment,
# never from source control.
openai.api_key = os.environ.get('OPENAI_API_KEY', '')

# Parquet file holding the Russian-language source dataset.
file_path = '/home/yiyangai/stephenqs/datasets/physics_big/data/combined_images_non_empty.parquet'

# Load the entire dataset into memory.
df = pd.read_parquet(file_path)
# 定义翻译函数,调用GPT-4 API
def translate_text(text):
    """Recursively translate Russian text to English with the OpenAI chat API.

    Strings are sent to the model individually. Lists and numpy arrays are
    translated element-wise (always returned as Python lists); dict values are
    translated while keys are kept as-is. Any other value (numbers, None, ...)
    is returned unchanged.

    Note: pandas loads parquet list columns as numpy arrays, so ndarray must
    be handled alongside list or those cells would pass through untranslated.
    """
    if isinstance(text, (list, np.ndarray)):
        # Container: translate each element, preserving order.
        return [translate_text(item) for item in text]
    elif isinstance(text, dict):
        # Mapping: translate values only; keys are identifiers, not content.
        return {key: translate_text(value) for key, value in text.items()}
    elif isinstance(text, str):
        # Leaf string: one API round-trip per cell (legacy ChatCompletion API).
        response = openai.ChatCompletion.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": f"Translate this Russian text to English: {text}"}
            ]
        )
        return response.choices[0].message['content'].strip()
    else:
        # Non-text values need no translation.
        return text
# Translate the dataset in batches, checkpointing progress after every batch
# so a mid-run failure does not lose completed work.
batch_size = 20  # rows translated per API batch

# Accumulate translated batches in a list and concat once per checkpoint:
# growing a DataFrame with pd.concat inside the loop is quadratic (and
# concatenating with an initially-empty frame is deprecated in pandas).
translated_batches = []
partial_path = '/home/yiyangai/stephenqs/datasets/physics_big/data/partial_translation.parquet'

for i in range(0, len(df), batch_size):
    # Fixed broken format string: the two endpoints were fused together.
    print(f"正在翻译样本 {i+1}-{min(i+batch_size, len(df))}...")
    batch = df.iloc[i:i+batch_size].copy()
    # Translate every cell of the batch, column by column.
    for column in batch.columns:
        batch[column] = batch[column].apply(translate_text)
    translated_batches.append(batch)
    # Checkpoint the work done so far (optional safety net).
    df_translated = pd.concat(translated_batches, ignore_index=True)
    df_translated.to_parquet(partial_path)

# Final save of the complete translation; handle an empty input dataset too.
output_file_path = '/home/yiyangai/stephenqs/datasets/physics_big/data/translated_combined_images_non_empty.parquet'
if translated_batches:
    df_translated = pd.concat(translated_batches, ignore_index=True)
else:
    df_translated = pd.DataFrame(columns=df.columns)
df_translated.to_parquet(output_file_path)
print(f"翻译完成,已将结果保存到 {output_file_path}")