翻译是语言模型的强项之一,我们可以用 Ollama 构建一个实用的翻译工具。
from typing import Optional

import ollama
class Translator:
    """Translate text via a local Ollama chat model.

    Both public methods previously duplicated the same chat-call
    boilerplate; it now lives in the shared ``_chat`` helper.
    """

    def __init__(self, model='llama3.2'):
        # Name of the Ollama model used for every request.
        self.model = model

    def _chat(self, system_prompt, text):
        """Run one system-prompted chat round and return the reply text."""
        response = ollama.chat(
            model=self.model,
            messages=[
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': text},
            ],
        )
        return response['message']['content']

    def translate(self, text, target_lang='英文'):
        """Translate ``text`` into ``target_lang`` and return only the translation."""
        return self._chat(
            f'你是一个专业翻译,将用户输入翻译成{target_lang}。只输出翻译结果,不要解释。',
            text,
        )

    def translate_with_style(self, text, target_lang, style='正式'):
        """Translate ``text`` into ``target_lang`` using the given ``style``."""
        return self._chat(
            f'你是一个专业翻译,将用户输入翻译成{target_lang},使用{style}风格。只输出翻译结果。',
            text,
        )
# Usage example
translator = Translator()
print(translator.translate('你好,世界!', '英文'))
print(translator.translate_with_style('你好,世界!', '英文', '口语化'))
import ollama
class MultiTranslator:
    """Translator with language detection and multi-language support."""

    # Language code -> display name (in Chinese) used inside the prompts.
    LANGUAGES = {
        'zh': '中文',
        'en': '英文',
        'ja': '日文',
        'ko': '韩文',
        'fr': '法文',
        'de': '德文',
        'es': '西班牙文',
        'ru': '俄文'
    }

    def __init__(self, model='llama3.2'):
        # Name of the Ollama model used for every request.
        self.model = model

    def _chat(self, system_prompt, text):
        """Run one system-prompted chat round and return the reply text."""
        response = ollama.chat(
            model=self.model,
            messages=[
                {'role': 'system', 'content': system_prompt},
                {'role': 'user', 'content': text},
            ],
        )
        return response['message']['content']

    def detect_language(self, text):
        """Return the model's guess of the language code (lowercased, e.g. 'zh')."""
        reply = self._chat('识别文本的语言,只输出语言代码(如 zh、en、ja)。', text)
        return reply.strip().lower()

    def translate(self, text, target_lang):
        """Translate ``text`` into ``target_lang`` (a code from LANGUAGES or a name)."""
        # Unknown codes fall through as-is so callers may pass a language name.
        target_name = self.LANGUAGES.get(target_lang, target_lang)
        return self._chat(
            f'你是专业翻译,将文本翻译成{target_name}。只输出翻译结果。',
            text,
        )

    def auto_translate(self, text, target_lang):
        """Detect the source language and translate only if it differs from the target."""
        if self.detect_language(text) == target_lang:
            return text
        return self.translate(text, target_lang)

    def batch_translate(self, texts, target_lang):
        """Translate each item of ``texts``; returns translations in the same order."""
        return [self.translate(text, target_lang) for text in texts]
# Usage example
translator = MultiTranslator()
for source_text, lang_code in [('Hello, world!', 'zh'), ('你好,世界!', 'en')]:
    print(translator.translate(source_text, lang_code))
print(translator.auto_translate('Bonjour', 'zh'))
import ollama
import argparse
import sys
def translate_text(text, target_lang, model='llama3.2'):
    """Translate ``text`` into ``target_lang`` with the given Ollama model."""
    messages = [
        {
            'role': 'system',
            'content': f'你是专业翻译,将文本翻译成{target_lang}。只输出翻译结果。',
        },
        {'role': 'user', 'content': text},
    ]
    reply = ollama.chat(model=model, messages=messages)
    return reply['message']['content']
def interactive_mode(model, target_lang):
    """Interactive loop: translate each input line until /exit, Ctrl-C, or EOF.

    Fix: the original only caught KeyboardInterrupt, so Ctrl-D (or piped
    stdin running dry) raised an unhandled EOFError and crashed the loop.
    """
    print(f"翻译工具(目标语言: {target_lang})")
    print("输入文本进行翻译,输入 /exit 退出\n")
    while True:
        try:
            text = input("> ").strip()
        except (KeyboardInterrupt, EOFError):
            print("\n再见!")
            break
        if text == '/exit':
            break
        if text:
            result = translate_text(text, target_lang, model)
            print(f"\n{result}\n")
def main():
    """CLI entry point: one-shot translation, interactive mode, or help text."""
    parser = argparse.ArgumentParser(description='翻译工具')
    parser.add_argument('text', nargs='?', help='要翻译的文本')
    parser.add_argument('-t', '--target', default='英文', help='目标语言')
    parser.add_argument('-m', '--model', default='llama3.2', help='模型名称')
    parser.add_argument('-i', '--interactive', action='store_true', help='交互模式')
    args = parser.parse_args()

    # Guard clauses instead of if/elif/else: each mode exits early.
    if args.interactive:
        interactive_mode(args.model, args.target)
        return
    if args.text:
        print(translate_text(args.text, args.target, args.model))
        return
    parser.print_help()


if __name__ == '__main__':
    main()
from fastapi import FastAPI
from pydantic import BaseModel
import ollama
# FastAPI application exposing the translation endpoints below.
app = FastAPI(title="翻译 API")
class TranslateRequest(BaseModel):
    """Request body for POST /translate."""
    text: str
    target_lang: str = '英文'
    # Fix: the optional fields were annotated `str = None` — None is not a
    # valid `str`, so Pydantic rejects an explicit null. Optional[str] is the
    # correct annotation for "string or absent".
    source_lang: Optional[str] = None
    style: Optional[str] = None
class TranslateResponse(BaseModel):
    """Response body for POST /translate."""
    translation: str
    # Fix: `str = None` is an invalid annotation for a nullable field;
    # Optional[str] lets the response carry null when no source was given.
    source_lang: Optional[str] = None
@app.post("/translate", response_model=TranslateResponse)
def translate(request: TranslateRequest):
    """Translate request.text into request.target_lang, optionally in a style.

    Fix: declared as a sync ``def`` instead of ``async def`` — ``ollama.chat``
    is a blocking call, and inside an async handler it would stall the whole
    event loop. FastAPI runs sync handlers in a worker thread pool.
    """
    system_content = f'你是专业翻译,将文本翻译成{request.target_lang}。只输出翻译结果。'
    if request.style:
        system_content += f'使用{request.style}风格。'
    response = ollama.chat(
        model='llama3.2',
        messages=[
            {'role': 'system', 'content': system_content},
            {'role': 'user', 'content': request.text}
        ]
    )
    # Echo the (caller-supplied) source language back unchanged.
    return TranslateResponse(
        translation=response['message']['content'],
        source_lang=request.source_lang
    )
@app.post("/detect")
def detect_language(text: str):
    """Detect the language of ``text`` and return its name.

    NOTE(review): a bare ``text: str`` parameter is received as a query
    parameter, not a JSON body — confirm that is the intended API shape.
    Fix: sync ``def`` instead of ``async def`` because ``ollama.chat`` blocks;
    FastAPI offloads sync handlers to a thread pool.
    """
    response = ollama.chat(
        model='llama3.2',
        messages=[
            {
                'role': 'system',
                'content': '识别文本语言,输出语言名称。'
            },
            {'role': 'user', 'content': text}
        ]
    )
    return {'language': response['message']['content']}
# Launch a development server when this file is executed directly.
if __name__ == '__main__':
    import uvicorn

    uvicorn.run(app, host="0.0.0.0", port=8000)
import ollama
import re
class DocumentTranslator:
    """Translate whole documents paragraph by paragraph via Ollama.

    Improvement: long paragraphs were previously hard-sliced every
    ``chunk_size`` characters, cutting sentences (and words) in half and
    degrading translation quality. ``_split_chunks`` now splits on sentence
    boundaries first and only hard-slices a single oversized sentence.
    """

    def __init__(self, model='llama3.2'):
        # Name of the Ollama model used for every request.
        self.model = model

    @staticmethod
    def _split_chunks(text, chunk_size):
        """Split ``text`` into chunks of at most ``chunk_size`` characters.

        Prefers sentence boundaries (CJK and Latin terminators); a single
        sentence longer than ``chunk_size`` is hard-sliced as a last resort.
        The concatenation of the returned chunks equals ``text`` exactly.
        """
        # Zero-width split *after* each terminator keeps every character.
        sentences = re.split(r'(?<=[。!?.!?])', text)
        chunks = []
        current = ''
        for sentence in sentences:
            # Oversized sentence: flush what we have, then hard-slice it.
            while len(sentence) > chunk_size:
                if current:
                    chunks.append(current)
                    current = ''
                chunks.append(sentence[:chunk_size])
                sentence = sentence[chunk_size:]
            if current and len(current) + len(sentence) > chunk_size:
                chunks.append(current)
                current = sentence
            else:
                current += sentence
        if current:
            chunks.append(current)
        return chunks

    def translate_paragraph(self, text, target_lang):
        """Translate one paragraph, asking the model to preserve formatting."""
        response = ollama.chat(
            model=self.model,
            messages=[
                {
                    'role': 'system',
                    'content': f'你是专业翻译,将文本翻译成{target_lang}。保持原文格式,只输出翻译结果。'
                },
                {'role': 'user', 'content': text}
            ]
        )
        return response['message']['content']

    def translate_document(self, text, target_lang, chunk_size=1000):
        """Translate ``text`` paragraph by paragraph, preserving blank lines.

        Paragraphs longer than ``chunk_size`` characters are translated in
        sentence-aligned chunks and re-joined with single spaces.
        """
        paragraphs = text.split('\n\n')
        translated = []
        for para in paragraphs:
            if not para.strip():
                # Keep empty paragraphs so the blank-line structure survives.
                translated.append('')
                continue
            if len(para) > chunk_size:
                chunk_translated = [
                    self.translate_paragraph(chunk, target_lang)
                    for chunk in self._split_chunks(para, chunk_size)
                ]
                translated.append(' '.join(chunk_translated))
            else:
                translated.append(self.translate_paragraph(para, target_lang))
        return '\n\n'.join(translated)

    def translate_file(self, input_file, output_file, target_lang):
        """Read ``input_file``, translate it, and write ``output_file`` (UTF-8)."""
        with open(input_file, 'r', encoding='utf-8') as f:
            text = f.read()
        translated = self.translate_document(text, target_lang)
        with open(output_file, 'w', encoding='utf-8') as f:
            f.write(translated)
        print(f"翻译完成: {output_file}")
# Usage: translate input.txt and write the English version to output_en.txt.
translator = DocumentTranslator()
translator.translate_file(
    input_file='input.txt',
    output_file='output_en.txt',
    target_lang='英文',
)