一、Openai的接口调用
pip包下载
pip install openai
配置 API Key(sk-开头的密钥)和 Base URL
OPENAI_API_KEY = sk-xxxxx
OPENAI_BASE_URL = https://api.openai.com/v1
接口调用
# Module setup for the Flask + OpenAI demo: imports, config, app and client.
import configparser  # needed for ConfigParser below (was missing from the imports)
import os

from flask import Flask, jsonify
from openai import OpenAI

# Read API credentials from an INI-style config file instead of hard-coding them.
config = configparser.ConfigParser()
config.read("config.cfg", encoding="utf-8")
OPENAI_API_KEY = config.get("default", "OPENAI_API_KEY", fallback=None)
OPENAI_BASE_URL = config.get("default", "OPENAI_BASE_URL", fallback=None)

# Flask app and OpenAI client used by the route handlers below.
# The client is only created when a key is configured, so importing this
# module without config.cfg present does not raise at import time.
app = Flask(__name__)
client = OpenAI(api_key=OPENAI_API_KEY, base_url=OPENAI_BASE_URL) if OPENAI_API_KEY else None
@app.route("/gpt_test")
def gpt_test():
    """Call GPT once and return the answer to a fixed question as JSON.

    Returns 500 with an error payload when the API key is missing or the
    call fails; otherwise 200 with {"answer": ...}.
    """
    if not OPENAI_API_KEY:
        return jsonify({"error": "OPENAI_API_KEY 未配置"}), 500
    try:
        # chat.completions.create style call
        resp = client.chat.completions.create(
            model="gpt-4.1-mini",  # or any model you have access to, e.g. gpt-4.1, gpt-4o
            messages=[
                {"role": "system", "content": "你是一个简洁回答的助手。"},
                {"role": "user", "content": "简单用一句话介绍一下你自己。"},
            ],
        )
        answer = resp.choices[0].message.content
        return jsonify({"answer": answer})
    except Exception as e:
        # Broad catch is deliberate at this top-level HTTP boundary: log,
        # then surface the error to the caller as JSON instead of a 500 page.
        print("GPT 调用异常:", repr(e))
        return jsonify({"error": str(e)}), 500
二、阿里通义
安装官方sdk
pip install dashscope
调用方式与 OpenAI 类似:改用 dashscope.Generation.call 发起请求,messages 的格式基本可以直接复用
# DashScope (Aliyun) API key, read from the same config.cfg as the OpenAI key.
ALIYUN_API_KEY = config.get("default", "ALIYUN_API_KEY", fallback=None)


@app.route("/llm_test/")
def llm_test():
    """Test a simple chat round-trip with the LLM.

    Sends a fixed system/user message pair through chat_with_model and
    returns the answer as JSON; on failure returns 500 with the error text.
    """
    try:
        messages = [
            {'role': 'system', 'content': 'You are a helpful assistant.'},
            {'role': 'user', 'content': '你是谁?'}
        ]
        answer = chat_with_model(messages)
        return jsonify({"answer": answer})
    except Exception as e:
        # Top-level HTTP boundary: log the exception and return it as JSON.
        print("LLM error:", repr(e))
        return jsonify({"error": str(e)}), 500
这里有几类模型id都可以使用:
- qwen3-max
- qwen-plus
- qwen-turbo
参考:阿里云百炼
如果需要使用到prompt,比如我们有路径app/prompt_store/底下的prompt文件:doc-llm-latest.md
首先按照字符串处理的思路,把文件内容读取出来:
from pathlib import Path# run.py 所在目录
BASE_DIR = Path(__file__).resolve().parent
PROMPT_DIR = BASE_DIR / "app" / "prompt_store"
PROMPT_LATEST_FILE = PROMPT_DIR / "doc-llm-latest.md"def load_latest_prompt() -> str | None:"""读取 doc-llm-latest.md 的内容"""try:with PROMPT_LATEST_FILE.open("r", encoding="utf-8") as f:return f.read()except FileNotFoundError:print(f"[WARN] Prompt file not found: {PROMPT_LATEST_FILE}")return Noneexcept Exception as e:print(f"[ERROR] Failed to read prompt: {e!r}")return None
然后在构造 messages 时,把读取到的 prompt 作为 system 消息补充进去:
@app.route("/llm_with_prompt/")
def llm_with_prompt():
    """Chat with the LLM using the latest stored prompt as the system message.

    Returns 500 when no prompt file is available or the model call fails;
    otherwise 200 with {"answer": ...}.
    """
    prompt = load_latest_prompt()
    if not prompt:
        # Covers both a missing file (None) and an empty prompt file ("").
        return jsonify({"error": "No prompt available"}), 500
    try:
        messages = [
            {'role': 'system', 'content': prompt},
            {'role': 'user', 'content': "请用一两句话,概括一下这个文档测试规范的核心目标。"}
        ]
        answer = chat_with_model(messages)
        return jsonify({"answer": answer})
    except Exception as e:
        # Top-level HTTP boundary: log the exception and return it as JSON.
        print("LLM with prompt error:", repr(e))
        return jsonify({"error": str(e)}), 500