-
Notifications
You must be signed in to change notification settings - Fork 17
/
example_config.py
81 lines (50 loc) · 2.02 KB
/
example_config.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
# Example configuration: copy this file (e.g. to config.py), then fill in the
# credentials/endpoints ONLY for the models you intend to use. All values are
# empty-string placeholders; constant names are the public interface other
# modules import, so do not rename them.
# ─── Models Accessed Through API ──────────────────────────────────────────────
# Note: If you want to use the models below, you need to apply for an API key for each model.
# Baichuan 53B
# Visit: https://platform.baichuan-ai.com/docs/api
Baichuan2_53B_url = ''
Baichuan2_53B_api_key = ''
Baichuan2_53B_secret_key = ''
# GPT series
# https://platform.openai.com/docs/guides/text-generation
GPT_api_key = ''
GPT_api_base = '' # Leave this blank if you're not using a custom base URL
# ─── Models Accessed Through Remote Deployment ────────────────────────────────
# Note: Remote models are only for internal use.
# Each remotely deployed model needs an endpoint URL plus an auth token.
Aquila_url = ''
Aquila_token = ''
Baichuan2_13B_url = ''
Baichuan2_13B_token = ''
ChatGLM2_url = ''
ChatGLM2_token = ''
GPT_transit_url = ''
GPT_transit_token = ''
InternLM_url = ''
InternLM_token = ''
Qwen_url = ''
Qwen_token = ''
Xinyu_7B_url = ''
Xinyu_7B_token = ''
Xinyu_70B_url = ''
Xinyu_70B_token = ''
# ─── Models Accessed Through Local Deployment ─────────────────────────────────
# Note: Use vLLM to deploy models.
# Link: https://docs.vllm.ai/en/latest/getting_started/quickstart.html
# Each value is the URL of a locally running vLLM server for that model.
BloomZ_3B_vllm_url = ''
Gemma_2B_Chat_vllm_url = ''
InternLM2_1_8B_Chat_vllm_url = ''
LLaMA2_13B_Chat_vllm_url = ''
LLaMA2_70B_Chat_vllm_url = ''
LLaMA2_7B_Chat_vllm_url = ''
NewModel_vllm_url = ''
OPT_vllm_url = ''
PHI2_vllm_url = ''
Qwen1_5_4B_Chat_vllm_url = ''
# ─── Models Accessed Through Local Path ───────────────────────────────────────
# Note: Ensure model's files are in Huggingface format.
# Each value is a filesystem path to the model's weights/config directory.
Aquila_local_path = ''
# NOTE(review): lowercase "13b" is inconsistent with Baichuan2_13B_url/_token
# above; likely intentional to keep existing callers working — confirm before
# renaming, as the name is part of the public interface.
Baichuan2_13b_local_path = ''
ChatGLM3_local_path = ''
InternLM_local_path = ''
MiniCPM_local_path = ''
Qwen_local_path = ''