# Mirrored from https://github.com/langgenius/dify.git
from core.model_runtime.entities.model_entities import DefaultParameterName

# Default UI/validation rule templates for common LLM sampling parameters.
# Each entry maps a DefaultParameterName enum member to a rule dict holding:
#   - "label"/"help": localized display strings (en_US, and zh_Hans where provided)
#   - "type": value type expected from the user ("float", "int", "string", "text")
#   - "required": whether the parameter must be supplied (all default to False here)
#   - "default"/"min"/"max"/"precision": numeric constraints, for numeric types
#   - "options": allowed choices, for enumerated string parameters
# Model providers can use these as a baseline and override fields as needed.
PARAMETER_RULE_TEMPLATE: dict[DefaultParameterName, dict] = {
    DefaultParameterName.TEMPERATURE: {
        "label": {
            "en_US": "Temperature",
            "zh_Hans": "温度",
        },
        "type": "float",
        "help": {
            "en_US": "Controls randomness. Lower temperature results in less random completions."
            " As the temperature approaches zero, the model will become deterministic and repetitive."
            " Higher temperature results in more random completions.",
            "zh_Hans": "温度控制随机性。较低的温度会导致较少的随机完成。随着温度接近零,模型将变得确定性和重复性。"
            "较高的温度会导致更多的随机完成。",
        },
        "required": False,
        "default": 0.0,
        "min": 0.0,
        "max": 1.0,
        "precision": 2,
    },
    DefaultParameterName.TOP_P: {
        "label": {
            "en_US": "Top P",
            "zh_Hans": "Top P",
        },
        "type": "float",
        "help": {
            "en_US": "Controls diversity via nucleus sampling: 0.5 means half of all likelihood-weighted options"
            " are considered.",
            "zh_Hans": "通过核心采样控制多样性:0.5 表示考虑了一半的所有可能性加权选项。",
        },
        "required": False,
        "default": 1.0,
        "min": 0.0,
        "max": 1.0,
        "precision": 2,
    },
    DefaultParameterName.TOP_K: {
        "label": {
            "en_US": "Top K",
            "zh_Hans": "Top K",
        },
        "type": "int",
        "help": {
            "en_US": "Limits the number of tokens to consider for each step by keeping only the k most likely tokens.",
            "zh_Hans": "通过只保留每一步中最可能的 k 个标记来限制要考虑的标记数量。",
        },
        "required": False,
        "default": 50,
        "min": 1,
        "max": 100,
        "precision": 0,
    },
    DefaultParameterName.PRESENCE_PENALTY: {
        "label": {
            "en_US": "Presence Penalty",
            "zh_Hans": "存在惩罚",
        },
        "type": "float",
        "help": {
            "en_US": "Applies a penalty to the log-probability of tokens already in the text.",
            "zh_Hans": "对文本中已有的标记的对数概率施加惩罚。",
        },
        "required": False,
        "default": 0.0,
        "min": 0.0,
        "max": 1.0,
        "precision": 2,
    },
    DefaultParameterName.FREQUENCY_PENALTY: {
        "label": {
            "en_US": "Frequency Penalty",
            "zh_Hans": "频率惩罚",
        },
        "type": "float",
        "help": {
            "en_US": "Applies a penalty to the log-probability of tokens that appear in the text.",
            "zh_Hans": "对文本中出现的标记的对数概率施加惩罚。",
        },
        "required": False,
        "default": 0.0,
        "min": 0.0,
        "max": 1.0,
        "precision": 2,
    },
    DefaultParameterName.MAX_TOKENS: {
        "label": {
            "en_US": "Max Tokens",
            "zh_Hans": "最大 Token 数",
        },
        "type": "int",
        "help": {
            "en_US": "Specifies the upper limit on the length of generated results."
            " If the generated results are truncated, you can increase this parameter.",
            "zh_Hans": "指定生成结果长度的上限。如果生成结果截断,可以调大该参数。",
        },
        "required": False,
        "default": 64,
        "min": 1,
        "max": 2048,
        "precision": 0,
    },
    DefaultParameterName.RESPONSE_FORMAT: {
        "label": {
            "en_US": "Response Format",
            "zh_Hans": "回复格式",
        },
        "type": "string",
        "help": {
            "en_US": "Set a response format, ensure the output from llm is a valid code block as possible,"
            " such as JSON, XML, etc.",
            "zh_Hans": "设置一个返回格式,确保 llm 的输出尽可能是有效的代码块,如 JSON、XML 等",
        },
        "required": False,
        "options": ["JSON", "XML"],
    },
    # NOTE: JSON_SCHEMA intentionally carries only an en_US label/help and no
    # numeric constraints; its value is free-form text holding the schema.
    DefaultParameterName.JSON_SCHEMA: {
        "label": {
            "en_US": "JSON Schema",
        },
        "type": "text",
        "help": {
            "en_US": "Set a response json schema will ensure LLM to adhere it.",
            "zh_Hans": "设置返回的 json schema,llm 将按照它返回",
        },
        "required": False,
    },
}