feat(llm): unify container access url info and login response (#24505)

This commit is contained in:
Zexi Li
2026-03-19 23:06:44 +08:00
committed by GitHub
parent 51724b1b21
commit e25d6eb814
11 changed files with 157 additions and 151 deletions

View File

@@ -212,9 +212,16 @@ type LLMVolumeInput struct {
AutoStart bool `json:"auto_start"`
}
// LLMLoginInfo is the response for GET /llms/<id>/login-info: login URL and credentials.
type LLMLoginInfo struct {
LoginUrl string `json:"login_url"`
// LLMAccessUrlInfo carries the access URLs for an LLM container, returned by
// API endpoints such as GET /llms/<id>/login-info.
type LLMAccessUrlInfo struct {
	// LoginUrl is the preferred URL for reaching the service. When a port
	// mapping exists it uses the host public IP if available, otherwise the
	// host internal IP; without a mapping it points at the guest IP.
	LoginUrl string `json:"login_url"`
	// PublicUrl is the URL reachable via the host's public IP; empty when no
	// port mapping or public IP exists.
	PublicUrl string `json:"public_url"`
	// InternalUrl is the URL reachable via the host's internal IP; empty
	// when no port mapping exists.
	InternalUrl string `json:"internal_url"`
}
// LLMAccessInfo is the response for GET /llms/<id>/login-info: login URL and credentials.
type LLMAccessInfo struct {
LLMAccessUrlInfo
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Extra map[string]string `json:"extra,omitempty"`

View File

@@ -15,6 +15,10 @@ const (
LLM_OPENCLAW_TEMPLATE_USER_MD_B64 = LLMEnvKey("OPENCLAW_TEMPLATE_USER_MD_B64")
)
const (
	// LLM_OPENCLAW_DEFAULT_PORT is the port the OpenClaw container serves
	// on by default (accessed over HTTPS by the openclaw driver).
	LLM_OPENCLAW_DEFAULT_PORT = 3001
)
type OpenClawConfig struct {
Browser *OpenClawConfigBrowser `json:"browser"`
Agents *OpenClawConfigAgents `json:"agents"`

View File

@@ -2,10 +2,6 @@ package llm_container
import (
"context"
"fmt"
"strings"
"yunion.io/x/pkg/errors"
commonapi "yunion.io/x/onecloud/pkg/apis"
computeapi "yunion.io/x/onecloud/pkg/apis/compute"
@@ -150,18 +146,8 @@ func (c *comfyui) GetContainerSpecs(ctx context.Context, llm *models.SLLM, image
}
}
func (c *comfyui) GetLLMUrl(ctx context.Context, userCred mcclient.TokenCredential, llm *models.SLLM) (string, error) {
server, err := llm.GetServer(ctx)
if err != nil {
return "", errors.Wrap(err, "get server")
}
// 从 IPs 字符串中选择第一个 IP
ips := strings.Split(strings.TrimSpace(server.IPs), ",")
if len(ips) == 0 || len(strings.TrimSpace(ips[0])) == 0 {
return "", errors.Error("server IPs is empty")
}
firstIP := strings.TrimSpace(ips[0])
return fmt.Sprintf("http://%s:%d", firstIP, 8188), nil
// GetLLMAccessUrlInfo builds the access URLs for the ComfyUI container,
// which serves plain HTTP on port 8188, by delegating to the shared helper.
func (c *comfyui) GetLLMAccessUrlInfo(ctx context.Context, userCred mcclient.TokenCredential, llm *models.SLLM, input *models.LLMAccessInfoInput) (*api.LLMAccessUrlInfo, error) {
	// ComfyUI's default listen port. NOTE(review): consider hoisting this
	// into an api-level constant alongside the other driver port constants.
	const comfyuiDefaultPort = 8188
	return models.GetLLMAccessUrlInfo(ctx, userCred, llm, input, "http", comfyuiDefaultPort)
}
func (c *comfyui) GetProbedInstantModelsExt(ctx context.Context, userCred mcclient.TokenCredential, llm *models.SLLM, mdlIds ...string) (map[string]api.LLMInternalInstantMdlInfo, error) {

View File

@@ -2,8 +2,6 @@ package llm_container
import (
"context"
"fmt"
"strconv"
"strings"
"yunion.io/x/pkg/errors"
@@ -263,25 +261,7 @@ func (d *dify) StartLLM(ctx context.Context, userCred mcclient.TokenCredential,
return nil
}
// GetLLMUrl returns the Dify access URL (nginx port 80). Same pattern as vLLM/Ollama: guest network uses LLMIp, hostlocal uses host IP.
func (d *dify) GetLLMUrl(ctx context.Context, userCred mcclient.TokenCredential, llm *models.SLLM) (string, error) {
server, err := llm.GetServer(ctx)
if err != nil {
return "", errors.Wrap(err, "get server")
}
port := 80
if p, err := strconv.Atoi(api.DIFY_NGINX_PORT); err == nil {
port = p
}
networkType := llm.NetworkType
if networkType == string(computeapi.NETWORK_TYPE_GUEST) {
if len(llm.LLMIp) == 0 {
return "", errors.Error("LLM IP is empty for guest network")
}
return fmt.Sprintf("http://%s:%d", llm.LLMIp, port), nil
}
if len(server.HostAccessIp) == 0 {
return "", errors.Error("host access IP is empty")
}
return fmt.Sprintf("http://%s:%d", server.HostAccessIp, port), nil
// GetLLMAccessUrlInfo builds the Dify access URLs (nginx front-end, plain
// HTTP on port 80) by delegating to the shared helper in models.
func (d *dify) GetLLMAccessUrlInfo(ctx context.Context, userCred mcclient.TokenCredential, llm *models.SLLM, input *models.LLMAccessInfoInput) (*api.LLMAccessUrlInfo, error) {
	// Dify is fronted by nginx on the standard HTTP port.
	const difyNginxPort = 80
	return models.GetLLMAccessUrlInfo(ctx, userCred, llm, input, "http", difyNginxPort)
}

View File

@@ -3,7 +3,6 @@ package llm_container
import (
"context"
"crypto/sha256"
"database/sql"
"encoding/hex"
"encoding/json"
"fmt"
@@ -628,51 +627,8 @@ func parseModelName(path string) string {
return strings.TrimRight(model, `\`)
}
func (o *ollama) GetLLMUrl(ctx context.Context, userCred mcclient.TokenCredential, llm *models.SLLM) (string, error) {
// 查询 accessinfo
accessInfo := &models.SAccessInfo{}
q := models.GetAccessInfoManager().Query().Equals("llm_id", llm.Id)
err := q.First(accessInfo)
if err != nil {
if errors.Cause(err) == sql.ErrNoRows {
// 如果没有 accessinfo使用对应主机
server, err := llm.GetServer(ctx)
if err != nil {
return "", errors.Wrap(err, "get server")
}
// 从 IPs 字符串中选择第一个 IP
ips := strings.Split(strings.TrimSpace(server.IPs), ",")
if len(ips) == 0 || len(strings.TrimSpace(ips[0])) == 0 {
return "", errors.Error("server IPs is empty")
}
firstIP := strings.TrimSpace(ips[0])
return fmt.Sprintf("http://%s:%d", firstIP, api.LLM_OLLAMA_DEFAULT_PORT), nil
}
return "", errors.Wrap(err, "query accessinfo")
}
// 判断网络类型
networkType := llm.NetworkType
if networkType == string(computeapi.NETWORK_TYPE_GUEST) {
// guest 网络:使用 LLM IP + 默认端口
if len(llm.LLMIp) == 0 {
return "", errors.Error("LLM IP is empty for guest network")
}
return fmt.Sprintf("http://%s:%d", llm.LLMIp, api.LLM_OLLAMA_DEFAULT_PORT), nil
} else {
// hostlocal 或其他网络类型:使用宿主机 IP + 映射端口
server, err := llm.GetServer(ctx)
if err != nil {
return "", errors.Wrap(err, "get server")
}
if len(server.HostAccessIp) == 0 {
return "", errors.Error("host access IP is empty")
}
if accessInfo.AccessPort == 0 {
return "", errors.Error("access port is not set")
}
return fmt.Sprintf("http://%s:%d", server.HostAccessIp, accessInfo.AccessPort), nil
}
// GetLLMAccessUrlInfo builds the access URLs for the Ollama container over
// plain HTTP on the default Ollama port, via the shared models helper.
func (o *ollama) GetLLMAccessUrlInfo(ctx context.Context, userCred mcclient.TokenCredential, llm *models.SLLM, input *models.LLMAccessInfoInput) (*api.LLMAccessUrlInfo, error) {
	const scheme = "http"
	return models.GetLLMAccessUrlInfo(ctx, userCred, llm, input, scheme, api.LLM_OLLAMA_DEFAULT_PORT)
}
func getNamespaceAndRepo(modelName string) (string, string) {

View File

@@ -419,22 +419,12 @@ func (c *openclaw) GetContainerSpecs(ctx context.Context, llm *models.SLLM, imag
}
}
func (c *openclaw) GetLLMUrl(ctx context.Context, userCred mcclient.TokenCredential, llm *models.SLLM) (string, error) {
server, err := llm.GetServer(ctx)
if err != nil {
return "", errors.Wrap(err, "get server")
}
// 从 IPs 字符串中选择第一个 IP
ips := strings.Split(strings.TrimSpace(server.IPs), ",")
if len(ips) == 0 || len(strings.TrimSpace(ips[0])) == 0 {
return "", errors.Error("server IPs is empty")
}
firstIP := strings.TrimSpace(ips[0])
return fmt.Sprintf("https://%s:%d", firstIP, 3001), nil
// GetLLMAccessUrlInfo builds the access URLs for the OpenClaw container,
// which is served over HTTPS on its default port, via the shared helper.
func (c *openclaw) GetLLMAccessUrlInfo(ctx context.Context, userCred mcclient.TokenCredential, llm *models.SLLM, input *models.LLMAccessInfoInput) (*api.LLMAccessUrlInfo, error) {
	const scheme = "https"
	return models.GetLLMAccessUrlInfo(ctx, userCred, llm, input, scheme, api.LLM_OPENCLAW_DEFAULT_PORT)
}
// GetLoginInfo returns OpenClaw web UI login credentials (same defaults as container env).
func (c *openclaw) GetLoginInfo(ctx context.Context, userCred mcclient.TokenCredential, llm *models.SLLM) (*api.LLMLoginInfo, error) {
func (c *openclaw) GetLoginInfo(ctx context.Context, userCred mcclient.TokenCredential, llm *models.SLLM) (*api.LLMAccessInfo, error) {
ctr, err := llm.GetLLMSContainer(ctx)
if err != nil {
if errors.Cause(err) == sql.ErrNoRows || strings.Contains(strings.ToLower(err.Error()), "not found") {
@@ -461,7 +451,7 @@ func (c *openclaw) GetLoginInfo(ctx context.Context, userCred mcclient.TokenCred
gatewayToken = env.Value
}
}
return &api.LLMLoginInfo{
return &api.LLMAccessInfo{
Username: username,
Password: password,
Extra: map[string]string{

View File

@@ -238,29 +238,8 @@ func (v *vllm) GetContainerSpecs(ctx context.Context, llm *models.SLLM, image *m
}
}
func (v *vllm) GetLLMUrl(ctx context.Context, userCred mcclient.TokenCredential, llm *models.SLLM) (string, error) {
// Similar logic to Ollama to determine URL
server, err := llm.GetServer(ctx)
if err != nil {
return "", errors.Wrap(err, "get server")
}
networkType := llm.NetworkType
if networkType == string(computeapi.NETWORK_TYPE_GUEST) {
if len(llm.LLMIp) == 0 {
return "", errors.Error("LLM IP is empty for guest network")
}
return fmt.Sprintf("http://%s:%d", llm.LLMIp, api.LLM_VLLM_DEFAULT_PORT), nil
} else {
// hostlocal
if len(server.HostAccessIp) == 0 {
return "", errors.Error("host access IP is empty")
}
// Assuming we might map ports or just use the default if host networking isn't strictly port-mapped per instance
// For simplicity, returning default port on host IP, assuming bridge/direct access or specific port mapping logic exists elsewhere.
// NOTE: In ollama.go, it queries AccessInfo. Here we simplify.
return fmt.Sprintf("http://%s:%d", server.HostAccessIp, api.LLM_VLLM_DEFAULT_PORT), nil
}
// GetLLMAccessUrlInfo builds the access URLs for the vLLM container over
// plain HTTP on the default vLLM port, via the shared models helper.
func (v *vllm) GetLLMAccessUrlInfo(ctx context.Context, userCred mcclient.TokenCredential, llm *models.SLLM, input *models.LLMAccessInfoInput) (*api.LLMAccessUrlInfo, error) {
	const scheme = "http"
	return models.GetLLMAccessUrlInfo(ctx, userCred, llm, input, scheme, api.LLM_VLLM_DEFAULT_PORT)
}
// StartLLM starts the vLLM server inside the container via exec, then waits for the health endpoint to be ready.
@@ -319,11 +298,12 @@ func (v *vllm) StartLLM(ctx context.Context, userCred mcclient.TokenCredential,
}
cmd := startCmd
// Wait for health endpoint
baseURL, err := v.GetLLMUrl(ctx, userCred, llm)
input, err := llm.GetLLMAccessInfoInput(ctx, userCred)
if err != nil {
return errors.Wrap(err, "get llm url for health check")
}
healthURL := strings.TrimSuffix(baseURL, "/") + "/health"
healthURL := fmt.Sprintf("http://%s:%d/health", input.ServerIp, api.LLM_VLLM_DEFAULT_PORT)
deadline := time.Now().Add(api.LLM_VLLM_HEALTH_CHECK_TIMEOUT)
for time.Now().Before(deadline) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, healthURL, nil)

View File

@@ -729,33 +729,137 @@ func (llm *SLLM) StartSyncStatusTask(ctx context.Context, userCred mcclient.Toke
return nil
}
func (llm *SLLM) GetLLMUrl(ctx context.Context, userCred mcclient.TokenCredential) (string, error) {
if llm.CmpId == "" {
return "", nil
func (llm *SLLM) FindAccessInfos(protocol string) ([]SAccessInfo, error) {
q := GetAccessInfoManager().Query()
q = q.Equals("llm_id", llm.Id)
if protocol != "" {
q = q.Equals("protocol", protocol)
}
return llm.GetLLMContainerDriver().GetLLMUrl(ctx, userCred, llm)
}
func (llm *SLLM) GetDetailsUrl(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) (jsonutils.JSONObject, error) {
accessUrl, err := llm.GetLLMUrl(ctx, userCred)
accessInfos := make([]SAccessInfo, 0)
err := db.FetchModelObjects(GetAccessInfoManager(), q, &accessInfos)
if err != nil {
return nil, errors.Wrap(err, "GetLLMUrl")
return nil, errors.Wrap(err, "FetchModelObjects")
}
output := jsonutils.NewDict()
output.Set("access_url", jsonutils.NewString(accessUrl))
return output, nil
if len(accessInfos) == 0 {
return nil, errors.ErrNotFound
}
return accessInfos, nil
}
func (llm *SLLM) GetDetailsLoginInfo(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) (*api.LLMLoginInfo, error) {
// FindAllAccessInfos returns every access info row recorded for this LLM,
// without filtering by protocol. Returns errors.ErrNotFound when none exist
// (see FindAccessInfos).
func (llm *SLLM) FindAllAccessInfos() ([]SAccessInfo, error) {
	return llm.FindAccessInfos("")
}
// FindAccessInfoByEnv returns the first access info (optionally filtered by
// protocol) whose port-mapping envs contain an entry with key envKey.
// Returns errors.ErrNotFound when no access info references that env key.
func (llm *SLLM) FindAccessInfoByEnv(protocol string, envKey string) (*SAccessInfo, error) {
	ainfos, err := llm.FindAccessInfos(protocol)
	if err != nil {
		return nil, errors.Wrapf(err, "FindAccessInfo by env %s", envKey)
	}
	for i := range ainfos {
		for _, env := range ainfos[i].PortMappingEnvs {
			if env.Key != envKey {
				continue
			}
			// Return a pointer to a copy, detached from the slice backing array.
			match := ainfos[i]
			return &match, nil
		}
	}
	return nil, errors.ErrNotFound
}
// getHostAccessIp returns one of the host IPs of the server backing this
// LLM: the public EIP when isPublic is true, the internal access IP
// otherwise. The returned string may be empty if the server has no such IP.
func (llm *SLLM) getHostAccessIp(ctx context.Context, isPublic bool) (string, error) {
	srv, err := llm.GetServer(ctx)
	if err != nil {
		return "", errors.Wrap(err, "GetServer")
	}
	if !isPublic {
		return srv.HostAccessIp, nil
	}
	return srv.HostEIP, nil
}
// GetHostEIP returns the public (elastic) IP of the host running this LLM's
// server; may be empty when the host has no public IP.
func (llm *SLLM) GetHostEIP(ctx context.Context) (string, error) {
	return llm.getHostAccessIp(ctx, true)
}
// LLMAccessInfoInput aggregates the addressing data needed to build access
// URLs for an LLM container (see GetLLMAccessUrlInfo).
type LLMAccessInfoInput struct {
	// HostInternalIp is the host's internal access IP (server.HostAccessIp).
	HostInternalIp string
	// HostPublicIp is the host's public EIP (server.HostEIP); may be empty.
	HostPublicIp string
	// ServerIp is the first IP reported by the guest server.
	ServerIp string
	// AccessInfos lists the recorded port-mapping access infos for this LLM;
	// empty when no port mapping exists.
	AccessInfos []SAccessInfo
}
// GetLLMAccessInfoInput collects the addressing material needed to build
// access URLs for this LLM: the host internal/public IPs, the guest's first
// IP, and any recorded port-mapping access infos.
//
// Access infos are optional: ErrNotFound (no port mapping recorded) is
// tolerated and yields an empty AccessInfos slice. Any other lookup failure
// is propagated instead of being silently swallowed.
func (llm *SLLM) GetLLMAccessInfoInput(ctx context.Context, userCred mcclient.TokenCredential) (*LLMAccessInfoInput, error) {
	accessInfos, err := llm.FindAllAccessInfos()
	if err != nil && errors.Cause(err) != errors.ErrNotFound {
		return nil, errors.Wrap(err, "FindAllAccessInfos")
	}
	server, err := llm.GetServer(ctx)
	if err != nil {
		return nil, errors.Wrap(err, "GetServer")
	}
	// The guest may report multiple comma-separated IPs; use the first one.
	ips := strings.Split(strings.TrimSpace(server.IPs), ",")
	if len(ips) == 0 || len(strings.TrimSpace(ips[0])) == 0 {
		return nil, errors.Error("server IPs is empty")
	}
	return &LLMAccessInfoInput{
		HostInternalIp: server.HostAccessIp,
		HostPublicIp:   server.HostEIP,
		ServerIp:       strings.TrimSpace(ips[0]),
		AccessInfos:    accessInfos,
	}, nil
}
func (llm *SLLM) GetLLMAccessUrlInfo(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) (*api.LLMAccessUrlInfo, error) {
if llm.CmpId == "" {
return nil, nil
}
output := new(api.LLMLoginInfo)
loginUrl, err := llm.GetLLMUrl(ctx, userCred)
input, err := llm.GetLLMAccessInfoInput(ctx, userCred)
if err != nil {
return nil, errors.Wrap(err, "GetLLMUrl")
return nil, errors.Wrap(err, "GetLLMAccessInfoInput")
}
return llm.GetLLMContainerDriver().GetLLMAccessUrlInfo(ctx, userCred, llm, input)
}
// GetLLMAccessUrlInfo builds the login/internal/public URLs for an LLM
// container from pre-collected addressing info.
//
// Without a port mapping, the login URL points directly at the guest IP on
// defaultPort and no internal/public URLs are produced. With a port mapping,
// the mapped host port is used: the internal URL uses the host internal IP,
// and the public URL (also preferred for the login URL) uses the host public
// IP when available.
//
// ctx, userCred and llm are accepted for signature symmetry with the driver
// interface and are not used here.
func GetLLMAccessUrlInfo(ctx context.Context, userCred mcclient.TokenCredential, llm *SLLM, input *LLMAccessInfoInput, protocol string, defaultPort int) (*api.LLMAccessUrlInfo, error) {
	if input == nil {
		return nil, errors.Error("nil access info input")
	}
	port := defaultPort
	accessUrl := input.ServerIp
	hasPortMapping := len(input.AccessInfos) != 0
	if hasPortMapping {
		// Only the first access info is considered; multiple mappings per
		// LLM are not expected here.
		aInfo := input.AccessInfos[0]
		if aInfo.AccessPort == 0 {
			// Guard against an unset mapped port; a ":0" URL would be
			// useless (this check existed in the pre-refactor ollama driver).
			return nil, errors.Error("access port is not set")
		}
		port = aInfo.AccessPort
		// Prefer the public host IP, falling back to the internal host IP.
		accessUrl = input.HostInternalIp
		if input.HostPublicIp != "" {
			accessUrl = input.HostPublicIp
		}
	}
	ret := &api.LLMAccessUrlInfo{
		LoginUrl: fmt.Sprintf("%s://%s:%d", protocol, accessUrl, port),
	}
	if hasPortMapping {
		ret.InternalUrl = fmt.Sprintf("%s://%s:%d", protocol, input.HostInternalIp, port)
		if input.HostPublicIp != "" {
			ret.PublicUrl = fmt.Sprintf("%s://%s:%d", protocol, input.HostPublicIp, port)
		}
	}
	return ret, nil
}
func (llm *SLLM) GetDetailsLoginInfo(ctx context.Context, userCred mcclient.TokenCredential, query jsonutils.JSONObject) (*api.LLMAccessInfo, error) {
if llm.CmpId == "" {
return nil, nil
}
accessUrl, err := llm.GetLLMAccessUrlInfo(ctx, userCred, query)
if err != nil {
return nil, errors.Wrap(err, "GetLLMAccessUrlInfo")
}
output := &api.LLMAccessInfo{
LLMAccessUrlInfo: *accessUrl,
}
output.LoginUrl = loginUrl
drv := llm.GetLLMContainerDriver()
if loginInfoDrv, ok := drv.(ILLMContainerLoginInfo); ok {
info, err := loginInfoDrv.GetLoginInfo(ctx, userCred, llm)

View File

@@ -126,8 +126,7 @@ func GetLLMBasePodCreateInput(
}
network.BwLimit = bandwidth
networkType := string(network.NetType)
if networkType == string(computeapi.NETWORK_TYPE_HOSTLOCAL) {
if len(network.PortMappings) == 0 {
network.PortMappings = portMappings
}

View File

@@ -113,12 +113,12 @@ type ILLMContainerInstantModelDriver interface {
}
type ILLMContainerMCPAgent interface {
GetLLMUrl(ctx context.Context, userCred mcclient.TokenCredential, llm *SLLM) (string, error)
GetLLMAccessUrlInfo(ctx context.Context, userCred mcclient.TokenCredential, llm *SLLM, input *LLMAccessInfoInput) (*llm.LLMAccessUrlInfo, error)
}
// ILLMContainerLoginInfo is an optional interface for drivers that provide web login credentials (e.g. Dify, OpenClaw). If not implemented, GetDetailsLoginInfo returns only the access URL info, with no credentials.
type ILLMContainerLoginInfo interface {
GetLoginInfo(ctx context.Context, userCred mcclient.TokenCredential, llm *SLLM) (*llm.LLMLoginInfo, error)
GetLoginInfo(ctx context.Context, userCred mcclient.TokenCredential, llm *SLLM) (*llm.LLMAccessInfo, error)
}
var (

View File

@@ -222,11 +222,11 @@ func (man *SMCPAgentManager) ValidateCreateData(ctx context.Context, userCred mc
}
llm := llmObj.(*SLLM)
input.LLMId = llm.Id
llmUrl, err := llm.GetLLMUrl(ctx, userCred)
llmUrl, err := llm.GetLLMAccessUrlInfo(ctx, userCred, query)
if err != nil {
return input, errors.Wrapf(err, "get LLM URL from LLM %s", input.LLMId)
}
input.LLMUrl = llmUrl
input.LLMUrl = llmUrl.LoginUrl
if len(input.Model) == 0 {
mdlInfos, err := llm.getProbedInstantModelsExt(ctx, userCred)
@@ -289,11 +289,11 @@ func (man *SMCPAgentManager) ValidateUpdateData(ctx context.Context, userCred mc
return input, errors.Wrapf(err, "fetch LLM by id %s", *input.LLMId)
}
llm := llmObj.(*SLLM)
llmUrl, err := llm.GetLLMUrl(ctx, userCred)
llmUrl, err := llm.GetLLMAccessUrlInfo(ctx, userCred, query)
if err != nil {
return input, errors.Wrapf(err, "get LLM URL from LLM %s", *input.LLMId)
}
input.LLMUrl = &llmUrl
input.LLMUrl = &llmUrl.LoginUrl
if input.Model == nil || len(*input.Model) == 0 {
mdlInfos, err := llm.getProbedInstantModelsExt(ctx, userCred)