Compare commits

..

4 Commits

14 changed files with 178 additions and 153 deletions

View File

View File

@ -470,7 +470,7 @@ class AdvancedChatDraftRunLoopNodeApi(Resource):
Run draft workflow loop node
"""
current_user, _ = current_account_with_tenant()
args = LoopNodeRunPayload.model_validate(console_ns.payload or {})
args = LoopNodeRunPayload.model_validate(console_ns.payload or {}).model_dump(exclude_none=True)
try:
response = AppGenerateService.generate_single_loop(
@ -508,7 +508,7 @@ class WorkflowDraftRunLoopNodeApi(Resource):
Run draft workflow loop node
"""
current_user, _ = current_account_with_tenant()
args = LoopNodeRunPayload.model_validate(console_ns.payload or {})
args = LoopNodeRunPayload.model_validate(console_ns.payload or {}).model_dump(exclude_none=True)
try:
response = AppGenerateService.generate_single_loop(
@ -999,7 +999,6 @@ class DraftWorkflowTriggerRunApi(Resource):
if not event:
return jsonable_encoder({"status": "waiting", "retry_in": LISTENING_RETRY_IN})
workflow_args = dict(event.workflow_args)
workflow_args[SKIP_PREPARE_USER_INPUTS_KEY] = True
return helper.compact_generate_response(
AppGenerateService.generate(
@ -1148,7 +1147,6 @@ class DraftWorkflowTriggerRunAllApi(Resource):
try:
workflow_args = dict(trigger_debug_event.workflow_args)
workflow_args[SKIP_PREPARE_USER_INPUTS_KEY] = True
response = AppGenerateService.generate(
app_model=app_model,

View File

@ -1,11 +1,9 @@
from __future__ import annotations
import contextvars
import logging
import threading
import uuid
from collections.abc import Generator, Mapping
from typing import TYPE_CHECKING, Any, Literal, Union, overload
from typing import Any, Literal, Union, overload
from flask import Flask, current_app
from pydantic import ValidationError
@ -15,9 +13,6 @@ from sqlalchemy.orm import Session, sessionmaker
import contexts
from configs import dify_config
from constants import UUID_NIL
if TYPE_CHECKING:
from controllers.console.app.workflow import LoopNodeRunPayload
from core.app.app_config.features.file_upload.manager import FileUploadConfigManager
from core.app.apps.advanced_chat.app_config_manager import AdvancedChatAppConfigManager
from core.app.apps.advanced_chat.app_runner import AdvancedChatAppRunner
@ -309,7 +304,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
workflow: Workflow,
node_id: str,
user: Account | EndUser,
args: LoopNodeRunPayload,
args: Mapping,
streaming: bool = True,
) -> Mapping[str, Any] | Generator[str | Mapping[str, Any], Any, None]:
"""
@ -325,7 +320,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
if not node_id:
raise ValueError("node_id is required")
if args.inputs is None:
if args.get("inputs") is None:
raise ValueError("inputs is required")
# convert to app config
@ -343,7 +338,7 @@ class AdvancedChatAppGenerator(MessageBasedAppGenerator):
stream=streaming,
invoke_from=InvokeFrom.DEBUGGER,
extras={"auto_generate_conversation_name": False},
single_loop_run=AdvancedChatAppGenerateEntity.SingleLoopRunEntity(node_id=node_id, inputs=args.inputs),
single_loop_run=AdvancedChatAppGenerateEntity.SingleLoopRunEntity(node_id=node_id, inputs=args["inputs"]),
)
contexts.plugin_tool_providers.set({})
contexts.plugin_tool_providers_lock.set(threading.Lock())

View File

@ -1,11 +1,9 @@
from __future__ import annotations
import contextvars
import logging
import threading
import uuid
from collections.abc import Generator, Mapping, Sequence
from typing import TYPE_CHECKING, Any, Literal, Union, overload
from typing import Any, Literal, Union, overload
from flask import Flask, current_app
from pydantic import ValidationError
@ -42,9 +40,6 @@ from models import Account, App, EndUser, Workflow, WorkflowNodeExecutionTrigger
from models.enums import WorkflowRunTriggeredFrom
from services.workflow_draft_variable_service import DraftVarLoader, WorkflowDraftVariableService
if TYPE_CHECKING:
from controllers.console.app.workflow import LoopNodeRunPayload
SKIP_PREPARE_USER_INPUTS_KEY = "_skip_prepare_user_inputs"
logger = logging.getLogger(__name__)
@ -386,7 +381,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
workflow: Workflow,
node_id: str,
user: Account | EndUser,
args: LoopNodeRunPayload,
args: Mapping[str, Any],
streaming: bool = True,
) -> Mapping[str, Any] | Generator[str | Mapping[str, Any], None, None]:
"""
@ -402,7 +397,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
if not node_id:
raise ValueError("node_id is required")
if args.inputs is None:
if args.get("inputs") is None:
raise ValueError("inputs is required")
# convert to app config
@ -418,7 +413,7 @@ class WorkflowAppGenerator(BaseAppGenerator):
stream=streaming,
invoke_from=InvokeFrom.DEBUGGER,
extras={"auto_generate_conversation_name": False},
single_loop_run=WorkflowAppGenerateEntity.SingleLoopRunEntity(node_id=node_id, inputs=args.inputs or {}),
single_loop_run=WorkflowAppGenerateEntity.SingleLoopRunEntity(node_id=node_id, inputs=args["inputs"]),
workflow_execution_id=str(uuid.uuid4()),
)
contexts.plugin_tool_providers.set({})

View File

@ -5,7 +5,7 @@ from collections.abc import Mapping, Sequence
from enum import StrEnum, auto
from typing import Annotated, Any, Literal, Union
from pydantic import BaseModel, Field, field_serializer, field_validator
from pydantic import BaseModel, Field, JsonValue, field_serializer, field_validator
class PromptMessageRole(StrEnum):
@ -69,6 +69,7 @@ class PromptMessageContent(ABC, BaseModel):
"""
type: PromptMessageContentType
opaque_body: JsonValue | None = None
class TextPromptMessageContent(PromptMessageContent):
@ -244,6 +245,7 @@ class AssistantPromptMessage(PromptMessage):
role: PromptMessageRole = PromptMessageRole.ASSISTANT
tool_calls: list[ToolCall] = []
opaque_body: JsonValue | None = None
def is_empty(self) -> bool:
"""

View File

@ -164,6 +164,7 @@ class LargeLanguageModel(AIModel):
usage = LLMUsage.empty_usage()
system_fingerprint = None
tools_calls: list[AssistantPromptMessage.ToolCall] = []
assistant_opaque_body = None
for chunk in result:
if isinstance(chunk.delta.message.content, str):
@ -172,6 +173,8 @@ class LargeLanguageModel(AIModel):
content_list.extend(chunk.delta.message.content)
if chunk.delta.message.tool_calls:
_increase_tool_call(chunk.delta.message.tool_calls, tools_calls)
if assistant_opaque_body is None and chunk.delta.message.opaque_body is not None:
assistant_opaque_body = chunk.delta.message.opaque_body
usage = chunk.delta.usage or LLMUsage.empty_usage()
system_fingerprint = chunk.system_fingerprint
@ -183,6 +186,7 @@ class LargeLanguageModel(AIModel):
message=AssistantPromptMessage(
content=content or content_list,
tool_calls=tools_calls,
opaque_body=assistant_opaque_body,
),
usage=usage,
system_fingerprint=system_fingerprint,
@ -261,6 +265,8 @@ class LargeLanguageModel(AIModel):
usage = None
system_fingerprint = None
real_model = model
assistant_opaque_body = None
tools_calls: list[AssistantPromptMessage.ToolCall] = []
def _update_message_content(content: str | list[PromptMessageContentUnionTypes] | None):
if not content:
@ -294,6 +300,10 @@ class LargeLanguageModel(AIModel):
)
_update_message_content(chunk.delta.message.content)
if chunk.delta.message.tool_calls:
_increase_tool_call(chunk.delta.message.tool_calls, tools_calls)
if assistant_opaque_body is None and chunk.delta.message.opaque_body is not None:
assistant_opaque_body = chunk.delta.message.opaque_body
real_model = chunk.model
if chunk.delta.usage:
@ -304,7 +314,11 @@ class LargeLanguageModel(AIModel):
except Exception as e:
raise self._transform_invoke_error(e)
assistant_message = AssistantPromptMessage(content=message_content)
assistant_message = AssistantPromptMessage(
content=message_content,
tool_calls=tools_calls,
opaque_body=assistant_opaque_body,
)
self._trigger_after_invoke_callbacks(
model=model,
result=LLMResult(

View File

@ -1,21 +0,0 @@
from enum import StrEnum
class HostedTrialProvider(StrEnum):
"""
Enum representing hosted model provider names for trial access.
"""
OPENAI = "langgenius/openai/openai"
ANTHROPIC = "langgenius/anthropic/anthropic"
GEMINI = "langgenius/gemini/google"
X = "langgenius/x/x"
DEEPSEEK = "langgenius/deepseek/deepseek"
TONGYI = "langgenius/tongyi/tongyi"
@property
def config_key(self) -> str:
"""Return the config key used in dify_config (e.g., HOSTED_{config_key}_PAID_ENABLED)."""
if self == HostedTrialProvider.X:
return "XAI"
return self.name

View File

@ -1,8 +1,6 @@
from __future__ import annotations
import uuid
from collections.abc import Generator, Mapping
from typing import TYPE_CHECKING, Any, Union
from typing import Any, Union
from configs import dify_config
from core.app.apps.advanced_chat.app_generator import AdvancedChatAppGenerator
@ -20,9 +18,6 @@ from services.errors.app import QuotaExceededError, WorkflowIdFormatError, Workf
from services.errors.llm import InvokeRateLimitError
from services.workflow_service import WorkflowService
if TYPE_CHECKING:
from controllers.console.app.workflow import LoopNodeRunPayload
class AppGenerateService:
@classmethod
@ -170,9 +165,7 @@ class AppGenerateService:
raise ValueError(f"Invalid app mode {app_model.mode}")
@classmethod
def generate_single_loop(
cls, app_model: App, user: Account, node_id: str, args: LoopNodeRunPayload, streaming: bool = True
):
def generate_single_loop(cls, app_model: App, user: Account, node_id: str, args: Any, streaming: bool = True):
if app_model.mode == AppMode.ADVANCED_CHAT:
workflow = cls._get_workflow(app_model, InvokeFrom.DEBUGGER)
return AdvancedChatAppGenerator.convert_to_event_stream(

View File

@ -143,7 +143,6 @@ class BillingService:
raise ValueError("Invalid arguments.")
if method == "POST" and response.status_code != httpx.codes.OK:
raise ValueError(f"Unable to send request to {url}. Please try again later or contact support.")
logger.info("billing_service: %s _send_request: response: %s", method, response.json())
return response.json()
@staticmethod
@ -166,7 +165,6 @@ class BillingService:
def delete_account(cls, account_id: str):
"""Delete account."""
params = {"account_id": account_id}
logger.info("billing_service: delete_account: params: %s", params)
return cls._send_request("DELETE", "/account/", params=params)
@classmethod

View File

@ -4,7 +4,6 @@ from pydantic import BaseModel, ConfigDict, Field
from configs import dify_config
from enums.cloud_plan import CloudPlan
from enums.hosted_provider import HostedTrialProvider
from services.billing_service import BillingService
from services.enterprise.enterprise_service import EnterpriseService
@ -171,7 +170,6 @@ class SystemFeatureModel(BaseModel):
plugin_installation_permission: PluginInstallationPermissionModel = PluginInstallationPermissionModel()
enable_change_email: bool = True
plugin_manager: PluginManagerModel = PluginManagerModel()
trial_models: list[str] = []
enable_trial_app: bool = False
enable_explore_banner: bool = False
@ -229,21 +227,9 @@ class FeatureService:
system_features.is_allow_register = dify_config.ALLOW_REGISTER
system_features.is_allow_create_workspace = dify_config.ALLOW_CREATE_WORKSPACE
system_features.is_email_setup = dify_config.MAIL_TYPE is not None and dify_config.MAIL_TYPE != ""
system_features.trial_models = cls._fulfill_trial_models_from_env()
system_features.enable_trial_app = dify_config.ENABLE_TRIAL_APP
system_features.enable_explore_banner = dify_config.ENABLE_EXPLORE_BANNER
@classmethod
def _fulfill_trial_models_from_env(cls) -> list[str]:
    """Return hosted provider ids whose trial access is switched on via config.

    A provider qualifies only when BOTH ``HOSTED_{key}_PAID_ENABLED`` and
    ``HOSTED_{key}_TRIAL_ENABLED`` are truthy on ``dify_config``, where ``key``
    is the provider's ``config_key`` (e.g. "OPENAI", or "XAI" for X).
    Missing config attributes default to False — the provider is excluded.
    """
    return [
        provider.value
        for provider in HostedTrialProvider
        if (
            getattr(dify_config, f"HOSTED_{provider.config_key}_PAID_ENABLED", False)
            and getattr(dify_config, f"HOSTED_{provider.config_key}_TRIAL_ENABLED", False)
        )
    ]
@classmethod
def _fulfill_params_from_env(cls, features: FeatureModel):
features.can_replace_logo = dify_config.CAN_REPLACE_LOGO

View File

@ -14,13 +14,10 @@ logger = logging.getLogger(__name__)
@shared_task(queue="dataset")
def delete_account_task(account_id):
with session_factory.create_session() as session:
logger.info("delete_account_task: account_id: %s", account_id)
account = session.query(Account).where(Account.id == account_id).first()
try:
if dify_config.BILLING_ENABLED:
logger.info("delete_account_task: before delete_account: %s", account_id)
BillingService.delete_account(account_id)
logger.info("delete_account_task: after delete_account: %s", account_id)
except Exception:
logger.exception("Failed to delete account %s from billing service.", account_id)
raise
@ -30,4 +27,3 @@ def delete_account_task(account_id):
return
# send success email
send_deletion_success_task.delay(account.email)
logger.info("delete_account_task: delete_account successfully: %s", account_id)

View File

@ -0,0 +1,145 @@
from __future__ import annotations
from datetime import datetime
from typing import Any
from unittest.mock import patch
from core.model_runtime.callbacks.base_callback import Callback
from core.model_runtime.entities.common_entities import I18nObject
from core.model_runtime.entities.llm_entities import LLMResult, LLMResultChunk, LLMResultChunkDelta
from core.model_runtime.entities.message_entities import AssistantPromptMessage, PromptMessage, UserPromptMessage
from core.model_runtime.entities.model_entities import ModelType
from core.model_runtime.entities.provider_entities import ConfigurateMethod, ProviderEntity
from core.model_runtime.model_providers.__base.large_language_model import LargeLanguageModel
from core.plugin.entities.plugin_daemon import PluginModelProviderEntity
class _CaptureAfterInvokeCallback(Callback):
    """Test callback that records the LLMResult handed to ``on_after_invoke``.

    Every other hook is a deliberate no-op; only the final result is captured.
    """

    after_result: LLMResult | None

    def __init__(self) -> None:
        self.after_result = None

    def on_before_invoke(self, **kwargs: Any) -> None:  # noqa: ANN401
        """No-op: pre-invoke state is irrelevant to this capture."""
        return None

    def on_new_chunk(self, **kwargs: Any) -> None:  # noqa: ANN401
        """No-op: individual chunks are not inspected here."""
        return None

    def on_after_invoke(self, result: LLMResult, **kwargs: Any) -> None:  # noqa: ANN401
        """Stash the final aggregated result for later assertions."""
        self.after_result = result

    def on_invoke_error(self, **kwargs: Any) -> None:  # noqa: ANN401
        """No-op: errors are not expected in these tests."""
        return None
def _build_llm_instance() -> LargeLanguageModel:
    """Build a minimal LargeLanguageModel wired to a synthetic plugin provider.

    The provider/plugin identifiers are throwaway fixtures; only the object
    graph shape matters for the invoke tests.
    """
    provider_declaration = ProviderEntity(
        provider="test",
        label=I18nObject(en_US="test"),
        supported_model_types=[ModelType.LLM],
        configurate_methods=[ConfigurateMethod.PREDEFINED_MODEL],
    )
    plugin_provider = PluginModelProviderEntity(
        id="pmp_1",
        created_at=datetime.now(),
        updated_at=datetime.now(),
        provider="test",
        tenant_id="tenant_1",
        plugin_unique_identifier="test/plugin",
        plugin_id="test/plugin",
        declaration=provider_declaration,
    )
    return LargeLanguageModel(
        tenant_id="tenant_1",
        plugin_id="test/plugin",
        provider_name="test",
        plugin_model_provider=plugin_provider,
    )
def test_invoke_non_stream_preserves_assistant_opaque_body() -> None:
    """Non-streaming invoke must carry the chunk's opaque_body into the final LLMResult."""
    llm = _build_llm_instance()
    prompts: list[PromptMessage] = [UserPromptMessage(content="hi")]

    streamed_chunk = LLMResultChunk(
        model="gpt-test",
        prompt_messages=[],
        delta=LLMResultChunkDelta(
            index=0,
            message=AssistantPromptMessage(content="hello", opaque_body={"provider_message_id": "msg_123"}),
        ),
    )

    def _fake_invoke_llm(self, **kwargs: Any):  # noqa: ANN001, ANN401
        yield streamed_chunk

    with patch("core.plugin.impl.model.PluginModelClient.invoke_llm", new=_fake_invoke_llm):
        # stream=False forces the aggregation path that folds chunks into one LLMResult.
        outcome = llm.invoke(
            model="gpt-test",
            credentials={},
            prompt_messages=prompts,
            model_parameters={},
            stream=False,
        )

    assert isinstance(outcome, LLMResult)
    assert outcome.message.opaque_body == {"provider_message_id": "msg_123"}
    assert list(outcome.prompt_messages) == prompts
def test_invoke_stream_preserves_assistant_opaque_body_in_after_callback() -> None:
    """Streaming invoke must merge split tool-call deltas and surface opaque_body
    to the after-invoke callback's aggregated result."""
    llm = _build_llm_instance()
    prompts: list[PromptMessage] = [UserPromptMessage(content="hi")]
    capture = _CaptureAfterInvokeCallback()

    # A tool call split across two chunks: the opener carries id/type/name,
    # the continuation (empty id/type/name) carries the rest of the arguments.
    opening_call = AssistantPromptMessage.ToolCall(
        id="1",
        type="function",
        function=AssistantPromptMessage.ToolCall.ToolCallFunction(name="func_foo", arguments='{"arg1": '),
    )
    continuation_call = AssistantPromptMessage.ToolCall(
        id="",
        type="",
        function=AssistantPromptMessage.ToolCall.ToolCallFunction(name="", arguments='"value"}'),
    )

    first_chunk = LLMResultChunk(
        model="gpt-test",
        prompt_messages=[],
        delta=LLMResultChunkDelta(
            index=0,
            message=AssistantPromptMessage(
                content="h", tool_calls=[opening_call], opaque_body={"provider_message_id": "msg_123"}
            ),
        ),
    )
    second_chunk = LLMResultChunk(
        model="gpt-test",
        prompt_messages=[],
        delta=LLMResultChunkDelta(
            index=0,
            message=AssistantPromptMessage(content="i", tool_calls=[continuation_call]),
        ),
    )

    def _fake_invoke_llm(self, **kwargs: Any):  # noqa: ANN001, ANN401
        yield first_chunk
        yield second_chunk

    with patch("core.plugin.impl.model.PluginModelClient.invoke_llm", new=_fake_invoke_llm):
        stream = llm.invoke(
            model="gpt-test",
            credentials={},
            prompt_messages=prompts,
            model_parameters={},
            stream=True,
            callbacks=[capture],
        )
        # Consume inside the patch context: the generator pulls from the mock lazily.
        emitted = list(stream)

    assert emitted[0].prompt_messages == prompts
    assert capture.after_result is not None
    assert capture.after_result.message.opaque_body == {"provider_message_id": "msg_123"}
    assert len(capture.after_result.message.tool_calls) == 1
    assert capture.after_result.message.tool_calls[0].function.arguments == '{"arg1": "value"}'

View File

@ -67,7 +67,7 @@
"@lexical/react": "0.38.2",
"@lexical/selection": "0.38.2",
"@lexical/text": "0.38.2",
"@lexical/utils": "0.39.0",
"@lexical/utils": "0.38.2",
"@monaco-editor/react": "4.7.0",
"@octokit/core": "6.1.6",
"@octokit/request-error": "6.1.8",

80
web/pnpm-lock.yaml generated
View File

@ -94,8 +94,8 @@ importers:
specifier: 0.38.2
version: 0.38.2
'@lexical/utils':
specifier: 0.39.0
version: 0.39.0
specifier: 0.38.2
version: 0.38.2
'@monaco-editor/react':
specifier: 4.7.0
version: 4.7.0(monaco-editor@0.55.1)(react-dom@19.2.3(react@19.2.3))(react@19.2.3)
@ -2066,9 +2066,6 @@ packages:
'@lexical/clipboard@0.38.2':
resolution: {integrity: sha512-dDShUplCu8/o6BB9ousr3uFZ9bltR+HtleF/Tl8FXFNPpZ4AXhbLKUoJuucRuIr+zqT7RxEv/3M6pk/HEoE6NQ==}
'@lexical/clipboard@0.39.0':
resolution: {integrity: sha512-ylrHy8M+I5EH4utwqivslugqQhvgLTz9VEJdrb2RjbhKQEXwMcqKCRWh6cRfkYx64onE2YQE0nRIdzHhExEpLQ==}
'@lexical/code@0.38.2':
resolution: {integrity: sha512-wpqgbmPsfi/+8SYP0zI2kml09fGPRhzO5litR9DIbbSGvcbawMbRNcKLO81DaTbsJRnBJiQvbBBBJAwZKRqgBw==}
@ -2084,9 +2081,6 @@ packages:
'@lexical/extension@0.38.2':
resolution: {integrity: sha512-qbUNxEVjAC0kxp7hEMTzktj0/51SyJoIJWK6Gm790b4yNBq82fEPkksfuLkRg9VQUteD0RT1Nkjy8pho8nNamw==}
'@lexical/extension@0.39.0':
resolution: {integrity: sha512-mp/WcF8E53FWPiUHgHQz382J7u7C4+cELYNkC00dKaymf8NhS6M65Y8tyDikNGNUcLXSzaluwK0HkiKjTYGhVQ==}
'@lexical/hashtag@0.38.2':
resolution: {integrity: sha512-jNI4Pv+plth39bjOeeQegMypkjDmoMWBMZtV0lCynBpkkPFlfMnyL9uzW/IxkZnX8LXWSw5mbWk07nqOUNTCrA==}
@ -2096,18 +2090,12 @@ packages:
'@lexical/html@0.38.2':
resolution: {integrity: sha512-pC5AV+07bmHistRwgG3NJzBMlIzSdxYO6rJU4eBNzyR4becdiLsI4iuv+aY7PhfSv+SCs7QJ9oc4i5caq48Pkg==}
'@lexical/html@0.39.0':
resolution: {integrity: sha512-7VLWP5DpzBg3kKctpNK6PbhymKAtU6NAnKieopCfCIWlMW+EqpldteiIXGqSqrMRK0JWTmF1gKgr9nnQyOOsXw==}
'@lexical/link@0.38.2':
resolution: {integrity: sha512-UOKTyYqrdCR9+7GmH6ZVqJTmqYefKGMUHMGljyGks+OjOGZAQs78S1QgcPEqltDy+SSdPSYK7wAo6gjxZfEq9g==}
'@lexical/list@0.38.2':
resolution: {integrity: sha512-OQm9TzatlMrDZGxMxbozZEHzMJhKxAbH1TOnOGyFfzpfjbnFK2y8oLeVsfQZfZRmiqQS4Qc/rpFnRP2Ax5dsbA==}
'@lexical/list@0.39.0':
resolution: {integrity: sha512-mxgSxUrakTCHtC+gF30BChQBJTsCMiMgfC2H5VvhcFwXMgsKE/aK9+a+C/sSvvzCmPXqzYsuAcGkJcrY3e5xlw==}
'@lexical/mark@0.38.2':
resolution: {integrity: sha512-U+8KGwc3cP5DxSs15HfkP2YZJDs5wMbWQAwpGqep9bKphgxUgjPViKhdi+PxIt2QEzk7WcoZWUsK1d2ty/vSmg==}
@ -2135,24 +2123,15 @@ packages:
'@lexical/selection@0.38.2':
resolution: {integrity: sha512-eMFiWlBH6bEX9U9sMJ6PXPxVXTrihQfFeiIlWLuTpEIDF2HRz7Uo1KFRC/yN6q0DQaj7d9NZYA6Mei5DoQuz5w==}
'@lexical/selection@0.39.0':
resolution: {integrity: sha512-j0cgNuTKDCdf/4MzRnAUwEqG6C/WQp18k2WKmX5KIVZJlhnGIJmlgSBrxjo8AuZ16DIHxTm2XNB4cUDCgZNuPA==}
'@lexical/table@0.38.2':
resolution: {integrity: sha512-uu0i7yz0nbClmHOO5ZFsinRJE6vQnFz2YPblYHAlNigiBedhqMwSv5bedrzDq8nTTHwych3mC63tcyKIrM+I1g==}
'@lexical/table@0.39.0':
resolution: {integrity: sha512-1eH11kV4bJ0fufCYl8DpE19kHwqUI8Ev5CZwivfAtC3ntwyNkeEpjCc0pqeYYIWN/4rTZ5jgB3IJV4FntyfCzw==}
'@lexical/text@0.38.2':
resolution: {integrity: sha512-+juZxUugtC4T37aE3P0l4I9tsWbogDUnTI/mgYk4Ht9g+gLJnhQkzSA8chIyfTxbj5i0A8yWrUUSw+/xA7lKUQ==}
'@lexical/utils@0.38.2':
resolution: {integrity: sha512-y+3rw15r4oAWIEXicUdNjfk8018dbKl7dWHqGHVEtqzAYefnEYdfD2FJ5KOTXfeoYfxi8yOW7FvzS4NZDi8Bfw==}
'@lexical/utils@0.39.0':
resolution: {integrity: sha512-8YChidpMJpwQc4nex29FKUeuZzC++QCS/Jt46lPuy1GS/BZQoPHFKQ5hyVvM9QVhc5CEs4WGNoaCZvZIVN8bQw==}
'@lexical/yjs@0.38.2':
resolution: {integrity: sha512-fg6ZHNrVQmy1AAxaTs8HrFbeNTJCaCoEDPi6pqypHQU3QVfqr4nq0L0EcHU/TRlR1CeduEPvZZIjUUxWTZ0u8g==}
peerDependencies:
@ -2640,9 +2619,6 @@ packages:
'@preact/signals-core@1.12.1':
resolution: {integrity: sha512-BwbTXpj+9QutoZLQvbttRg5x3l5468qaV2kufh+51yha1c53ep5dY4kTuZR35+3pAZxpfQerGJiQqg34ZNZ6uA==}
'@preact/signals-core@1.12.2':
resolution: {integrity: sha512-5Yf8h1Ke3SMHr15xl630KtwPTW4sYDFkkxS0vQ8UiQLWwZQnrF9IKaVG1mN5VcJz52EcWs2acsc/Npjha/7ysA==}
'@preact/signals@1.3.2':
resolution: {integrity: sha512-naxcJgUJ6BTOROJ7C3QML7KvwKwCXQJYTc5L/b0eEsdYgPB6SxwoQ1vDGcS0Q7GVjAenVq/tXrybVdFShHYZWg==}
peerDependencies:
@ -6247,9 +6223,6 @@ packages:
lexical@0.38.2:
resolution: {integrity: sha512-JJmfsG3c4gwBHzUGffbV7ifMNkKAWMCnYE3xJl87gty7hjyV5f3xq7eqTjP5HFYvO4XpjJvvWO2/djHp5S10tw==}
lexical@0.39.0:
resolution: {integrity: sha512-lpLv7MEJH5QDujEDlYqettL3ATVtNYjqyimzqgrm0RvCm3AO9WXSdsgTxuN7IAZRu88xkxCDeYubeUf4mNZVdg==}
lib0@0.2.117:
resolution: {integrity: sha512-DeXj9X5xDCjgKLU/7RR+/HQEVzuuEUiwldwOGsHK/sfAfELGWEyTcf0x+uOvCvK3O2zPmZePXWL85vtia6GyZw==}
engines: {node: '>=16'}
@ -10400,14 +10373,6 @@ snapshots:
'@lexical/utils': 0.38.2
lexical: 0.38.2
'@lexical/clipboard@0.39.0':
dependencies:
'@lexical/html': 0.39.0
'@lexical/list': 0.39.0
'@lexical/selection': 0.39.0
'@lexical/utils': 0.39.0
lexical: 0.39.0
'@lexical/code@0.38.2':
dependencies:
'@lexical/utils': 0.38.2
@ -10436,12 +10401,6 @@ snapshots:
'@preact/signals-core': 1.12.1
lexical: 0.38.2
'@lexical/extension@0.39.0':
dependencies:
'@lexical/utils': 0.39.0
'@preact/signals-core': 1.12.2
lexical: 0.39.0
'@lexical/hashtag@0.38.2':
dependencies:
'@lexical/text': 0.38.2
@ -10460,12 +10419,6 @@ snapshots:
'@lexical/utils': 0.38.2
lexical: 0.38.2
'@lexical/html@0.39.0':
dependencies:
'@lexical/selection': 0.39.0
'@lexical/utils': 0.39.0
lexical: 0.39.0
'@lexical/link@0.38.2':
dependencies:
'@lexical/extension': 0.38.2
@ -10479,13 +10432,6 @@ snapshots:
'@lexical/utils': 0.38.2
lexical: 0.38.2
'@lexical/list@0.39.0':
dependencies:
'@lexical/extension': 0.39.0
'@lexical/selection': 0.39.0
'@lexical/utils': 0.39.0
lexical: 0.39.0
'@lexical/mark@0.38.2':
dependencies:
'@lexical/utils': 0.38.2
@ -10555,10 +10501,6 @@ snapshots:
dependencies:
lexical: 0.38.2
'@lexical/selection@0.39.0':
dependencies:
lexical: 0.39.0
'@lexical/table@0.38.2':
dependencies:
'@lexical/clipboard': 0.38.2
@ -10566,13 +10508,6 @@ snapshots:
'@lexical/utils': 0.38.2
lexical: 0.38.2
'@lexical/table@0.39.0':
dependencies:
'@lexical/clipboard': 0.39.0
'@lexical/extension': 0.39.0
'@lexical/utils': 0.39.0
lexical: 0.39.0
'@lexical/text@0.38.2':
dependencies:
lexical: 0.38.2
@ -10584,13 +10519,6 @@ snapshots:
'@lexical/table': 0.38.2
lexical: 0.38.2
'@lexical/utils@0.39.0':
dependencies:
'@lexical/list': 0.39.0
'@lexical/selection': 0.39.0
'@lexical/table': 0.39.0
lexical: 0.39.0
'@lexical/yjs@0.38.2(yjs@13.6.27)':
dependencies:
'@lexical/offset': 0.38.2
@ -11045,8 +10973,6 @@ snapshots:
'@preact/signals-core@1.12.1': {}
'@preact/signals-core@1.12.2': {}
'@preact/signals@1.3.2(preact@10.28.0)':
dependencies:
'@preact/signals-core': 1.12.1
@ -15164,8 +15090,6 @@ snapshots:
lexical@0.38.2: {}
lexical@0.39.0: {}
lib0@0.2.117:
dependencies:
isomorphic.js: 0.2.5