Merge remote-tracking branch 'myori/main' into feat/collaboration2

This commit is contained in:
hjlarry
2026-01-24 15:22:07 +08:00
873 changed files with 37203 additions and 8762 deletions

View File

@ -0,0 +1,60 @@
"""make message annotation question not nullable
Revision ID: 9e6fa5cbcd80
Revises: 288345cd01d1
Create Date: 2025-11-06 16:03:54.549378
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9e6fa5cbcd80'
# Parent revision in the migration chain; this assignment (not the module
# docstring's "Revises:" line) is what Alembic actually reads.
down_revision = '288345cd01d1'
branch_labels = None
depends_on = None
def upgrade():
    """Backfill NULL message_annotations.question values, then enforce NOT NULL.

    Data migration runs in three steps:
      1. Annotations linked to a message copy that message's query.
      2. Any annotation still NULL (no linked message, or the linked message
         row no longer exists) falls back to an empty string.
      3. The column is altered to NOT NULL once no NULLs remain.
    """
    conn = op.get_bind()

    # Lightweight table constructs for the data migration — avoids depending
    # on the current ORM models, which may have drifted since this revision.
    annotations = sa.table(
        "message_annotations",
        sa.column("id", sa.String),
        sa.column("message_id", sa.String),
        sa.column("question", sa.Text),
    )
    messages = sa.table(
        "messages",
        sa.column("id", sa.String),
        sa.column("query", sa.Text),
    )

    # Step 1: correlated scalar subquery pulls the originating message's query.
    question_from_message = (
        sa.select(sa.func.coalesce(messages.c.query, ""))
        .where(messages.c.id == annotations.c.message_id)
        .scalar_subquery()
    )
    conn.execute(
        sa.update(annotations)
        .where(
            sa.and_(
                annotations.c.question.is_(None),
                annotations.c.message_id.isnot(None),
            )
        )
        .values(question=question_from_message)
    )

    # Step 2: whatever is still NULL gets an empty string.
    conn.execute(
        sa.update(annotations)
        .where(annotations.c.question.is_(None))
        .values(question="")
    )

    # Step 3: safe to tighten the constraint now.
    with op.batch_alter_table('message_annotations', schema=None) as batch:
        batch.alter_column('question', existing_type=sa.TEXT(), nullable=False)
def downgrade():
    """Relax message_annotations.question back to nullable (reverse of upgrade)."""
    with op.batch_alter_table('message_annotations', schema=None) as batch:
        batch.alter_column('question', existing_type=sa.TEXT(), nullable=True)

View File

@ -0,0 +1,73 @@
"""add table explore banner and trial
Revision ID: f9f6d18a37f9
Revises: 9e6fa5cbcd80
Create Date: 2026-01-17 11:10:18.079355
"""
from alembic import op
import models as models
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = 'f9f6d18a37f9'
# Parent revision in the migration chain.
down_revision = '9e6fa5cbcd80'
branch_labels = None
depends_on = None
def upgrade():
    """Create the explore-banner and trial-app tables with their indexes."""
    # ### commands auto generated by Alembic - please adjust! ###

    # Per-account usage counter: how many times an account has run a trial app.
    op.create_table(
        'account_trial_app_records',
        sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('account_id', models.types.StringUUID(), nullable=False),
        sa.Column('app_id', models.types.StringUUID(), nullable=False),
        sa.Column('count', sa.Integer(), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.PrimaryKeyConstraint('id', name='user_trial_app_pkey'),
        sa.UniqueConstraint('account_id', 'app_id', name='unique_account_trial_app_record'),
    )
    with op.batch_alter_table('account_trial_app_records', schema=None) as records:
        records.create_index('account_trial_app_record_account_id_idx', ['account_id'], unique=False)
        records.create_index('account_trial_app_record_app_id_idx', ['app_id'], unique=False)

    # NOTE(review): 'exporle_banners' / 'exporler_banner_pkey' look like
    # misspellings of "explore"; renaming now would require a coordinated
    # model change plus a follow-up migration, so the names are kept as-is.
    op.create_table(
        'exporle_banners',
        sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('content', sa.JSON(), nullable=False),
        sa.Column('link', sa.String(length=255), nullable=False),
        sa.Column('sort', sa.Integer(), nullable=False),
        sa.Column('status', sa.String(length=255), server_default=sa.text("'enabled'::character varying"), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('language', sa.String(length=255), server_default=sa.text("'en-US'::character varying"), nullable=False),
        sa.PrimaryKeyConstraint('id', name='exporler_banner_pkey'),
    )

    # Apps a tenant has opened up for trial, each with its own trial limit.
    # NOTE(review): 'unique_trail_app_id' misspells "trial" — kept for schema consistency.
    op.create_table(
        'trial_apps',
        sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuid_generate_v4()'), nullable=False),
        sa.Column('app_id', models.types.StringUUID(), nullable=False),
        sa.Column('tenant_id', models.types.StringUUID(), nullable=False),
        sa.Column('created_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.Column('trial_limit', sa.Integer(), nullable=False),
        sa.PrimaryKeyConstraint('id', name='trial_app_pkey'),
        sa.UniqueConstraint('app_id', name='unique_trail_app_id'),
    )
    with op.batch_alter_table('trial_apps', schema=None) as trials:
        trials.create_index('trial_app_app_id_idx', ['app_id'], unique=False)
        trials.create_index('trial_app_tenant_id_idx', ['tenant_id'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Drop the trial/banner tables created by upgrade(), indexes first."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('trial_apps', schema=None) as trials:
        trials.drop_index('trial_app_tenant_id_idx')
        trials.drop_index('trial_app_app_id_idx')
    op.drop_table('trial_apps')

    # No secondary indexes on this table — drop it directly.
    op.drop_table('exporle_banners')

    with op.batch_alter_table('account_trial_app_records', schema=None) as records:
        records.drop_index('account_trial_app_record_app_id_idx')
        records.drop_index('account_trial_app_record_account_id_idx')
    op.drop_table('account_trial_app_records')
    # ### end Alembic commands ###

View File

@ -0,0 +1,95 @@
"""create workflow_archive_logs
Revision ID: 9d77545f524e
Revises: f9f6d18a37f9
Create Date: 2026-01-06 17:18:56.292479
"""
from alembic import op
import models as models
import sqlalchemy as sa
def _is_pg(conn):
return conn.dialect.name == "postgresql"
# revision identifiers, used by Alembic.
revision = '9d77545f524e'
# Parent revision in the migration chain.
down_revision = 'f9f6d18a37f9'
branch_labels = None
depends_on = None
def _workflow_archive_log_columns(use_uuidv7_default):
    """Return the column/constraint list for workflow_archive_logs.

    The PostgreSQL and generic variants of the table are identical except for
    the primary key: on PostgreSQL the id gets a server-side uuidv7() default,
    while other dialects leave id generation to the application.
    """
    if use_uuidv7_default:
        id_column = sa.Column('id', models.types.StringUUID(), server_default=sa.text('uuidv7()'), nullable=False)
    else:
        id_column = sa.Column('id', models.types.StringUUID(), nullable=False)
    return [
        id_column,
        sa.Column('log_id', models.types.StringUUID(), nullable=True),
        sa.Column('tenant_id', models.types.StringUUID(), nullable=False),
        sa.Column('app_id', models.types.StringUUID(), nullable=False),
        sa.Column('workflow_id', models.types.StringUUID(), nullable=False),
        sa.Column('workflow_run_id', models.types.StringUUID(), nullable=False),
        sa.Column('created_by_role', sa.String(length=255), nullable=False),
        sa.Column('created_by', models.types.StringUUID(), nullable=False),
        sa.Column('log_created_at', sa.DateTime(), nullable=True),
        sa.Column('log_created_from', sa.String(length=255), nullable=True),
        sa.Column('run_version', sa.String(length=255), nullable=False),
        sa.Column('run_status', sa.String(length=255), nullable=False),
        sa.Column('run_triggered_from', sa.String(length=255), nullable=False),
        sa.Column('run_error', models.types.LongText(), nullable=True),
        sa.Column('run_elapsed_time', sa.Float(), server_default=sa.text('0'), nullable=False),
        sa.Column('run_total_tokens', sa.BigInteger(), server_default=sa.text('0'), nullable=False),
        sa.Column('run_total_steps', sa.Integer(), server_default=sa.text('0'), nullable=True),
        sa.Column('run_created_at', sa.DateTime(), nullable=False),
        sa.Column('run_finished_at', sa.DateTime(), nullable=True),
        sa.Column('run_exceptions_count', sa.Integer(), server_default=sa.text('0'), nullable=True),
        sa.Column('trigger_metadata', models.types.LongText(), nullable=True),
        sa.Column('archived_at', sa.DateTime(), server_default=sa.text('CURRENT_TIMESTAMP'), nullable=False),
        sa.PrimaryKeyConstraint('id', name='workflow_archive_log_pkey'),
    ]


def upgrade():
    """Create workflow_archive_logs plus its lookup indexes.

    The previous revision duplicated the full column definition in both
    dialect branches, differing only in the id column's server default — a
    drift hazard. The shared definition now lives in
    _workflow_archive_log_columns() so the two variants cannot diverge.
    """
    conn = op.get_bind()
    op.create_table(
        'workflow_archive_logs',
        *_workflow_archive_log_columns(use_uuidv7_default=_is_pg(conn)),
    )
    with op.batch_alter_table('workflow_archive_logs', schema=None) as batch_op:
        batch_op.create_index('workflow_archive_log_app_idx', ['tenant_id', 'app_id'], unique=False)
        batch_op.create_index('workflow_archive_log_run_created_at_idx', ['run_created_at'], unique=False)
        batch_op.create_index('workflow_archive_log_workflow_run_id_idx', ['workflow_run_id'], unique=False)
def downgrade():
    """Drop workflow_archive_logs and its indexes (reverse of upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    index_names = (
        'workflow_archive_log_workflow_run_id_idx',
        'workflow_archive_log_run_created_at_idx',
        'workflow_archive_log_app_idx',
    )
    with op.batch_alter_table('workflow_archive_logs', schema=None) as batch:
        for index_name in index_names:
            batch.drop_index(index_name)
    op.drop_table('workflow_archive_logs')
    # ### end Alembic commands ###