# -*- coding: utf-8 -*-
"""
AgentLoop Dataset 管理操作示例

演示 Dataset 资源级别的增删查改列：
- create_dataset   创建 Dataset 并定义 Schema
- get_dataset      获取 Dataset 详情（含 Schema）
- list_datasets    列举所有 Dataset（支持分页 & 名称过滤）
- update_dataset   更新 Dataset 描述
- delete_dataset   删除 Dataset

使用前请设置环境变量：
- SLSDEMO_ALIYUN_ACCESS_KEY_ID
- SLSDEMO_ALIYUN_ACCESS_KEY_SECRET
- ALIBABA_CLOUD_CMS_ENDPOINT   (如: cms.cn-shanghai.aliyuncs.com)
- ALIBABA_CLOUD_CMS_WORKSPACE
"""

import json
import os

from alibabacloud_cms20240330.client import Client
from alibabacloud_cms20240330.models import (
    CreateDatasetRequest,
    IndexJsonKey,
    IndexKey,
    ListDatasetsRequest,
    UpdateDatasetRequest,
)
from alibabacloud_tea_openapi.models import Config
from dotenv import load_dotenv

load_dotenv()


def get_client() -> Client:
    """Build a CMS API client from environment variables.

    Returns:
        Client: an initialized CMS (2024-03-30) SDK client.

    Raises:
        RuntimeError: if any required environment variable is missing,
            so misconfiguration fails fast with a clear message instead
            of an obscure SDK/auth error at request time.
    """
    required = (
        "SLSDEMO_ALIYUN_ACCESS_KEY_ID",
        "SLSDEMO_ALIYUN_ACCESS_KEY_SECRET",
        "ALIBABA_CLOUD_CMS_ENDPOINT",
    )
    missing = [name for name in required if not os.getenv(name)]
    if missing:
        raise RuntimeError(
            f"Missing required environment variables: {', '.join(missing)}"
        )
    config = Config(
        access_key_id=os.getenv("SLSDEMO_ALIYUN_ACCESS_KEY_ID"),
        access_key_secret=os.getenv("SLSDEMO_ALIYUN_ACCESS_KEY_SECRET"),
        endpoint=os.getenv("ALIBABA_CLOUD_CMS_ENDPOINT"),
    )
    return Client(config)


# =============================================================================
# 创建 Dataset
# =============================================================================

def example_create_dataset(client: Client, workspace: str, dataset_name: str):
    """Create a Dataset and define its Schema.

    The schema maps field names to IndexKey definitions. Supported field types:
    - text:   free text; chn=True enables Chinese tokenization,
              embedding="<model>" enables vector indexing
    - long:   64-bit integer
    - double: floating point
    - json:   nested JSON; sub-field indexes are defined via json_keys
    """
    # Both free-text fields share the same tokenization + embedding settings.
    text_opts = dict(type="text", chn=True, embedding="text-embedding-v4")
    token_fields = ("input_tokens", "output_tokens", "total_tokens")
    field_defs = {
        "input": IndexKey(**text_opts),
        "output": IndexKey(**text_opts),
        "model": IndexKey(type="text"),
        "score": IndexKey(type="double"),
        "metadata": IndexKey(
            type="json",
            json_keys={name: IndexJsonKey(type="long") for name in token_fields},
        ),
    }

    resp = client.create_dataset(
        workspace,
        CreateDatasetRequest(
            dataset_name=dataset_name,
            description="AI 应用问答数据集示例",
            schema=field_defs,
        ),
    )
    print(f"✓ Dataset '{dataset_name}' 创建成功")
    print(f"  requestId: {resp.body.request_id}")

    # Render the IndexKey objects as plain dicts for readable output.
    rendered = {name: key.to_map() for name, key in field_defs.items()}
    print(f"  Schema:\n{json.dumps(rendered, indent=4, ensure_ascii=False)}")


# =============================================================================
# 获取 Dataset 详情
# =============================================================================

def example_get_dataset(client: Client, workspace: str, dataset_name: str):
    """Fetch and pretty-print the full Dataset record (schema, timestamps, etc.)."""
    details = client.get_dataset(workspace, dataset_name).body.to_map()
    print(f"✓ Dataset '{dataset_name}' 详情:")
    print(json.dumps(details, indent=2, ensure_ascii=False))


# =============================================================================
# 列举 Dataset
# =============================================================================

def example_list_datasets(client: Client, workspace: str):
    """List every Dataset in the current workspace.

    The request supports:
    - dataset_name: filter by name
    - max_results / next_token: pagination
    """
    resp = client.list_datasets(workspace, ListDatasetsRequest(max_results=100))
    payload = resp.body.to_map()

    entries = payload.get("datasets", [])
    print(f"✓ 当前 workspace 共 {len(entries)} 个 Dataset:")
    for entry in entries:
        print(f"  - {entry['datasetName']}: {entry.get('description', '')}")

    # A non-empty nextToken means more pages remain beyond max_results.
    token = payload.get("nextToken")
    if token:
        print(f"  (还有更多，nextToken: {token})")


def example_list_datasets_with_filter(client: Client, workspace: str, name_filter: str):
    """List only the Datasets whose name matches *name_filter*."""
    req = ListDatasetsRequest(max_results=100, dataset_name=name_filter)
    matched = client.list_datasets(workspace, req).body.to_map().get("datasets", [])
    print(f"✓ 过滤 '{name_filter}': 匹配 {len(matched)} 个")
    for entry in matched:
        print(f"  - {entry['datasetName']}")


# =============================================================================
# 更新 Dataset
# =============================================================================

def example_update_dataset(client: Client, workspace: str, dataset_name: str):
    """Update a Dataset's description (name and schema are left untouched)."""
    resp = client.update_dataset(
        workspace, dataset_name, UpdateDatasetRequest(description="更新后的描述信息")
    )
    print(f"✓ Dataset '{dataset_name}' 描述已更新")
    print(f"  requestId: {resp.body.request_id}")


# =============================================================================
# 删除 Dataset
# =============================================================================

def example_delete_dataset(client: Client, workspace: str, dataset_name: str):
    """Delete a Dataset. Irreversible — use with care."""
    resp = client.delete_dataset(workspace, dataset_name)
    print(f"✓ Dataset '{dataset_name}' 已删除")
    print(f"  requestId: {resp.body.request_id}")


# =============================================================================
# 主函数
# =============================================================================

def main():
    """Run the Dataset management examples end to end.

    Raises:
        RuntimeError: if ALIBABA_CLOUD_CMS_WORKSPACE is not set — every API
            call below requires a workspace, so we fail fast instead of
            passing None into the SDK and getting an obscure server error.
    """
    workspace = os.getenv("ALIBABA_CLOUD_CMS_WORKSPACE")
    if not workspace:
        raise RuntimeError(
            "Environment variable ALIBABA_CLOUD_CMS_WORKSPACE is not set"
        )

    client = get_client()
    dataset_name = "manage_demo"

    print("AgentLoop Dataset 管理操作示例")
    print("=" * 50)

    # 1. Create
    example_create_dataset(client, workspace, dataset_name)

    # 2. List
    example_list_datasets(client, workspace)

    # 3. Filter by name
    example_list_datasets_with_filter(client, workspace, "manage")

    # 4. Show details
    example_get_dataset(client, workspace, dataset_name)

    # 5. Update description
    example_update_dataset(client, workspace, dataset_name)

    # 6. Delete (uncomment to run — irreversible)
    # example_delete_dataset(client, workspace, dataset_name)

    print("\n" + "=" * 50)
    print("管理操作示例完成")


if __name__ == "__main__":
    main()
