import { prisma } from "./prisma.js";
import { createId } from "./id.js";
import { decryptSecret } from "./crypto.js";

/**
 * Loads a law firm's AI settings row and resolves the credential it points at.
 *
 * @throws Error when the firm has no active credential configured, or when the
 *   referenced credential row is missing or flagged inactive.
 */
async function getActiveAiCredentialRecord(lawFirmId: string) {
  const settings = await prisma.lawFirmAiSetting.findUnique({
    where: { law_firm_id: lawFirmId },
  });

  // Guard covers both "no settings row" and "settings without an active credential".
  if (!settings?.active_credential_id) {
    throw new Error("No active AI credential configured for this law firm");
  }

  const credential = await prisma.lawFirmAiCredential.findUnique({
    where: { id: settings.active_credential_id },
  });

  // Missing row and soft-disabled credential are treated the same way.
  if (!credential?.is_active) {
    throw new Error("Active AI credential is missing or inactive");
  }

  return { settings, credential };
}

/**
 * Resolves the provider, model, and credential id a law firm should use for AI
 * calls — without decrypting or exposing the API key.
 */
export async function getActiveAiContext(lawFirmId: string) {
  const record = await getActiveAiCredentialRecord(lawFirmId);

  return {
    provider: record.settings.default_provider,
    model: record.settings.default_model,
    credentialId: record.credential.id,
  };
}

/**
 * Same as getActiveAiContext, but additionally decrypts the stored API key so
 * the caller can make provider requests. Do not log or persist the result.
 */
export async function getActiveAiRuntime(lawFirmId: string) {
  const record = await getActiveAiCredentialRecord(lawFirmId);
  const apiKey = decryptSecret(Buffer.from(record.credential.encrypted_api_key));

  return {
    provider: record.settings.default_provider,
    model: record.settings.default_model,
    credentialId: record.credential.id,
    apiKey,
  };
}

/**
 * Runs an OpenAI chat completion that must produce a single JSON object,
 * authenticated with the law firm's active credential.
 *
 * @param input.lawFirmId - firm whose active AI credential/model is used.
 * @param input.systemPrompt - system message sent first.
 * @param input.userPrompt - user message sent second.
 * @param input.maxCompletionTokens - completion-token cap (defaults to 2000).
 * @returns parsed JSON body, token usage counts, and the model actually used.
 * @throws Error when the provider is not "openai", the HTTP request fails,
 *   the response is empty, or the model output is not valid JSON.
 */
export async function runJsonChatCompletion(input: {
  lawFirmId: string;
  systemPrompt: string;
  userPrompt: string;
  maxCompletionTokens?: number;
}) {
  const runtime = await getActiveAiRuntime(input.lawFirmId);

  if (runtime.provider !== "openai") {
    throw new Error(`Unsupported AI provider: ${runtime.provider}`);
  }

  const response = await fetch("https://api.openai.com/v1/chat/completions", {
    method: "POST",
    headers: {
      Authorization: `Bearer ${runtime.apiKey}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      model: runtime.model,
      // Constrain the model to emit a single JSON object.
      response_format: {
        type: "json_object",
      },
      temperature: 0,
      max_completion_tokens: input.maxCompletionTokens ?? 2000,
      messages: [
        {
          role: "system",
          content: input.systemPrompt,
        },
        {
          role: "user",
          content: input.userPrompt,
        },
      ],
    }),
  });

  // Gateways/transport errors may return an empty or non-JSON body; treat as null.
  const payload = await response.json().catch(() => null);

  if (!response.ok || !payload || typeof payload !== "object") {
    // Prefer the API's own error envelope message when it is present.
    const message =
      payload &&
      typeof payload === "object" &&
      "error" in payload &&
      payload.error &&
      typeof payload.error === "object" &&
      "message" in payload.error
        ? String(payload.error.message ?? "OpenAI request failed")
        : "OpenAI request failed";

    throw new Error(message);
  }

  const choices = (payload as { choices?: Array<{ message?: { content?: unknown } }> }).choices;
  const content =
    Array.isArray(choices) && choices[0]?.message?.content ? choices[0].message?.content : null;

  if (typeof content !== "string" || !content.trim()) {
    throw new Error("OpenAI returned an empty response");
  }

  // Even with response_format json_object the model can emit malformed JSON;
  // fail with a descriptive error instead of leaking a bare SyntaxError.
  let json: Record<string, unknown>;
  try {
    json = JSON.parse(content) as Record<string, unknown>;
  } catch {
    throw new Error("OpenAI returned invalid JSON");
  }

  const usage = (
    payload as {
      usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };
    }
  ).usage;

  return {
    json,
    usage: {
      inputTokens: Number(usage?.prompt_tokens ?? 0),
      outputTokens: Number(usage?.completion_tokens ?? 0),
      totalTokens: Number(usage?.total_tokens ?? 0),
    },
    model: String((payload as { model?: string }).model ?? runtime.model),
  };
}

/**
 * Runs a plain-text OpenAI chat completion for a multi-turn conversation,
 * authenticated with the law firm's active credential.
 *
 * @param input.lawFirmId - firm whose active AI credential/model is used.
 * @param input.systemPrompt - system message prepended to the conversation.
 * @param input.messages - alternating user/assistant turns, sent in order.
 * @param input.maxCompletionTokens - completion-token cap (defaults to 1200).
 * @param input.temperature - sampling temperature (defaults to 0.2).
 * @returns trimmed assistant text, token usage counts, and the model used.
 * @throws Error when the provider is not "openai", the HTTP request fails,
 *   or the response contains no usable text.
 */
export async function runTextChatCompletion(input: {
  lawFirmId: string;
  systemPrompt: string;
  messages: Array<{
    role: "user" | "assistant";
    content: string;
  }>;
  maxCompletionTokens?: number;
  temperature?: number;
}) {
  const runtime = await getActiveAiRuntime(input.lawFirmId);

  if (runtime.provider !== "openai") {
    throw new Error(`Unsupported AI provider: ${runtime.provider}`);
  }

  // System message first, then the caller-supplied turns in their given order.
  const conversation = [
    {
      role: "system",
      content: input.systemPrompt,
    },
    ...input.messages.map(({ role, content }) => ({ role, content })),
  ];

  const requestBody = {
    model: runtime.model,
    temperature: input.temperature ?? 0.2,
    max_completion_tokens: input.maxCompletionTokens ?? 1200,
    messages: conversation,
  };

  const response = await fetch("https://api.openai.com/v1/chat/completions", {
    method: "POST",
    headers: {
      Authorization: `Bearer ${runtime.apiKey}`,
      "Content-Type": "application/json",
    },
    body: JSON.stringify(requestBody),
  });

  // Gateways/transport errors may return an empty or non-JSON body; treat as null.
  const payload = await response.json().catch(() => null);

  if (!response.ok || !payload || typeof payload !== "object") {
    // Prefer the API's own error envelope message when it is present.
    let message = "OpenAI request failed";
    if (
      payload &&
      typeof payload === "object" &&
      payload !== null &&
      "error" in payload &&
      payload.error &&
      typeof payload.error === "object" &&
      "message" in payload.error
    ) {
      message = String(payload.error.message ?? "OpenAI request failed");
    }
    throw new Error(message);
  }

  const rawChoices = (payload as { choices?: Array<{ message?: { content?: unknown } }> }).choices;
  let content: unknown = null;
  if (Array.isArray(rawChoices)) {
    const candidate = rawChoices[0]?.message?.content;
    if (candidate) {
      content = candidate;
    }
  }

  if (typeof content !== "string" || !content.trim()) {
    throw new Error("OpenAI returned an empty response");
  }

  const usage = (
    payload as {
      usage?: { prompt_tokens?: number; completion_tokens?: number; total_tokens?: number };
    }
  ).usage;

  return {
    text: content.trim(),
    usage: {
      inputTokens: Number(usage?.prompt_tokens ?? 0),
      outputTokens: Number(usage?.completion_tokens ?? 0),
      totalTokens: Number(usage?.total_tokens ?? 0),
    },
    model: String((payload as { model?: string }).model ?? runtime.model),
  };
}

/**
 * Inserts an ai_runs row snapshotting the firm's current provider/model/
 * credential, then reads the persisted identifiers back.
 *
 * @param input.lawFirmId - firm the run belongs to (resolves the AI context).
 * @param input.caseId - optional case association (stored as NULL when absent).
 * @param input.clientId - optional client association (stored as NULL when absent).
 * @param input.runType - ai_run_type_code value for the row.
 * @param input.status - initial status; defaults to "running".
 * @returns the read-back row (id, ai_provider, ai_model, api_key_reference_id);
 *   undefined if the read-back query unexpectedly finds nothing.
 */
export async function createAiRun(input: {
  lawFirmId: string;
  caseId?: string | null;
  clientId?: string | null;
  runType: string;
  status?: string;
}) {
  const aiContext = await getActiveAiContext(input.lawFirmId);
  const aiRunId = createId();

  // $executeRaw returns an affected-row count we do not need; the previous
  // code bound it to an unused local and discarded it with `void`.
  await prisma.$executeRaw`
    INSERT INTO ai_runs (
      id, law_firm_id, case_id, client_id, ai_provider, ai_model,
      api_key_reference_id, ai_run_type_code, status, started_at, created_at
    ) VALUES (
      ${aiRunId},
      ${input.lawFirmId},
      ${input.caseId ?? null},
      ${input.clientId ?? null},
      ${aiContext.provider},
      ${aiContext.model},
      ${aiContext.credentialId},
      ${input.runType},
      ${input.status ?? "running"},
      NOW(),
      CURRENT_TIMESTAMP
    )
  `;

  const [row] = await prisma.$queryRaw<
    Array<{
      id: string;
      ai_provider: string;
      ai_model: string;
      api_key_reference_id: string;
    }>
  >`
    SELECT id, ai_provider, ai_model, api_key_reference_id
    FROM ai_runs
    WHERE id = ${aiRunId}
    LIMIT 1
  `;

  return row;
}

/**
 * Marks an AI run as finished: records final status, token counts, estimated
 * cost, optional error message, and a completion timestamp.
 *
 * Omitted token/cost fields are stored as 0; an omitted error message is
 * stored as NULL.
 */
export async function finishAiRun(input: {
  aiRunId: string;
  status: string;
  inputTokens?: number;
  outputTokens?: number;
  estimatedCost?: number;
  errorMessage?: string | null;
}) {
  const inputTokens = input.inputTokens ?? 0;
  const outputTokens = input.outputTokens ?? 0;
  const estimatedCost = input.estimatedCost ?? 0;
  const errorMessage = input.errorMessage ?? null;

  await prisma.$executeRaw`
    UPDATE ai_runs
    SET
      status = ${input.status},
      input_tokens = ${inputTokens},
      output_tokens = ${outputTokens},
      estimated_cost = ${estimatedCost},
      error_message = ${errorMessage},
      completed_at = NOW()
    WHERE id = ${input.aiRunId}
  `;
}
