using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Configuration;
using Microsoft.SemanticKernel;
using Microsoft.SemanticKernel.Connectors.Ollama;
using Microsoft.SemanticKernel.ChatCompletion;
using System;
using System.Threading.Tasks;

namespace LlamaWebApp.Controllers
{
    /// <summary>
    /// MVC controller that renders a simple chat page and forwards user prompts
    /// to a local Ollama model via Microsoft Semantic Kernel.
    /// </summary>
    public class ChatController : Controller
    {
        private readonly IConfiguration _config;

        public ChatController(IConfiguration config)
        {
            _config = config;
        }

        /// <summary>Renders the empty chat form.</summary>
        [HttpGet]
        public IActionResult Index()
        {
            return View();
        }

        /// <summary>
        /// Sends <paramref name="prompt"/> to the configured Ollama model and
        /// renders the response via <c>ViewBag.Prompt</c> / <c>ViewBag.Response</c>.
        /// </summary>
        /// <param name="prompt">Free-form user prompt from the form post.</param>
        /// <returns>The Index view with the model's reply in the ViewBag.</returns>
        /// <exception cref="InvalidOperationException">
        /// Thrown when Ollama:Model or Ollama:Endpoint is missing from configuration.
        /// </exception>
        // NOTE(review): consider [ValidateAntiForgeryToken] here — left off to
        // preserve the existing form contract; confirm the view posts a token first.
        [HttpPost]
        public async Task<IActionResult> Index(string prompt)
        {
            if (string.IsNullOrWhiteSpace(prompt))
                return View("Index", "Please enter a prompt.");

            // Read Ollama settings from appsettings.json and fail fast with an
            // actionable message instead of letting new Uri(null) throw later.
            var model = _config["Ollama:Model"];
            var endpointUrl = _config["Ollama:Endpoint"];
            if (string.IsNullOrWhiteSpace(model) || string.IsNullOrWhiteSpace(endpointUrl))
                throw new InvalidOperationException(
                    "Missing configuration: both 'Ollama:Model' and 'Ollama:Endpoint' must be set.");

            var endpoint = new Uri(endpointUrl);

            // BUG FIX: the previous code called .WithChatCompletionService() on an
            // already-built Kernel — that API does not exist. Services must be
            // registered on the IKernelBuilder BEFORE Build(); the Ollama connector
            // provides the AddOllamaChatCompletion builder extension for exactly this.
            // NOTE(review): building a kernel per request also re-creates the
            // underlying HttpClient each time — consider registering the kernel
            // (or IChatCompletionService) in DI at startup instead.
            var kernel = Kernel.CreateBuilder()
                .AddOllamaChatCompletion(modelId: model, endpoint: endpoint)
                .Build();

            // Ask the model; FunctionResult.ToString() yields the completion text.
            var result = await kernel.InvokePromptAsync(prompt);
            var response = result.ToString();

            ViewBag.Prompt = prompt;
            ViewBag.Response = response;

            return View();
        }
    }
}
