<?xml version="1.0" encoding="UTF-8"?>
<urlset
  xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
  xmlns:news="http://www.google.com/schemas/sitemap-news/0.9"
>
  <url>
    <loc>https://aiexpert.news/en/article/openai-resolves-microsoft-conflict-over-its-50b-amazon-deal-restructuring-has-en</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-30T00:02:36.451Z</news:publication_date>
      <news:title>OpenAI&apos;s $50B AWS Deal Closes After Microsoft Drops API Exclusivity</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/openai-resolves-microsoft-conflict-over-its-50b-amazon-deal-restructuring-has-en</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-30T00:02:36.451Z</news:publication_date>
      <news:title>Acordo de US$ 50 bi da OpenAI com a AWS é fechado após Microsoft abrir mão da exclusividade de API</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/openai-resolves-microsoft-conflict-over-its-50b-amazon-deal-restructuring-has-en</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-30T00:02:36.451Z</news:publication_date>
      <news:title>El acuerdo de US$ 50 mil millones entre OpenAI y AWS se concreta tras la renuncia de Microsoft a la exclusividad de API</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/openai-and-microsoft-renegotiate-their-deal-what-the-new-terms-mean-for-enterpri</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:48:04.767Z</news:publication_date>
      <news:title>OpenAI Ends Azure Exclusivity and Caps Microsoft Revenue Share Through 2030</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/openai-and-microsoft-renegotiate-their-deal-what-the-new-terms-mean-for-enterpri</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:48:04.767Z</news:publication_date>
      <news:title>OpenAI Encerra Exclusividade com Azure e Limita Repasse de Receita à Microsoft até 2030</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/openai-and-microsoft-renegotiate-their-deal-what-the-new-terms-mean-for-enterpri</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:48:04.767Z</news:publication_date>
      <news:title>OpenAI Termina la Exclusividad con Azure y Limita el Reparto de Ingresos con Microsoft hasta 2030</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/specvalidator-catches-defective-llm-prompts-before-they-corrupt-code-generation</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:30:04.332Z</news:publication_date>
      <news:title>SpecValidator Hits 0.804 F1 on Prompt Defect Detection, Doubling Frontier Model MCC</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/specvalidator-catches-defective-llm-prompts-before-they-corrupt-code-generation</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:30:04.332Z</news:publication_date>
      <news:title>SpecValidator Atinge F1 de 0,804 na Detecção de Defeitos em Prompts, Dobrando o MCC dos Modelos Frontier</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/specvalidator-catches-defective-llm-prompts-before-they-corrupt-code-generation</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:30:04.332Z</news:publication_date>
      <news:title>SpecValidator Alcanza F1 de 0,804 en Detección de Defectos en Prompts, Duplicando el MCC de los Modelos Frontier</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/training-on-disagreement-how-multi-thinker-cot-supervision-outperforms-single-te</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:18:04.348Z</news:publication_date>
      <news:title>Multi-teacher CoT pooling can be computationally hard, active queries fix it</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/training-on-disagreement-how-multi-thinker-cot-supervision-outperforms-single-te</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:18:04.348Z</news:publication_date>
      <news:title>O agrupamento multi-teacher de CoT pode ser computacionalmente difícil — consultas ativas resolvem o problema</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/training-on-disagreement-how-multi-thinker-cot-supervision-outperforms-single-te</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:18:04.348Z</news:publication_date>
      <news:title>El agrupamiento multi-teacher de CoT puede ser computacionalmente difícil — las consultas activas lo resuelven</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/hylo-upcycling-pretrained-transformer-checkpoints-into-long-context-hybrid-archi</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:05:42.310Z</news:publication_date>
      <news:title>AMD HyLo Converts Transformer Checkpoints to 32x Longer Context Without Retraining</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/hylo-upcycling-pretrained-transformer-checkpoints-into-long-context-hybrid-archi</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:05:42.310Z</news:publication_date>
      <news:title>AMD HyLo Converte Checkpoints Transformer para Contexto 32x Mais Longo Sem Retreinamento</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/hylo-upcycling-pretrained-transformer-checkpoints-into-long-context-hybrid-archi</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:05:42.310Z</news:publication_date>
      <news:title>AMD HyLo Convierte Checkpoints Transformer a Contexto 32x Más Largo Sin Reentrenamiento</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/green-shielding-researchers-expose-how-routine-phrasing-variation-silently-shift</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:50:04.238Z</news:publication_date>
      <news:title>Safer-Looking LLM Outputs Miss More Critical Diagnoses, Green Shielding Study Finds</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/green-shielding-researchers-expose-how-routine-phrasing-variation-silently-shift</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:50:04.238Z</news:publication_date>
      <news:title>Outputs de LLMs com Aparência Mais Segura Erram Mais Diagnósticos Críticos, Aponta Estudo de Green Shielding</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/green-shielding-researchers-expose-how-routine-phrasing-variation-silently-shift</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:50:04.238Z</news:publication_date>
      <news:title>Los Outputs de LLMs con Apariencia más Segura Fallan más Diagnósticos Críticos, Revela Estudio de Green Shielding</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/hdet-repurposes-idle-gpu-replicas-to-run-learning-rate-exploration-during-traini</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:38:04.216Z</news:publication_date>
      <news:title>HDET Converts Allocated GPU Replicas Into a Live Learning-Rate Search Engine</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/hdet-repurposes-idle-gpu-replicas-to-run-learning-rate-exploration-during-traini</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:38:04.216Z</news:publication_date>
      <news:title>HDET Converte Réplicas de GPU Alocadas em um Motor de Busca de Learning Rate em Tempo Real</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/hdet-repurposes-idle-gpu-replicas-to-run-learning-rate-exploration-during-traini</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:38:04.216Z</news:publication_date>
      <news:title>HDET Convierte Réplicas de GPU Asignadas en un Motor de Búsqueda de Learning Rate en Tiempo Real</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/persona-collapse-why-multi-agent-llm-simulations-converge-to-a-single-behavioral</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:24:04.290Z</news:publication_date>
      <news:title>Persona Collapse Undermines Multi-Agent LLM Simulations Across Ten Models</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/persona-collapse-why-multi-agent-llm-simulations-converge-to-a-single-behavioral</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:24:04.290Z</news:publication_date>
      <news:title>Persona Collapse Compromete Simulações LLM Multiagente em Dez Modelos</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/persona-collapse-why-multi-agent-llm-simulations-converge-to-a-single-behavioral</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:24:04.290Z</news:publication_date>
      <news:title>Persona Collapse Socava las Simulaciones LLM Multiagente en Diez Modelos</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/llm-rubrics-match-clinician-scoring-across-823-encounters-unblocking-clinical-ai</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:04:04.166Z</news:publication_date>
      <news:title>LLM Rubric Scoring Matches Clinician Agreement on 823 Cases at 1,000x Lower Cost</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/llm-rubrics-match-clinician-scoring-across-823-encounters-unblocking-clinical-ai</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:04:04.166Z</news:publication_date>
      <news:title>Pontuação por Rubrica de LLM Corresponde ao Acordo entre Clínicos em 823 Casos com Custo 1.000x Menor</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/llm-rubrics-match-clinician-scoring-across-823-encounters-unblocking-clinical-ai</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:04:04.166Z</news:publication_date>
      <news:title>La Puntuación por Rúbrica de LLM Iguala el Acuerdo entre Clínicos en 823 Casos con un Costo 1.000x Menor</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/depthkv-layer-aware-kv-cache-pruning-cuts-long-context-llm-memory-overhead</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:50:04.170Z</news:publication_date>
      <news:title>DepthKV Beats Uniform KV Cache Pruning by Allocating Memory per Layer Sensitivity</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/depthkv-layer-aware-kv-cache-pruning-cuts-long-context-llm-memory-overhead</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:50:04.170Z</news:publication_date>
      <news:title>DepthKV Supera o Pruning Uniforme de Cache KV ao Alocar Memória por Sensibilidade de Camada</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/depthkv-layer-aware-kv-cache-pruning-cuts-long-context-llm-memory-overhead</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:50:04.170Z</news:publication_date>
      <news:title>DepthKV Supera el Pruning Uniforme de Caché KV al Asignar Memoria por Sensibilidad de Capa</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/agentward-new-security-architecture-addresses-full-lifecycle-vulnerabilities-in-</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:34:04.170Z</news:publication_date>
      <news:title>FIND-Lab releases AgentWard, a five-layer AI agent security framework</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/agentward-new-security-architecture-addresses-full-lifecycle-vulnerabilities-in-</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:34:04.170Z</news:publication_date>
      <news:title>FIND-Lab lança AgentWard, framework de segurança em cinco camadas para agentes de IA</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/agentward-new-security-architecture-addresses-full-lifecycle-vulnerabilities-in-</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:34:04.170Z</news:publication_date>
      <news:title>FIND-Lab lanza AgentWard, un framework de seguridad de cinco capas para agentes de IA</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/meta-races-to-unwind-manus-ai-deal-before-beijings-export-control-deadline</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:15:42.618Z</news:publication_date>
      <news:title>Meta Forced to Dismantle Manus AI Acquisition Under Beijing Deadline</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/meta-races-to-unwind-manus-ai-deal-before-beijings-export-control-deadline</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:15:42.618Z</news:publication_date>
      <news:title>Meta Obrigada a Desmantelar Aquisição da Manus AI sob Prazo de Pequim</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/meta-races-to-unwind-manus-ai-deal-before-beijings-export-control-deadline</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:15:42.618Z</news:publication_date>
      <news:title>Meta Obligada a Desmantelar la Adquisición de Manus AI bajo Plazo de Pekín</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/anthropic-tests-claude-models-for-safety-research-sabotageand-finds-none</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:02:04.344Z</news:publication_date>
      <news:title>Anthropic finds Claude does not start safety sabotage but will continue it when primed</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/anthropic-tests-claude-models-for-safety-research-sabotageand-finds-none</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:02:04.344Z</news:publication_date>
      <news:title>Anthropic descobre que Claude não inicia sabotagem de segurança, mas a continua quando induzido</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/anthropic-tests-claude-models-for-safety-research-sabotageand-finds-none</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:02:04.344Z</news:publication_date>
      <news:title>Anthropic concluye que Claude no inicia sabotaje de seguridad pero lo continúa cuando se le induce</news:title>
    </news:news>
  </url>
</urlset>