<?xml version="1.0" encoding="UTF-8"?>
<urlset
  xmlns="http://www.sitemaps.org/schemas/sitemap/0.9"
  xmlns:news="http://www.google.com/schemas/sitemap-news/0.9"
>
  <url>
    <loc>https://aiexpert.news/en/article/openai-resolves-microsoft-conflict-over-its-50b-amazon-deal-restructuring-has-en</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-30T00:02:36.451Z</news:publication_date>
      <news:title>OpenAI&apos;s $50B AWS Deal Closes After Microsoft Drops API Exclusivity</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/openai-resolves-microsoft-conflict-over-its-50b-amazon-deal-restructuring-has-en</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-30T00:02:36.451Z</news:publication_date>
      <news:title>Acordo de US$ 50 bi da OpenAI com a AWS é fechado após Microsoft abrir mão da exclusividade de API</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/openai-resolves-microsoft-conflict-over-its-50b-amazon-deal-restructuring-has-en</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-30T00:02:36.451Z</news:publication_date>
      <news:title>El acuerdo de US$ 50 mil millones entre OpenAI y AWS se concreta tras la renuncia de Microsoft a la exclusividad de API</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/openai-and-microsoft-renegotiate-their-deal-what-the-new-terms-mean-for-enterpri</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:48:04.767Z</news:publication_date>
      <news:title>OpenAI Ends Azure Exclusivity and Caps Microsoft Revenue Share Through 2030</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/openai-and-microsoft-renegotiate-their-deal-what-the-new-terms-mean-for-enterpri</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:48:04.767Z</news:publication_date>
      <news:title>OpenAI Encerra Exclusividade com Azure e Limita Repasse de Receita à Microsoft até 2030</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/openai-and-microsoft-renegotiate-their-deal-what-the-new-terms-mean-for-enterpri</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:48:04.767Z</news:publication_date>
      <news:title>OpenAI Termina la Exclusividad con Azure y Limita el Reparto de Ingresos con Microsoft hasta 2030</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/specvalidator-catches-defective-llm-prompts-before-they-corrupt-code-generation</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:30:04.332Z</news:publication_date>
      <news:title>SpecValidator Hits 0.804 F1 on Prompt Defect Detection, Doubling Frontier Model MCC</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/specvalidator-catches-defective-llm-prompts-before-they-corrupt-code-generation</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:30:04.332Z</news:publication_date>
      <news:title>SpecValidator Atinge F1 de 0,804 na Detecção de Defeitos em Prompts, Dobrando o MCC dos Modelos Frontier</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/specvalidator-catches-defective-llm-prompts-before-they-corrupt-code-generation</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:30:04.332Z</news:publication_date>
      <news:title>SpecValidator Alcanza F1 de 0,804 en Detección de Defectos en Prompts, Duplicando el MCC de los Modelos Frontier</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/training-on-disagreement-how-multi-thinker-cot-supervision-outperforms-single-te</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:18:04.348Z</news:publication_date>
      <news:title>Multi-teacher CoT pooling can be computationally hard, active queries fix it</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/training-on-disagreement-how-multi-thinker-cot-supervision-outperforms-single-te</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:18:04.348Z</news:publication_date>
      <news:title>O agrupamento multi-teacher de CoT pode ser computacionalmente difícil — consultas ativas resolvem o problema</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/training-on-disagreement-how-multi-thinker-cot-supervision-outperforms-single-te</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:18:04.348Z</news:publication_date>
      <news:title>El agrupamiento multi-teacher de CoT puede ser computacionalmente difícil — las consultas activas lo resuelven</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/hylo-upcycling-pretrained-transformer-checkpoints-into-long-context-hybrid-archi</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:05:42.310Z</news:publication_date>
      <news:title>AMD HyLo Converts Transformer Checkpoints to 32x Longer Context Without Retraining</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/hylo-upcycling-pretrained-transformer-checkpoints-into-long-context-hybrid-archi</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:05:42.310Z</news:publication_date>
      <news:title>AMD HyLo Converte Checkpoints Transformer para Contexto 32x Mais Longo Sem Retreinamento</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/hylo-upcycling-pretrained-transformer-checkpoints-into-long-context-hybrid-archi</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T02:05:42.310Z</news:publication_date>
      <news:title>AMD HyLo Convierte Checkpoints Transformer a Contexto 32x Más Largo Sin Reentrenamiento</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/green-shielding-researchers-expose-how-routine-phrasing-variation-silently-shift</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:50:04.238Z</news:publication_date>
      <news:title>Safer-Looking LLM Outputs Miss More Critical Diagnoses, Green Shielding Study Finds</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/green-shielding-researchers-expose-how-routine-phrasing-variation-silently-shift</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:50:04.238Z</news:publication_date>
      <news:title>Outputs de LLMs com Aparência Mais Segura Erram Mais Diagnósticos Críticos, Aponta Estudo de Green Shielding</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/green-shielding-researchers-expose-how-routine-phrasing-variation-silently-shift</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:50:04.238Z</news:publication_date>
      <news:title>Los Outputs de LLMs con Apariencia más Segura Fallan más Diagnósticos Críticos, Revela Estudio de Green Shielding</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/hdet-repurposes-idle-gpu-replicas-to-run-learning-rate-exploration-during-traini</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:38:04.216Z</news:publication_date>
      <news:title>HDET Converts Allocated GPU Replicas Into a Live Learning-Rate Search Engine</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/hdet-repurposes-idle-gpu-replicas-to-run-learning-rate-exploration-during-traini</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:38:04.216Z</news:publication_date>
      <news:title>HDET Converte Réplicas de GPU Alocadas em um Motor de Busca de Learning Rate em Tempo Real</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/hdet-repurposes-idle-gpu-replicas-to-run-learning-rate-exploration-during-traini</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:38:04.216Z</news:publication_date>
      <news:title>HDET Convierte Réplicas de GPU Asignadas en un Motor de Búsqueda de Learning Rate en Tiempo Real</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/persona-collapse-why-multi-agent-llm-simulations-converge-to-a-single-behavioral</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:24:04.290Z</news:publication_date>
      <news:title>Persona Collapse Undermines Multi-Agent LLM Simulations Across Ten Models</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/persona-collapse-why-multi-agent-llm-simulations-converge-to-a-single-behavioral</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:24:04.290Z</news:publication_date>
      <news:title>Persona Collapse Compromete Simulações LLM Multiagente em Dez Modelos</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/persona-collapse-why-multi-agent-llm-simulations-converge-to-a-single-behavioral</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:24:04.290Z</news:publication_date>
      <news:title>Persona Collapse Socava las Simulaciones LLM Multiagente en Diez Modelos</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/llm-rubrics-match-clinician-scoring-across-823-encounters-unblocking-clinical-ai</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:04:04.166Z</news:publication_date>
      <news:title>LLM Rubric Scoring Matches Clinician Agreement on 823 Cases at 1,000x Lower Cost</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/llm-rubrics-match-clinician-scoring-across-823-encounters-unblocking-clinical-ai</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:04:04.166Z</news:publication_date>
      <news:title>Pontuação por Rubrica de LLM Corresponde ao Acordo entre Clínicos em 823 Casos com Custo 1.000x Menor</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/llm-rubrics-match-clinician-scoring-across-823-encounters-unblocking-clinical-ai</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T01:04:04.166Z</news:publication_date>
      <news:title>La Puntuación por Rúbrica de LLM Iguala el Acuerdo entre Clínicos en 823 Casos con un Costo 1.000x Menor</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/depthkv-layer-aware-kv-cache-pruning-cuts-long-context-llm-memory-overhead</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:50:04.170Z</news:publication_date>
      <news:title>DepthKV Beats Uniform KV Cache Pruning by Allocating Memory per Layer Sensitivity</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/depthkv-layer-aware-kv-cache-pruning-cuts-long-context-llm-memory-overhead</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:50:04.170Z</news:publication_date>
      <news:title>DepthKV Supera o Pruning Uniforme de Cache KV ao Alocar Memória por Sensibilidade de Camada</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/depthkv-layer-aware-kv-cache-pruning-cuts-long-context-llm-memory-overhead</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:50:04.170Z</news:publication_date>
      <news:title>DepthKV Supera el Pruning Uniforme de Caché KV al Asignar Memoria por Sensibilidad de Capa</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/agentward-new-security-architecture-addresses-full-lifecycle-vulnerabilities-in-</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:34:04.170Z</news:publication_date>
      <news:title>FIND-Lab releases AgentWard, a five-layer AI agent security framework</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/agentward-new-security-architecture-addresses-full-lifecycle-vulnerabilities-in-</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:34:04.170Z</news:publication_date>
      <news:title>FIND-Lab lança AgentWard, framework de segurança em cinco camadas para agentes de IA</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/agentward-new-security-architecture-addresses-full-lifecycle-vulnerabilities-in-</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:34:04.170Z</news:publication_date>
      <news:title>FIND-Lab lanza AgentWard, un framework de seguridad de cinco capas para agentes de IA</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/meta-races-to-unwind-manus-ai-deal-before-beijings-export-control-deadline</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:15:42.618Z</news:publication_date>
      <news:title>Meta Forced to Dismantle Manus AI Acquisition Under Beijing Deadline</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/meta-races-to-unwind-manus-ai-deal-before-beijings-export-control-deadline</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:15:42.618Z</news:publication_date>
      <news:title>Meta Obrigada a Desmantelar Aquisição da Manus AI sob Prazo de Pequim</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/meta-races-to-unwind-manus-ai-deal-before-beijings-export-control-deadline</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:15:42.618Z</news:publication_date>
      <news:title>Meta Obligada a Desmantelar la Adquisición de Manus AI bajo Plazo de Pekín</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/anthropic-tests-claude-models-for-safety-research-sabotageand-finds-none</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:02:04.344Z</news:publication_date>
      <news:title>Anthropic finds Claude does not start safety sabotage but will continue it when primed</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/anthropic-tests-claude-models-for-safety-research-sabotageand-finds-none</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:02:04.344Z</news:publication_date>
      <news:title>Anthropic descobre que Claude não inicia sabotagem de segurança, mas a continua quando induzido</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/anthropic-tests-claude-models-for-safety-research-sabotageand-finds-none</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-29T00:02:04.344Z</news:publication_date>
      <news:title>Anthropic concluye que Claude no inicia sabotaje de seguridad pero lo continúa cuando se le induce</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/alec-radfords-talkie-is-a-13b-llm-trained-on-260b-tokens-of-pre-1931-text-releas</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T03:14:49.577Z</news:publication_date>
      <news:title>Alec Radford Releases 13B Model Trained on Pre-1931 Text Under Apache 2.0</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/alec-radfords-talkie-is-a-13b-llm-trained-on-260b-tokens-of-pre-1931-text-releas</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T03:14:49.577Z</news:publication_date>
      <news:title>Alec Radford Lança Modelo de 13B Treinado em Textos Anteriores a 1931 sob Apache 2.0</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/alec-radfords-talkie-is-a-13b-llm-trained-on-260b-tokens-of-pre-1931-text-releas</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T03:14:49.577Z</news:publication_date>
      <news:title>Alec Radford Lanza Modelo de 13B Entrenado con Textos Anteriores a 1931 bajo Apache 2.0</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/doc-to-lora-adaptation-collapses-to-46-accuracy-when-documents-contradict-traini</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T02:44:04.398Z</news:publication_date>
      <news:title>Doc-to-LoRA Accuracy Falls to 16% Against Strongly Entrenched Model Facts</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/doc-to-lora-adaptation-collapses-to-46-accuracy-when-documents-contradict-traini</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T02:44:04.398Z</news:publication_date>
      <news:title>Acurácia do Doc-to-LoRA Cai para 16% Contra Fatos Fortemente Consolidados no Modelo</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/doc-to-lora-adaptation-collapses-to-46-accuracy-when-documents-contradict-traini</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T02:44:04.398Z</news:publication_date>
      <news:title>La Precisión del Doc-to-LoRA Cae al 16% Frente a Hechos Fuertemente Arraigados en el Modelo</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/elementsclaw-agentic-framework-closes-the-loop-on-ai-driven-materials-discovery</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T02:30:04.353Z</news:publication_date>
      <news:title>ElementsClaw Screens 2.4 Million Crystals in 28 GPU Hours, Finds Four New Superconductors</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/elementsclaw-agentic-framework-closes-the-loop-on-ai-driven-materials-discovery</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T02:30:04.353Z</news:publication_date>
      <news:title>ElementsClaw Analisa 2,4 Milhões de Cristais em 28 Horas de GPU e Descobre Quatro Novos Supercondutores</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/elementsclaw-agentic-framework-closes-the-loop-on-ai-driven-materials-discovery</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T02:30:04.353Z</news:publication_date>
      <news:title>ElementsClaw Analiza 2,4 Millones de Cristales en 28 Horas de GPU y Descubre Cuatro Nuevos Superconductores</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/silent-deepspeed-bug-has-corrupted-rl-fine-tuning-benchmarks-across-trl-openrlhf</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T02:15:35.617Z</news:publication_date>
      <news:title>DeepSpeed CPU-Offload Bug Corrupted RLHF Benchmarks in Three Major Frameworks</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/silent-deepspeed-bug-has-corrupted-rl-fine-tuning-benchmarks-across-trl-openrlhf</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T02:15:35.617Z</news:publication_date>
      <news:title>Bug de CPU-Offload do DeepSpeed Corrompeu Benchmarks de RLHF em Três Grandes Frameworks</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/silent-deepspeed-bug-has-corrupted-rl-fine-tuning-benchmarks-across-trl-openrlhf</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T02:15:35.617Z</news:publication_date>
      <news:title>El Bug de CPU-Offload de DeepSpeed Corrompió Benchmarks de RLHF en Tres Frameworks Principales</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/google-deepmind-signs-national-ai-partnership-with-south-korea-eyes-government-a</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T01:50:04.392Z</news:publication_date>
      <news:title>Google DeepMind Strikes Lab-Direct AI Partnership with South Korea</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/google-deepmind-signs-national-ai-partnership-with-south-korea-eyes-government-a</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T01:50:04.392Z</news:publication_date>
      <news:title>Google DeepMind firma parceria de IA Lab-Direct com a Coreia do Sul</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/google-deepmind-signs-national-ai-partnership-with-south-korea-eyes-government-a</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T01:50:04.392Z</news:publication_date>
      <news:title>Google DeepMind establece alianza de IA Lab-Direct con Corea del Sur</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/new-method-cuts-the-cost-of-fitting-ai-scaling-laws-by-actively-selecting-which-</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T01:30:04.379Z</news:publication_date>
      <news:title>MSPE Fits AI Scaling Laws at 10% of Standard Compute Cost</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/new-method-cuts-the-cost-of-fitting-ai-scaling-laws-by-actively-selecting-which-</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T01:30:04.379Z</news:publication_date>
      <news:title>MSPE Ajusta Leis de Escala de IA com 10% do Custo Computacional Padrão</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/new-method-cuts-the-cost-of-fitting-ai-scaling-laws-by-actively-selecting-which-</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T01:30:04.379Z</news:publication_date>
      <news:title>MSPE Ajusta Leyes de Escala de IA con el 10% del Costo Computacional Estándar</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/en/article/llms-systematically-generate-harmful-narratives-about-global-majority-nationalit</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>en</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T01:18:04.626Z</news:publication_date>
      <news:title>Frontier LLMs show 50x subordination bias against Global Majority nationalities</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/pt/article/llms-systematically-generate-harmful-narratives-about-global-majority-nationalit</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>pt</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T01:18:04.626Z</news:publication_date>
      <news:title>LLMs de fronteira apresentam viés de subordinação 50x maior contra nacionalidades da Maioria Global</news:title>
    </news:news>
  </url>
  <url>
    <loc>https://aiexpert.news/es/article/llms-systematically-generate-harmful-narratives-about-global-majority-nationalit</loc>
    <news:news>
      <news:publication>
        <news:name>ai|expert</news:name>
        <news:language>es</news:language>
      </news:publication>
      <news:publication_date>2026-04-28T01:18:04.626Z</news:publication_date>
      <news:title>Los LLMs de frontera muestran un sesgo de subordinación 50x contra nacionalidades de la Mayoría Global</news:title>
    </news:news>
  </url>
</urlset>