[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"tag-llm-architecture":3},{"tag":4,"articles":10},{"id":5,"name":6,"slug":7,"article_count":8,"description_zh":9,"description_en":9},"4af0eced-499a-4ad7-bf0b-819d90828e1b","LLM architecture","llm-architecture",2,null,[11,20],{"id":12,"slug":13,"title":14,"summary":15,"category":16,"image_url":17,"cover_image":17,"language":18,"created_at":19},"1e4ba03d-b371-427a-8d9e-d694f09827b1","unipool-shared-expert-pool-moe-en","UniPool shares MoE experts across layers","UniPool replaces per-layer MoE experts with one shared pool, cutting redundancy and improving validation loss in five LLaMA-scale models.","research","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1778221264459-eh59.png","en","2026-05-08T06:20:40.975202+00:00",{"id":21,"slug":22,"title":23,"summary":24,"category":16,"image_url":25,"cover_image":25,"language":18,"created_at":26},"cdcfe76f-c9bf-44ac-98d9-e9041d414d6c","sebastian-raschka-llm-architecture-gallery-en","Sebastian Raschka’s LLM Architecture Gallery","Raschka’s gallery compares GPT-2, Llama 3, OLMo 2, DeepSeek, and Qwen stacks with exact layer, cache, and attention data.","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1775121663908-8tcs.png","2026-04-02T07:27:33.848813+00:00"]