[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"tag-reinforcement-learning":3},{"tag":4,"articles":11},{"id":5,"name":6,"slug":7,"article_count":8,"description_zh":9,"description_en":10},"d52d08ae-f7f9-4625-ada6-d32a7bcd1036","reinforcement learning","reinforcement-learning",15,"強化學習研究如何讓模型在回饋訊號下逐步學會決策，常見於機器人控制、長期代理訓練與 LLM 微調。這個主題也涵蓋 PPO、BRRL、持續學習與安全約束等方法，重點在穩定更新、長期規劃與部署風險。","Reinforcement learning studies how models learn decisions from feedback over time, and it underpins robot control, long-horizon agent training, and LLM fine-tuning. Recent work spans PPO variants, safe continual RL, stability, and planning under changing environments.",[12,21,28,35,42,50,58],{"id":13,"slug":14,"title":15,"summary":16,"category":17,"image_url":18,"cover_image":18,"language":19,"created_at":20},"7a04d752-3f1a-4df7-b7c5-8bcb1e69c565","bounded-ratio-reinforcement-learning-ppo-zh","BRRL 取代 PPO 剪裁：BPO 與 GBPO 的穩定性升級","BRRL 把 PPO 的剪裁目標改寫成有界比例框架，推出 BPO 與 GBPO，主打更穩定的更新與更清楚的理論基礎。","research","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1776751794578-t5j7.png","zh","2026-04-21T06:09:39.661696+00:00",{"id":22,"slug":23,"title":24,"summary":25,"category":17,"image_url":26,"cover_image":26,"language":19,"created_at":27},"46ad5553-2eab-41b1-8602-82bf7fb94933","llm-generalization-shortest-path-scale-zh","LLM 會看地圖，卻撐不住長度","這篇合成最短路徑研究把「會換地圖」和「能拉長題目」拆開看，結果發現 LLM 能跨地圖泛化，卻在長度變長時因遞迴推理不穩而失手。","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1776406013309-pvmm.png","2026-04-17T06:06:33.258278+00:00",{"id":29,"slug":30,"title":31,"summary":32,"category":17,"image_url":33,"cover_image":33,"language":19,"created_at":34},"ff7d80fb-56b3-4d87-94cc-ad38b20f6e5d","physics-simulators-rl-llm-reasoning-zh","用物理模擬器訓練 LLM 推理","研究者把物理模擬器變成強化學習資料來源，訓練 LLM 學會物理推理，並在 IPhO 題目上帶來 zero-shot 提升。","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1776146993167-rwzt.png","2026-04-14T06:09:32.812614+00:00",{"id":36,"slug":37,"title":38,"summary":39,"category":17,"image_url":40,"cover_image":40,"language":19,"created_at":41},"5e4f3620-9a8e-4185-84d2-fa8ef42fc058","act-wisely-tool-use-agentic-multimodal-models-zh","教代理何時別叫工具","HDPO 把「答對」和「少叫工具」分開訓練，想修正多模態代理的盲目工具使用。摘要稱它能大幅減少呼叫次數，同時提升推理正確率。","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1775801029065-5n2l.png","2026-04-10T06:03:34.31315+00:00",{"id":43,"slug":44,"title":45,"summary":46,"category":47,"image_url":48,"cover_image":48,"language":19,"created_at":49},"779f5798-9c39-4ce2-95d7-f0abfd24a695","five-ai-infra-frontiers-bessemer-2026-zh","Bessemer 看準的 5 個 AI 基礎設施前線","Bessemer 2026 AI infra 藍圖指向 memory、continual learning、RL、inference 與 world models。重點不是更大模型，而是讓 AI 真正進到生產環境。","industry","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1775164388114-uo7t.png","2026-04-02T21:12:39.852377+00:00",{"id":51,"slug":52,"title":53,"summary":54,"category":55,"image_url":56,"cover_image":56,"language":19,"created_at":57},"c34422da-87f3-4b42-9f47-36ef66e0760e","build-ai-crypto-trading-bot-guide-zh","如何打造 AI 加密貨幣交易機器人","2026 AI 加密貨幣交易機器人實作指南：資料管線、模型選擇、風控、部署與合規，幫你把想法變成能上線的系統。","blockchain","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1775121945326-q2q9.png","2026-04-02T08:12:40.708166+00:00",{"id":59,"slug":60,"title":61,"summary":62,"category":63,"image_url":64,"cover_image":65,"language":19,"created_at":66},"ce38adca-0f38-4eae-8155-97ac51582a85","cursor-self-hosted-agents-real-time-rl-zh","Cursor 推自架代理與即時 RL","Cursor 在 2026 年 3 月推出自架雲端代理，並公開 Composer 的即時 RL 訓練法。官方稱新 checkpoint 最快每 5 小時更新一次，企業可把程式碼與工具執行留在自家網路內。","tools",null,"https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1774497189210-w1wd.png","2026-03-28T03:10:51.511587+00:00"]