[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"tag-hallucination":3},{"tag":4,"articles":9},{"id":5,"name":6,"slug":6,"article_count":7,"description_zh":8,"description_en":8},"017d1e13-cdcd-44ec-94b4-e7ddef8634aa","hallucination",0,null,[10],{"id":11,"slug":12,"title":13,"summary":14,"category":15,"image_url":16,"cover_image":16,"language":17,"created_at":18},"19e8a080-c448-4e41-92f5-9ec578bed83b","ai-reading-assistants-epistemic-guardrails-en","Why AI reading assistants need guardrails","A minimal prototype tests whether LLM reading assistants stay honest when users push them beyond retrieval into interpretation.","research","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1778050257881-qv24.png","en","2026-05-06T06:50:41.442428+00:00"]