[{"data":1,"prerenderedAt":-1},["ShallowReactive",2],{"tag-content-moderation":3},{"tag":4,"articles":10},{"id":5,"name":6,"slug":7,"article_count":8,"description_zh":9,"description_en":9},"e807f977-45ae-4dd9-b557-4d9638f88bd0","content moderation","content-moderation",0,null,[11,20,28],{"id":12,"slug":13,"title":14,"summary":15,"category":16,"image_url":17,"cover_image":17,"language":18,"created_at":19},"94547a94-aa6b-4c4f-9d64-31eb0b906947","policy-invariance-llm-safety-judge-test-en","Policy Invariance as a Better LLM Judge Test","This paper argues that accuracy alone is not enough to trust LLM safety judges, and proposes policy invariance as a reliability test.","research","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1778568046410-5agq.png","en","2026-05-12T06:40:28.372648+00:00",{"id":21,"slug":22,"title":23,"summary":24,"category":25,"image_url":26,"cover_image":26,"language":18,"created_at":27},"6d4455e1-644c-4f2e-8e4a-b41ea3630ac5","how-ai-is-changing-social-media-2026-en","How AI Is Changing Social Media in 2026","AI now shapes social feeds, moderation, ads, and deepfake risk, while chatbot use keeps pulling attention away from posting.","industry","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1778271079313-72g5.png","2026-05-08T20:10:42.391335+00:00",{"id":29,"slug":30,"title":31,"summary":32,"category":25,"image_url":33,"cover_image":33,"language":18,"created_at":34},"a58854c0-2757-45a3-b3d7-09007af51ed2","why-ai-apps-should-not-hard-block-flagged-moderation-en","Why AI apps should not hard-block every flagged moderation result","AI apps should treat moderation flags as signals, not automatic shutdowns, because hard-blocking every flag overblocks legitimate content.","https:\u002F\u002Fxxdpdyhzhpamafnrdkyq.supabase.co\u002Fstorage\u002Fv1\u002Fobject\u002Fpublic\u002Fcovers\u002Finline-1778137849909-bbj2.png","2026-05-07T07:10:27.345387+00:00"]