[[["Easy to understand","easyToUnderstand","thumb-up"],["Solved my problem","solvedMyProblem","thumb-up"],["Other","otherUp","thumb-up"]],[["Missing the information I need","missingTheInformationINeed","thumb-down"],["Too complicated / too many steps","tooComplicatedTooManySteps","thumb-down"],["Out of date","outOfDate","thumb-down"],["Samples / code issue","samplesCodeIssue","thumb-down"],["Other","otherDown","thumb-down"]],["Last updated 2025-08-20 UTC."],[[["\u003cp\u003eThis document provides versioned reference documentation for the \u003ccode\u003eHarmBlockMethod\u003c/code\u003e enum within the \u003ccode\u003eGoogle.Cloud.AIPlatform.V1\u003c/code\u003e namespace, with versions ranging from 1.0.0 to 3.22.0.\u003c/p\u003e\n"],["\u003cp\u003eThe latest version of the \u003ccode\u003eHarmBlockMethod\u003c/code\u003e enum is 3.22.0, and each version has its own dedicated documentation page.\u003c/p\u003e\n"],["\u003cp\u003eThe \u003ccode\u003eHarmBlockMethod\u003c/code\u003e enum in the Cloud AI Platform v1 API includes three fields: \u003ccode\u003eProbability\u003c/code\u003e, \u003ccode\u003eSeverity\u003c/code\u003e, and \u003ccode\u003eUnspecified\u003c/code\u003e, each defining a different method for blocking harmful content.\u003c/p\u003e\n"],["\u003cp\u003eThe \u003ccode\u003eProbability\u003c/code\u003e field indicates that the harm block method relies solely on a probability score to determine content blocking.\u003c/p\u003e\n"],["\u003cp\u003eThe \u003ccode\u003eSeverity\u003c/code\u003e field indicates that the method uses both the probability and severity scores to determine content blocking.\u003c/p\u003e\n"]]],[],null,[]]