Merge pull request #479 from akinfaa/main
automated reasoning checks code samples
Showing 12 changed files with 1,683 additions and 0 deletions.
1,116 changes: 1,116 additions & 0 deletions
responsible_ai/bedrock-automated-reasoning-checks/automated_reasoning_checks.ipynb
Large diffs are not rendered by default.
125 changes: 125 additions & 0 deletions
responsible_ai/bedrock-automated-reasoning-checks/conversation.py
""" | ||
Module for managing question-answer interactions and conversations. | ||
This module provides classes to handle Q&A interactions and conversations in a structured way. | ||
It includes functionality to format interactions for use with the Amazon Bedrock API and | ||
guardrail validation. | ||
Classes: | ||
Interaction: Represents a single Q&A exchange with question, answer and optional rewritten answer | ||
Conversation: Manages a collection of Interaction objects as a conversation thread | ||
The module is designed to simplify working with Q&A pairs by providing: | ||
- Structured storage of questions and answers | ||
- Formatting for Bedrock API integration | ||
- Support for answer rewriting and validation | ||
- Conversation history management | ||
Example: | ||
conversation = Conversation() | ||
interaction = Interaction("What is the weather?", "It's sunny") | ||
conversation.add_interaction(interaction) | ||
""" | ||
from typing import List | ||
|
||
|
||
class Interaction: | ||
""" | ||
A class representing a single interaction in a conversation. | ||
Contains the question asked, the answer provided, and optionally a rewritten version of the answer. | ||
Attributes: | ||
question (str): The question text | ||
answer (str): The answer text, can be None initially | ||
rewritten_answer (str): An optional rewritten version of the answer, defaults to None | ||
""" | ||
|
||
def __init__(self, question: str, answer: str = None) -> None: | ||
""" | ||
Initialize an Interaction with a question and optional answer. | ||
Args: | ||
question (str): The question text | ||
answer (str, optional): The answer text. Defaults to None. | ||
""" | ||
self.question = question | ||
self.answer = answer | ||
self.rewritten_answer = None | ||
|
||
def set_answer(self, answer): | ||
"""Set the answer text for this interaction.""" | ||
self.answer = answer | ||
|
||
def set_rewritten_answer(self, rewritten_answer): | ||
"""Set a rewritten version of the answer text.""" | ||
self.rewritten_answer = rewritten_answer | ||
|
||
def get_bedrock_question(self): | ||
""" | ||
Format the question for Bedrock API. | ||
Returns: | ||
dict: Question formatted for Bedrock API | ||
""" | ||
return { | ||
"role": "user", "content": [{"text": self.question}] | ||
} | ||
|
||
def get_bedrock_answer(self): | ||
""" | ||
Format the answer for Bedrock API. | ||
Returns: | ||
dict: Answer formatted for Bedrock API | ||
""" | ||
return { | ||
"role": "assistant", "content": [{"text": self.answer}] | ||
} | ||
|
||
def to_guardrail_input(self): | ||
""" | ||
Format the interaction for guardrail validation. | ||
Returns: | ||
list: Question and answer formatted for guardrail validation | ||
""" | ||
return [ | ||
{ | ||
"text": { | ||
"text": self.question, | ||
"qualifiers": ["query"] | ||
} | ||
}, | ||
{ | ||
"text": { | ||
"text": self.answer, | ||
"qualifiers": ["guard_content"] | ||
} | ||
} | ||
] | ||
|
||
|
||
class Conversation: | ||
""" | ||
A class representing a conversation composed of multiple interactions. | ||
Manages a list of Interaction objects that represent the back-and-forth | ||
exchanges in a conversation. | ||
Attributes: | ||
messages (List[Interaction]): List of Interaction objects in the conversation | ||
""" | ||
|
||
def __init__(self) -> None: | ||
"""Initialize an empty Conversation with no messages.""" | ||
self.messages: List[Interaction] = [] | ||
|
||
def add_interaction(self, interaction: Interaction): | ||
""" | ||
Add a new interaction to the conversation. | ||
Args: | ||
interaction (Interaction): The Interaction object to add | ||
""" | ||
self.messages.append(interaction) |
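A minimal usage sketch for the module above (the question and answer text are illustrative, and it assumes `conversation.py` is importable from the working directory):

```python
from conversation import Conversation, Interaction

# Record a single Q&A exchange in a conversation thread
interaction = Interaction("Am I eligible for a leave of absence?")
interaction.set_answer("Yes, full-time employees are eligible.")

conversation = Conversation()
conversation.add_interaction(interaction)

# Converse-style message dicts produced by the helper methods
print(interaction.get_bedrock_question())
# {'role': 'user', 'content': [{'text': 'Am I eligible for a leave of absence?'}]}
print(interaction.get_bedrock_answer())
# {'role': 'assistant', 'content': [{'text': 'Yes, full-time employees are eligible.'}]}

# Content blocks tagged for guardrail validation ("query" vs. "guard_content")
print(interaction.to_guardrail_input())
```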
Binary file added (+70 KB): responsible_ai/bedrock-automated-reasoning-checks/data/LoA_sample_policy_data.pdf
166 changes: 166 additions & 0 deletions
responsible_ai/bedrock-automated-reasoning-checks/feedback.py
""" | ||
A module for processing and formatting validation feedback for natural language interactions. | ||
This module provides functionality to handle validation findings and generate structured | ||
feedback messages. It includes the InteractionFeedback class which processes findings | ||
containing validation results, rules, and suggestions. | ||
The module supports: | ||
- Validating findings and determining overall validity | ||
- Extracting invalid rules and their descriptions | ||
- Formatting suggestions for corrections | ||
- Generating formatted feedback messages for APIs | ||
The feedback messages follow a standardized format using XML tags to denote different | ||
types of feedback (<feedback>) and suggestions (<assumption>). | ||
""" | ||
import typing | ||
|
||
CORRECTION_MESSAGE = ( | ||
"Rewrite your answer using the feedback below. " | ||
"The feedback includes the policy rules your answer broke inside " | ||
"<feedback> tags.\n\n" | ||
"The text inside the <correction> tag specifically reference factual " | ||
"statements you should change. \n\n" | ||
"For answers that are correct but incomplete, I also included an example " | ||
"of the conditions you should specify inside the <assumption> tag.\n\n" | ||
"The values inside the correction and assumption tags look like variables " | ||
"in code, make sure you change this to natural language. " | ||
"It is very important that your rewritten answer does not mention " | ||
"the fact that you received feedback! " | ||
"Do not quote the feedback, corrections, or assumptions verbatim in your answer." | ||
) | ||
|
||
class InteractionFeedback: | ||
""" | ||
A class that stores and processes feedback about the accuracy of a natural language statement. | ||
This class takes findings from a validation process and provides methods to check validity | ||
and generate feedback messages. | ||
Attributes: | ||
raw_findings: List of validation findings containing results and rules | ||
""" | ||
def __init__(self, findings) -> None: | ||
""" | ||
Initialize InteractionFeedback with validation findings. | ||
Args: | ||
findings: List of validation findings containing results and rules | ||
""" | ||
self.raw_findings = findings | ||
|
||
def is_invalid(self) -> bool: | ||
""" | ||
Check if any findings indicate an invalid result. | ||
Returns: | ||
bool: True if any finding has an "INVALID" result, False otherwise | ||
""" | ||
for f in self.raw_findings: | ||
if f["result"] == "INVALID": | ||
return True | ||
|
||
return False | ||
|
||
def validation_result(self) -> str: | ||
""" | ||
Get the overall validation result. | ||
Returns: | ||
str: "VALID" if all findings are valid, "INVALID" otherwise | ||
""" | ||
if self.is_invalid(): | ||
return "INVALID" | ||
|
||
return "VALID" | ||
|
||
def invalid_rules(self) -> typing.List: | ||
""" | ||
Get descriptions of all invalid rules from the findings. | ||
Iterates through the raw findings and extracts rule descriptions for any findings | ||
that are marked as invalid and have associated rules. | ||
Returns: | ||
list: A list of rule description strings for invalid findings. Empty list if no | ||
invalid rules are found. | ||
""" | ||
rules = [] | ||
for f in self.raw_findings: | ||
if f["result"] == "INVALID" and f["rules"] is not None: | ||
for r in f["rules"]: | ||
rules.append(r["description"]) | ||
|
||
return rules | ||
|
||
def suggestions(self) -> typing.List: | ||
""" | ||
Get formatted suggestions from invalid findings. | ||
Iterates through raw findings and extracts suggestions for any findings that are | ||
marked as invalid and have associated suggestions. Each suggestion contains a type, | ||
key (variable name), and value. | ||
Returns: | ||
list: A list of formatted suggestion strings in XML tags. Each suggestion indicates | ||
what value a variable should have. Returns empty list if no suggestions found. | ||
""" | ||
suggestions = [] | ||
for f in self.raw_findings: | ||
if "suggestions" in f: | ||
true_scenario = "" | ||
corrections = "" | ||
# gather all assumptions to generate a valid scenario string | ||
for suggestion in f["suggestions"]: | ||
suggestion_type = suggestion["type"].lower() | ||
if suggestion_type == "assumption": | ||
if true_scenario != "": | ||
true_scenario += " and " | ||
true_scenario += ( | ||
f"The variable {suggestion['key']} should have a value of {suggestion['value']}" | ||
) | ||
if suggestion_type == "correction": | ||
if corrections != "": | ||
corrections += " and " | ||
corrections += ( | ||
f"Change the value for the variable {suggestion['key']} to {suggestion['value']}" | ||
) | ||
|
||
if true_scenario != "": | ||
suggestions.append(f"<assumption>{true_scenario}</assumption") | ||
if corrections != "": | ||
suggestions.append(f"<correction>{corrections}</correction>") | ||
return suggestions | ||
|
||
def to_feedback_message(self) -> str: | ||
""" | ||
Generate a formatted feedback message from invalid findings. | ||
Returns: | ||
str: A string containing formatted feedback messages from invalid findings, | ||
or None if there are no invalid findings | ||
""" | ||
feedback = "" | ||
for r in self.invalid_rules(): | ||
feedback += f"<feedback>{r}</feedback>\n" | ||
for s in self.suggestions(): | ||
feedback += f"{s}\n" | ||
|
||
return None if feedback == "" else feedback | ||
|
||
def get_bedrock_feedback(self): | ||
""" | ||
Create a formatted feedback message for Bedrock API. | ||
Returns: | ||
dict: A dictionary containing the feedback message formatted for Bedrock, | ||
or None if there is no feedback to send | ||
""" | ||
feedback_str = self.to_feedback_message() | ||
if not feedback_str: | ||
return None | ||
|
||
return { | ||
"role": "user", "content": [{"text": f"{CORRECTION_MESSAGE}\n\n {feedback_str}"}] | ||
} |
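A short sketch of how the class above consumes findings. The findings below are hand-written to match the fields this class reads (`result`, `rules`, `suggestions`); real values come from the guardrail's automated reasoning assessment:

```python
from feedback import InteractionFeedback

# Hand-written findings shaped like what InteractionFeedback expects;
# in practice these come from the guardrail's automated reasoning output.
findings = [
    {
        "result": "INVALID",
        "rules": [{"description": "Part-time employees are not eligible for LoAP."}],
        "suggestions": [
            {"type": "CORRECTION", "key": "is_eligible", "value": "false"},
            {"type": "ASSUMPTION", "key": "employment_type", "value": "FULL_TIME"},
        ],
    }
]

feedback = InteractionFeedback(findings)
print(feedback.validation_result())    # INVALID
print(feedback.to_feedback_message())  # <feedback>...</feedback> plus <correction>/<assumption> tags
correction_prompt = feedback.get_bedrock_feedback()  # user message asking the model to rewrite its answer
```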
Binary file added (+425 KB): responsible_ai/bedrock-automated-reasoning-checks/images/ar-policy-nav.png
Binary file added (+226 KB): responsible_ai/bedrock-automated-reasoning-checks/images/create-policy.png
Binary file added (+242 KB): responsible_ai/bedrock-automated-reasoning-checks/images/policy-ready.png
Binary file added (+254 KB): responsible_ai/bedrock-automated-reasoning-checks/images/policy_id.png
Binary file added (+332 KB): responsible_ai/bedrock-automated-reasoning-checks/images/version-details.png
75 changes: 75 additions & 0 deletions
responsible_ai/bedrock-automated-reasoning-checks/readme.md
# Automated Reasoning Checks with Amazon Bedrock Guardrails

This repository demonstrates how to implement Automated Reasoning (AR) checks using Amazon Bedrock Guardrails. The implementation validates model responses against business policies and automatically corrects policy violations.

## Repository Structure
```
├── automated_reasoning_checks.ipynb   # Main notebook with implementation
├── conversation.py                    # Conversation management module
├── feedback.py                        # AR feedback processing module
├── validation_client.py               # Bedrock validation client
├── models/                            # Directory for Bedrock service models. This will be given to you by your account manager
│   ├── bedrock-<version>.api.json
│   └── bedrock-runtime-<version>.api.json
└── README.md
```
## Prerequisites
- AWS account with Bedrock access
- Appropriate IAM roles and permissions
- Python 3.8+
- Required Python packages:
  - `boto3`
  - `botocore`
  - `jupyter`
## Setup Instructions
1. Clone this repository
   ```bash
   git clone <repository-url>
   cd automated_reasoning_checks
   ```
2. Download the Bedrock service models
   - Place the model files in the `models/` directory
   - Ensure the correct file naming convention
## Usage
1. Create an AR Policy (via AWS Console)
   - Navigate to Amazon Bedrock > Safeguards > Automated Reasoning
   - Follow the policy creation steps in the notebook
2. Run the Notebook
   ```bash
   jupyter notebook automated_reasoning_checks.ipynb
   ```
3. Follow the implementation steps in the notebook:
   - Configure environment
   - Load service models
   - Create/attach guardrails
   - Test with sample queries (a rough sketch of the underlying guardrail call follows this list)
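For orientation, the sketch below shows roughly what the guardrail step amounts to: sending a drafted answer through the `ApplyGuardrail` API and handing the automated reasoning findings to `feedback.InteractionFeedback`. It is not copied from the notebook; the guardrail identifier and version are placeholders, and it assumes a boto3/botocore installation that includes the preview Bedrock service models mentioned above.

```python
import boto3

from conversation import Interaction

# Placeholder identifiers; use the guardrail created in the notebook
GUARDRAIL_ID = "<your-guardrail-id>"
GUARDRAIL_VERSION = "1"

runtime_client = boto3.client("bedrock-runtime")

interaction = Interaction("I am a part-time employee, am I eligible for LoAP?")
interaction.set_answer("Yes, all employees are eligible for LoAP.")

# Validate the drafted answer against the AR policy attached to the guardrail
response = runtime_client.apply_guardrail(
    guardrailIdentifier=GUARDRAIL_ID,
    guardrailVersion=GUARDRAIL_VERSION,
    source="OUTPUT",
    content=interaction.to_guardrail_input(),
)

# The assessments in the response carry the automated reasoning findings that
# feedback.InteractionFeedback consumes; their exact field layout depends on the
# preview service model, so extraction is left schematic here.
assessments = response.get("assessments", [])
```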
## Implementation Details
- `conversation.py`: Manages conversation flow and history
- `feedback.py`: Processes AR policy validation feedback
- `validation_client.py`: Handles Bedrock model interactions and guardrail validation
- `models/`: Contains required Bedrock service model files
## Example Usage
```python
# Initialize validation client
client = ValidatingConversationalClient(
    bedrock_client=runtime_client,
    guardrail_id=guardrail_id,
    guardrail_version=guardrail_version,
    model=model_id
)

# Process question with AR validation
process_qa("I am a part-time employee, am I eligible for LoAP?")
```
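`process_qa` is defined in the notebook. As a rough illustration of the loop it implements, the flow is: answer the question, validate the answer with the AR guardrail, and if the findings come back invalid, ask the model to rewrite using the generated feedback. The client method names below (`ask`, `validate`, `rewrite`) are hypothetical stand-ins, not the actual `ValidatingConversationalClient` API:

```python
def process_qa(question: str) -> str:
    """Hypothetical sketch of the ask -> validate -> rewrite loop."""
    interaction = client.ask(question)        # hypothetical: calls Converse and stores the answer
    feedback = client.validate(interaction)   # hypothetical: runs the answer through the AR guardrail
    if feedback is not None and feedback.is_invalid():
        rewritten = client.rewrite(interaction, feedback)  # hypothetical: re-prompts with the feedback message
        interaction.set_rewritten_answer(rewritten)
    return interaction.rewritten_answer or interaction.answer
```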
## Contributing
Feel free to submit issues and enhancement requests!

## References
- [Amazon Bedrock Documentation](https://docs.aws.amazon.com/bedrock)
- [Bedrock Guardrails Guide](https://docs.aws.amazon.com/bedrock/latest/userguide/guardrails.html)