Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
14 changes: 13 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -252,16 +252,28 @@ fixable = [
"YTT",
]
select = [
"A", # https://docs.astral.sh/ruff/rules/#flake8-builtins-a
"B", # https://docs.astral.sh/ruff/rules/#flake8-bugbear-b
"C4", # https://docs.astral.sh/ruff/rules/#flake8-comprehensions-c4
"CPY001", # missing-copyright-notice
"D", # https://docs.astral.sh/ruff/rules/#pydocstyle-d
"DOC", # https://docs.astral.sh/ruff/rules/#pydoclint-doc
"ERA", # https://docs.astral.sh/ruff/rules/#eradicate-era
"EXE", # https://docs.astral.sh/ruff/rules/#flake8-executable-exe
"F401", # unused-import
"FA", # https://docs.astral.sh/ruff/rules/#flake8-future-annotations-fa
"FLY", # https://docs.astral.sh/ruff/rules/#flynt-fly
"I", # isort
"ICN", # https://docs.astral.sh/ruff/rules/#flake8-import-conventions-icn
"ISC", # https://docs.astral.sh/ruff/rules/#flake8-implicit-str-concat-isc
"LOG", # https://docs.astral.sh/ruff/rules/#flake8-logging-log
"PIE", # https://docs.astral.sh/ruff/rules/#flake8-pie-pie
"PLE", # https://docs.astral.sh/ruff/rules/#pylint-ple
"Q", # https://docs.astral.sh/ruff/rules/#flake8-quotes-q
"RET", # https://docs.astral.sh/ruff/rules/#flake8-return-ret
"RSE", # https://docs.astral.sh/ruff/rules/#flake8-raise-rse
"SIM", # https://docs.astral.sh/ruff/rules/#flake8-simplify-sim
"SLOT", # https://docs.astral.sh/ruff/rules/#flake8-slots-slot
"TCH", # https://docs.astral.sh/ruff/rules/#flake8-type-checking-tch
"UP", # https://docs.astral.sh/ruff/rules/#pyupgrade-up
"W", # https://docs.astral.sh/ruff/rules/#pycodestyle-w
Expand Down Expand Up @@ -300,7 +312,7 @@ notice-rgx = "Copyright \\(c\\) Microsoft Corporation\\.\\s*\\n.*Licensed under
# Temporary ignores for pyrit/ subdirectories until issue #1176
# https://github.com/Azure/PyRIT/issues/1176 is fully resolved
# TODO: Remove these ignores once the issues are fixed
"pyrit/{auxiliary_attacks,ui}/**/*.py" = ["B905", "D101", "D102", "D103", "D104", "D105", "D106", "D107", "D401", "D404", "D417", "D418", "DOC102", "DOC201", "DOC202", "DOC402", "DOC501", "SIM101", "SIM108"]
"pyrit/{auxiliary_attacks,ui}/**/*.py" = ["A", "B905", "D101", "D102", "D103", "D104", "D105", "D106", "D107", "D401", "D404", "D417", "D418", "DOC102", "DOC201", "DOC202", "DOC402", "DOC501", "SIM101", "SIM108"]
# Backend API routes raise HTTPException handled by FastAPI, not true exceptions
"pyrit/backend/**/*.py" = ["DOC501", "B008"]
"pyrit/__init__.py" = ["D104"]
Expand Down
4 changes: 2 additions & 2 deletions pyrit/auth/azure_auth.py
Original file line number Diff line number Diff line change
Expand Up @@ -325,7 +325,7 @@ def get_speech_config(resource_id: Union[str, None], key: Union[str, None], regi
except ModuleNotFoundError as e:
logger.error(
"Could not import azure.cognitiveservices.speech. "
+ "You may need to install it via 'pip install pyrit[speech]'"
"You may need to install it via 'pip install pyrit[speech]'"
)
raise e

Expand Down Expand Up @@ -361,7 +361,7 @@ def get_speech_config_from_default_azure_credential(resource_id: str, region: st
except ModuleNotFoundError as e:
logger.error(
"Could not import azure.cognitiveservices.speech. "
+ "You may need to install it via 'pip install pyrit[speech]'"
"You may need to install it via 'pip install pyrit[speech]'"
)
raise e

Expand Down
2 changes: 1 addition & 1 deletion pyrit/backend/middleware/error_handlers.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,7 +165,7 @@ async def generic_exception_handler(
# Log the full exception for debugging
logger.error(
f"Unhandled exception on {request.method} {request.url.path}: {exc}",
exc_info=True,
exc_info=True, # noqa: LOG014
)

problem = ProblemDetail(
Expand Down
2 changes: 1 addition & 1 deletion pyrit/common/path.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from appdirs import user_data_dir


def get_default_data_path(dir: str) -> pathlib.Path:
def get_default_data_path(dir: str) -> pathlib.Path: # noqa: A002
"""
Retrieve the default data path for PyRIT.

Expand Down
2 changes: 1 addition & 1 deletion pyrit/exceptions/exception_classes.py
Original file line number Diff line number Diff line change
Expand Up @@ -377,6 +377,6 @@ def handle_bad_request_exception(
request=request, response_text_pieces=[resp_text], response_type="error", error="blocked"
)
else:
raise
raise # noqa: PLE0704

return response_entry
2 changes: 1 addition & 1 deletion pyrit/exceptions/exceptions_helpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,7 @@ def log_exception(retry_state: RetryCallState) -> None:
try:
exec_context = get_execution_context()
if exec_context:
# Format: "objective scorer; TrueFalseScorer::_score_value_with_llm"
# Format: "objective scorer; TrueFalseScorer::_score_value_with_llm" # noqa: ERA001
role_display = exec_context.component_role.value.replace("_", " ")
if exec_context.component_name:
for_clause = f"{role_display}. {exec_context.component_name}::{fn_name}"
Expand Down
2 changes: 1 addition & 1 deletion pyrit/memory/memory_interface.py
Original file line number Diff line number Diff line change
Expand Up @@ -392,7 +392,7 @@ def add_scores_to_memory(self, *, scores: Sequence[Score]) -> None:
message_piece_id = score.message_piece_id
pieces = self.get_message_pieces(prompt_ids=[str(message_piece_id)])
if not pieces:
logging.error(f"MessagePiece with ID {message_piece_id} not found in memory.")
logger.error(f"MessagePiece with ID {message_piece_id} not found in memory.")
continue
# auto-link score to the original prompt id if the prompt is a duplicate
if pieces[0].original_prompt_id != pieces[0].id:
Expand Down
2 changes: 1 addition & 1 deletion pyrit/models/message_piece.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,7 +34,7 @@ def __init__(
original_value_sha256: Optional[str] = None,
converted_value: Optional[str] = None,
converted_value_sha256: Optional[str] = None,
id: Optional[uuid.UUID | str] = None,
id: Optional[uuid.UUID | str] = None, # noqa: A002
conversation_id: Optional[str] = None,
sequence: int = -1,
labels: Optional[dict[str, str]] = None,
Expand Down
2 changes: 1 addition & 1 deletion pyrit/models/scenario_result.py
Original file line number Diff line number Diff line change
Expand Up @@ -67,7 +67,7 @@ def __init__(
labels: Optional[dict[str, str]] = None,
completion_time: Optional[datetime] = None,
number_tries: int = 0,
id: Optional[uuid.UUID] = None,
id: Optional[uuid.UUID] = None, # noqa: A002
# Deprecated parameter - will be removed in 0.13.0
objective_scorer: Optional["Scorer"] = None,
) -> None:
Expand Down
2 changes: 1 addition & 1 deletion pyrit/models/score.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def __init__(
score_type: ScoreType,
score_rationale: str,
message_piece_id: str | uuid.UUID,
id: Optional[uuid.UUID | str] = None,
id: Optional[uuid.UUID | str] = None, # noqa: A002
score_category: Optional[list[str]] = None,
score_metadata: Optional[dict[str, Union[str, int, float]]] = None,
scorer_class_identifier: Union[ComponentIdentifier, dict[str, Any]],
Expand Down
2 changes: 1 addition & 1 deletion pyrit/models/seeds/seed.py
Original file line number Diff line number Diff line change
Expand Up @@ -201,7 +201,7 @@ def render_template_value_silent(self, **kwargs: Any) -> str:
# Render the template with the provided kwargs
return jinja_template.render(**kwargs)
except Exception as e:
logging.error("Error rendering template: %s", e)
logger.error("Error rendering template: %s", e)
return self.value

async def set_sha256_value_async(self) -> None:
Expand Down
6 changes: 3 additions & 3 deletions pyrit/models/storage_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -225,9 +225,9 @@ async def _upload_blob_async(self, file_name: str, data: bytes, content_type: st
if isinstance(exc, ClientAuthenticationError):
logger.exception(
msg="Authentication failed. Please check that the container existence in the "
+ "Azure Storage Account and ensure the validity of the provided SAS token. If you "
+ "haven't set the SAS token as an environment variable use `az login` to "
+ "enable delegation-based SAS authentication to connect to the storage account"
"Azure Storage Account and ensure the validity of the provided SAS token. If you "
"haven't set the SAS token as an environment variable use `az login` to "
"enable delegation-based SAS authentication to connect to the storage account"
)
raise
logger.exception(msg=f"An unexpected error occurred: {exc}")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -150,7 +150,7 @@ def recognize_audio(self, audio_bytes: bytes) -> str:
except ModuleNotFoundError as e:
logger.error(
"Could not import azure.cognitiveservices.speech. "
+ "You may need to install it via 'pip install pyrit[speech]'"
"You may need to install it via 'pip install pyrit[speech]'"
)
raise e

Expand Down Expand Up @@ -219,7 +219,7 @@ def stop_cb(self, evt: Any, recognizer: Any) -> None:
except ModuleNotFoundError as e:
logger.error(
"Could not import azure.cognitiveservices.speech. "
+ "You may need to install it via 'pip install pyrit[speech]'"
"You may need to install it via 'pip install pyrit[speech]'"
)
raise e

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -130,7 +130,7 @@ async def convert_async(self, *, prompt: str, input_type: PromptDataType = "text
except ModuleNotFoundError as e:
logger.error(
"Could not import azure.cognitiveservices.speech. "
+ "You may need to install it via 'pip install pyrit[speech]'"
"You may need to install it via 'pip install pyrit[speech]'"
)
raise e

Expand Down Expand Up @@ -176,7 +176,7 @@ async def convert_async(self, *, prompt: str, input_type: PromptDataType = "text
logger.error(f"Error details: {cancellation_details.error_details}")
raise RuntimeError(
f"Speech synthesis canceled: {cancellation_details.reason}"
+ f"Error details: {cancellation_details.error_details}"
f"Error details: {cancellation_details.error_details}"
)
except Exception as e:
logger.error("Failed to convert prompt to audio: %s", str(e))
Expand Down
2 changes: 1 addition & 1 deletion pyrit/prompt_converter/insert_punctuation_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ def _is_valid_punctuation(self, punctuation_list: list[str]) -> bool:
Returns:
bool: valid list and valid punctuations
"""
return all(str in string.punctuation for str in punctuation_list)
return all(char in string.punctuation for char in punctuation_list)

async def convert_async(
self, *, prompt: str, input_type: PromptDataType = "text", punctuation_list: Optional[list[str]] = None
Expand Down
62 changes: 31 additions & 31 deletions pyrit/prompt_converter/transparency_attack_converter.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@
from io import BytesIO
from pathlib import Path

import numpy
import numpy as np
from PIL import Image

from pyrit.identifiers import ComponentIdentifier
Expand All @@ -18,7 +18,7 @@

class _AdamOptimizer:
"""
Implementation of the Adam Optimizer using NumPy. Adam optimization is a stochastic gradient
Implementation of the Adam Optimizer using NumPy. Adam optimization is a stochastic gradient
descent method that is based on adaptive estimation of first-order and second-order moments.
For further details, see the original paper: `"Adam: A Method for Stochastic Optimization"`
by D. P. Kingma and J. Ba, 2014: https://arxiv.org/abs/1412.6980.
Expand All @@ -44,24 +44,24 @@ def __init__(
self.beta_1 = beta_1
self.beta_2 = beta_2
self.epsilon = epsilon
self.m: numpy.ndarray # type: ignore[type-arg, unused-ignore] # first moment vector
self.v: numpy.ndarray # type: ignore[type-arg, unused-ignore] # second moment vector
self.m: np.ndarray # type: ignore[type-arg, unused-ignore] # first moment vector
self.v: np.ndarray # type: ignore[type-arg, unused-ignore] # second moment vector
self.t = 0 # initialize timestep

def update(self, *, params: numpy.ndarray, grads: numpy.ndarray) -> numpy.ndarray: # type: ignore[type-arg, unused-ignore]
def update(self, *, params: np.ndarray, grads: np.ndarray) -> np.ndarray: # type: ignore[type-arg, unused-ignore]
"""
Perform a single update step using the Adam optimization algorithm.

Args:
params (numpy.ndarray): Current parameter values to be optimized.
grads (numpy.ndarray): Gradients w.r.t. stochastic objective.
params (np.ndarray): Current parameter values to be optimized.
grads (np.ndarray): Gradients w.r.t. stochastic objective.

Returns:
numpy.ndarray: Updated parameter values after applying the Adam optimization step.
np.ndarray: Updated parameter values after applying the Adam optimization step.
"""
if self.t == 0:
self.m = numpy.zeros_like(params)
self.v = numpy.zeros_like(params)
self.m = np.zeros_like(params)
self.v = np.zeros_like(params)
self.t += 1

# Update biased first and second raw moment estimates
Expand All @@ -72,7 +72,7 @@ def update(self, *, params: numpy.ndarray, grads: numpy.ndarray) -> numpy.ndarra
m_hat = self.m / (1 - self.beta_1**self.t)
v_hat = self.v / (1 - self.beta_2**self.t)

params -= self.learning_rate * m_hat / (numpy.sqrt(v_hat) + self.epsilon)
params -= self.learning_rate * m_hat / (np.sqrt(v_hat) + self.epsilon)
return params


Expand Down Expand Up @@ -200,15 +200,15 @@ def _build_identifier(self) -> ComponentIdentifier:
}
)

def _load_and_preprocess_image(self, path: str) -> numpy.ndarray: # type: ignore[type-arg, unused-ignore]
def _load_and_preprocess_image(self, path: str) -> np.ndarray: # type: ignore[type-arg, unused-ignore]
"""
Load image, convert to grayscale, resize, and normalize for optimization.

Args:
path (str): The file path to the image.

Returns:
numpy.ndarray: Preprocessed image as a normalized NumPy array.
np.ndarray: Preprocessed image as a normalized NumPy array.

Raises:
ValueError: If the image cannot be loaded or processed.
Expand All @@ -217,52 +217,52 @@ def _load_and_preprocess_image(self, path: str) -> numpy.ndarray: # type: ignor
with Image.open(path) as img:
img_gray = img.convert("L") if img.mode != "L" else img # read as grayscale
img_resized = img_gray.resize(self.size, Image.Resampling.LANCZOS)
return numpy.array(img_resized, dtype=numpy.float32) / 255.0 # normalize to [0, 1]
return np.array(img_resized, dtype=np.float32) / 255.0 # normalize to [0, 1]
except Exception as e:
raise ValueError(f"Failed to load and preprocess image from {path}: {e}") from e

def _compute_mse_loss(self, blended_image: numpy.ndarray, target_tensor: numpy.ndarray) -> float: # type: ignore[type-arg, unused-ignore]
def _compute_mse_loss(self, blended_image: np.ndarray, target_tensor: np.ndarray) -> float: # type: ignore[type-arg, unused-ignore]
"""
Compute Mean Squared Error (MSE) loss between blended and target images.

Args:
blended_image (numpy.ndarray): The blended image array.
target_tensor (numpy.ndarray): The target benign image array.
blended_image (np.ndarray): The blended image array.
target_tensor (np.ndarray): The target benign image array.

Returns:
float: The computed MSE loss value.
"""
return float(numpy.mean(numpy.square(blended_image - target_tensor)))
return float(np.mean(np.square(blended_image - target_tensor)))

def _create_blended_image(self, attack_image: numpy.ndarray, alpha: numpy.ndarray) -> numpy.ndarray: # type: ignore[type-arg, unused-ignore]
def _create_blended_image(self, attack_image: np.ndarray, alpha: np.ndarray) -> np.ndarray: # type: ignore[type-arg, unused-ignore]
"""
Create a blended image using the attack image and alpha transparency.

Args:
attack_image (numpy.ndarray): The attack image array.
alpha (numpy.ndarray): The alpha transparency array.
attack_image (np.ndarray): The attack image array.
alpha (np.ndarray): The alpha transparency array.

Returns:
numpy.ndarray: The blended image in LA mode.
np.ndarray: The blended image in LA mode.
"""
attack_image_uint8 = (attack_image * 255).astype(numpy.uint8)
transparency_uint8 = (alpha * 255).astype(numpy.uint8)
attack_image_uint8 = (attack_image * 255).astype(np.uint8)
transparency_uint8 = (alpha * 255).astype(np.uint8)

# Create LA image: Luminance + Alpha (grayscale with transparency)
height, width = attack_image_uint8.shape[:2]
la_image = numpy.zeros((height, width, 2), dtype=numpy.uint8)
la_image = np.zeros((height, width, 2), dtype=np.uint8)
la_image[:, :, 0] = attack_image_uint8 # L (Luminance)
la_image[:, :, 1] = transparency_uint8 # A (Alpha)

return la_image

async def _save_blended_image(self, attack_image: numpy.ndarray, alpha: numpy.ndarray) -> str: # type: ignore[type-arg, unused-ignore]
async def _save_blended_image(self, attack_image: np.ndarray, alpha: np.ndarray) -> str: # type: ignore[type-arg, unused-ignore]
"""
Save the blended image with transparency as a PNG file.

Args:
attack_image (numpy.ndarray): The attack image array.
alpha (numpy.ndarray): The alpha transparency array.
attack_image (np.ndarray): The attack image array.
alpha (np.ndarray): The alpha transparency array.

Returns:
str: The file path to the saved blended image.
Expand Down Expand Up @@ -309,8 +309,8 @@ async def convert_async(self, *, prompt: str, input_type: PromptDataType = "imag
background_image = self._load_and_preprocess_image(prompt)
background_tensor = background_image * 0.5 # darkening for better blending optimization

alpha = numpy.ones_like(background_tensor) # optimized to determine transparency pattern
white_background = numpy.ones_like(background_tensor) # white canvas for blending simulation
alpha = np.ones_like(background_tensor) # optimized to determine transparency pattern
white_background = np.ones_like(background_tensor) # white canvas for blending simulation

optimizer = _AdamOptimizer(learning_rate=self.learning_rate)
grad_blended_alpha_constant = background_tensor - white_background
Expand Down Expand Up @@ -340,7 +340,7 @@ async def convert_async(self, *, prompt: str, input_type: PromptDataType = "imag
grad_loss_blended = 2 * (blended_image - self._cached_benign_image) / blended_image.size
grad_alpha = grad_loss_blended * grad_blended_alpha_constant
alpha = optimizer.update(params=alpha, grads=grad_alpha)
alpha = numpy.clip(alpha, 0.0, 1.0)
alpha = np.clip(alpha, 0.0, 1.0)

image_path = await self._save_blended_image(background_tensor, alpha)
return ConverterResult(output_text=image_path, output_type="image_path")
6 changes: 3 additions & 3 deletions pyrit/prompt_target/azure_blob_storage_target.py
Original file line number Diff line number Diff line change
Expand Up @@ -144,9 +144,9 @@ async def _upload_blob_async(self, file_name: str, data: bytes, content_type: st
if isinstance(exc, ClientAuthenticationError):
logger.exception(
msg="Authentication failed. Please check that the container existence in the "
+ "Azure Storage Account and ensure the validity of the provided SAS token. If you "
+ "haven't set the SAS token as an environment variable use `az login` to "
+ "enable delegation-based SAS authentication to connect to the storage account"
"Azure Storage Account and ensure the validity of the provided SAS token. If you "
"haven't set the SAS token as an environment variable use `az login` to "
"enable delegation-based SAS authentication to connect to the storage account"
)
raise
logger.exception(msg=f"An unexpected error occurred: {exc}")
Expand Down
Loading