diff --git a/pyproject.toml b/pyproject.toml index d87582c38..81318336c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -259,6 +259,7 @@ select = [ "DOC", # https://docs.astral.sh/ruff/rules/#pydoclint-doc "F401", # unused-import "I", # isort + "PGH", # https://docs.astral.sh/ruff/rules/#pygrep-hooks-pgh "PIE", # https://docs.astral.sh/ruff/rules/#flake8-pie-pie "RET", # https://docs.astral.sh/ruff/rules/#flake8-return-ret "SIM", # https://docs.astral.sh/ruff/rules/#flake8-simplify-sim diff --git a/pyrit/auth/copilot_authenticator.py b/pyrit/auth/copilot_authenticator.py index 4e7358aa6..ea85979fb 100644 --- a/pyrit/auth/copilot_authenticator.py +++ b/pyrit/auth/copilot_authenticator.py @@ -233,7 +233,7 @@ async def _get_cached_token_if_available_and_valid(self) -> Optional[dict[str, A minutes_left = (expiry_time - current_time).total_seconds() / 60 logger.info(f"Cached token is valid for another {minutes_left:.2f} minutes") - return token_data # type: ignore + return token_data # type: ignore[no-any-return] except Exception as e: error_name = type(e).__name__ @@ -450,7 +450,7 @@ async def response_handler(response: Any) -> None: else: logger.error(f"Failed to retrieve bearer token within {self._token_capture_timeout} seconds.") - return bearer_token # type: ignore + return bearer_token # type: ignore[no-any-return] except Exception as e: logger.error("Failed to retrieve access token using Playwright.") diff --git a/pyrit/cli/frontend_core.py b/pyrit/cli/frontend_core.py index 5c49525ec..b131b3cf4 100644 --- a/pyrit/cli/frontend_core.py +++ b/pyrit/cli/frontend_core.py @@ -32,11 +32,11 @@ HAS_TERMCOLOR = False # Create a dummy termcolor module for fallback - class termcolor: # type: ignore + class termcolor: # type: ignore[no-redef] """Dummy termcolor fallback for colored printing if termcolor is not installed.""" @staticmethod - def cprint(text: str, color: str = None, attrs: list = None) -> None: # type: ignore + def cprint(text: str, color: str = None, 
attrs: list = None) -> None: # type: ignore[type-arg] """Print text without color.""" print(text) diff --git a/pyrit/common/display_response.py b/pyrit/common/display_response.py index 43c85d579..7341df837 100644 --- a/pyrit/common/display_response.py +++ b/pyrit/common/display_response.py @@ -47,6 +47,6 @@ async def display_image_response(response_piece: MessagePiece) -> None: image = Image.open(image_stream) # Jupyter built-in display function only works in notebooks. - display(image) # type: ignore # noqa: F821 + display(image) # type: ignore[name-defined] # noqa: F821 if response_piece.response_error == "blocked": logger.info("---\nContent blocked, cannot show a response.\n---") diff --git a/pyrit/common/notebook_utils.py b/pyrit/common/notebook_utils.py index 8b8c98a11..22b3a9b93 100644 --- a/pyrit/common/notebook_utils.py +++ b/pyrit/common/notebook_utils.py @@ -13,7 +13,7 @@ def is_in_ipython_session() -> bool: bool: True if the code is running in an IPython session, False otherwise. 
""" try: - __IPYTHON__ # type: ignore # noqa: B018 + __IPYTHON__ # type: ignore[name-defined] # noqa: B018 return True except NameError: return False diff --git a/pyrit/common/yaml_loadable.py b/pyrit/common/yaml_loadable.py index c3c669ed7..a31dcf189 100644 --- a/pyrit/common/yaml_loadable.py +++ b/pyrit/common/yaml_loadable.py @@ -41,5 +41,5 @@ def from_yaml_file(cls: type[T], file: Union[Path | str]) -> T: # If this class provides a from_dict factory, use it; # otherwise, just instantiate directly with **yaml_data if hasattr(cls, "from_dict") and callable(getattr(cls, "from_dict")): # noqa: B009 - return cls.from_dict(yaml_data) # type: ignore + return cls.from_dict(yaml_data) # type: ignore[attr-defined, no-any-return] return cls(**yaml_data) diff --git a/pyrit/datasets/seed_datasets/remote/red_team_social_bias_dataset.py b/pyrit/datasets/seed_datasets/remote/red_team_social_bias_dataset.py index aa3197442..8383fa575 100644 --- a/pyrit/datasets/seed_datasets/remote/red_team_social_bias_dataset.py +++ b/pyrit/datasets/seed_datasets/remote/red_team_social_bias_dataset.py @@ -116,7 +116,7 @@ async def fetch_dataset(self, *, cache: bool = True) -> SeedDataset: data_type="text", prompt_group_id=group_id, sequence=i, - **prompt_metadata, # type: ignore + **prompt_metadata, # type: ignore[arg-type] ) ) else: @@ -133,7 +133,7 @@ async def fetch_dataset(self, *, cache: bool = True) -> SeedDataset: SeedPrompt( value=escaped_cleaned_value, data_type="text", - **prompt_metadata, # type: ignore + **prompt_metadata, # type: ignore[arg-type] ) ) diff --git a/pyrit/memory/azure_sql_memory.py b/pyrit/memory/azure_sql_memory.py index 6702f2240..91335ca22 100644 --- a/pyrit/memory/azure_sql_memory.py +++ b/pyrit/memory/azure_sql_memory.py @@ -650,7 +650,7 @@ def _query_entries( return query.distinct().all() return query.all() except SQLAlchemyError as e: - logger.exception(f"Error fetching data from table {model_class.__tablename__}: {e}") # type: ignore + 
logger.exception(f"Error fetching data from table {model_class.__tablename__}: {e}") # type: ignore[attr-defined] raise def _update_entries(self, *, entries: MutableSequence[Base], update_fields: dict[str, Any]) -> bool: diff --git a/pyrit/memory/memory_interface.py b/pyrit/memory/memory_interface.py index 67e6dcfb6..e377ae0fd 100644 --- a/pyrit/memory/memory_interface.py +++ b/pyrit/memory/memory_interface.py @@ -1050,7 +1050,7 @@ def get_seed_dataset_names(self) -> Sequence[str]: try: entries: Sequence[SeedEntry] = self._query_entries( SeedEntry, - conditions=and_(SeedEntry.dataset_name is not None, SeedEntry.dataset_name != ""), # type: ignore + conditions=and_(SeedEntry.dataset_name is not None, SeedEntry.dataset_name != ""), # type: ignore[arg-type] distinct=True, ) # Extract unique dataset names from the entries @@ -1484,7 +1484,7 @@ def update_scenario_run_state(self, *, scenario_result_id: str, scenario_run_sta scenario_result = scenario_results[0] # Update the scenario run state - scenario_result.scenario_run_state = scenario_run_state # type: ignore + scenario_result.scenario_run_state = scenario_run_state # type: ignore[assignment] # Save updated result back to memory using update entry = ScenarioResultEntry(entry=scenario_result) diff --git a/pyrit/memory/memory_models.py b/pyrit/memory/memory_models.py index 04be633df..524e78df2 100644 --- a/pyrit/memory/memory_models.py +++ b/pyrit/memory/memory_models.py @@ -232,14 +232,14 @@ def __init__(self, *, entry: MessagePiece): ) self.original_value = entry.original_value - self.original_value_data_type = entry.original_value_data_type # type: ignore + self.original_value_data_type = entry.original_value_data_type # type: ignore[assignment] self.original_value_sha256 = entry.original_value_sha256 self.converted_value = entry.converted_value - self.converted_value_data_type = entry.converted_value_data_type # type: ignore + self.converted_value_data_type = entry.converted_value_data_type # type: 
ignore[assignment] self.converted_value_sha256 = entry.converted_value_sha256 - self.response_error = entry.response_error # type: ignore + self.response_error = entry.response_error # type: ignore[assignment] self.original_prompt_id = entry.original_prompt_id self.pyrit_version = pyrit.__version__ @@ -562,7 +562,7 @@ def __init__(self, *, entry: Seed): self.data_type = entry.data_type self.name = entry.name self.dataset_name = entry.dataset_name - self.harm_categories = entry.harm_categories # type: ignore + self.harm_categories = entry.harm_categories # type: ignore[assignment] self.description = entry.description self.authors = list(entry.authors) if entry.authors else None self.groups = list(entry.groups) if entry.groups else None diff --git a/pyrit/memory/sqlite_memory.py b/pyrit/memory/sqlite_memory.py index cfce238fd..f9cf3c178 100644 --- a/pyrit/memory/sqlite_memory.py +++ b/pyrit/memory/sqlite_memory.py @@ -240,7 +240,7 @@ def _query_entries( return query.distinct().all() return query.all() except SQLAlchemyError as e: - logger.exception(f"Error fetching data from table {model_class.__tablename__}: {e}") # type: ignore + logger.exception(f"Error fetching data from table {model_class.__tablename__}: {e}") # type: ignore[attr-defined] raise def _insert_entry(self, entry: Base) -> None: @@ -448,7 +448,7 @@ def export_all_tables(self, *, export_type: str = "json") -> None: file_extension = f".{export_type}" file_path = DB_DATA_PATH / f"{table_name}{file_extension}" # Convert to list for exporter compatibility - self.exporter.export_data(list(data), file_path=file_path, export_type=export_type) # type: ignore + self.exporter.export_data(list(data), file_path=file_path, export_type=export_type) # type: ignore[arg-type] def _get_attack_result_harm_category_condition(self, *, targeted_harm_categories: Sequence[str]) -> Any: """ diff --git a/pyrit/models/message_piece.py b/pyrit/models/message_piece.py index 62a6e4890..3f04a2dde 100644 --- 
a/pyrit/models/message_piece.py +++ b/pyrit/models/message_piece.py @@ -91,7 +91,7 @@ def __init__( """ self.id = id if id else uuid4() - if role not in ChatMessageRole.__args__: # type: ignore + if role not in ChatMessageRole.__args__: # type: ignore[attr-defined] raise ValueError(f"Role {role} is not a valid role.") self._role: ChatMessageRole = role @@ -251,14 +251,20 @@ def role(self, value: ChatMessageRole) -> None: ValueError: If the role is not a valid ChatMessageRole. """ - if value not in ChatMessageRole.__args__: # type: ignore + if value not in ChatMessageRole.__args__: # type: ignore[attr-defined] raise ValueError(f"Role {value} is not a valid role.") self._role = value - def to_message(self) -> Message: # type: ignore # noqa F821 + def to_message(self) -> Message: # type: ignore[name-defined] # noqa: F821 + """ + Convert this message piece into a Message. + + Returns: + Message: A Message containing this piece. + """ from pyrit.models.message import Message - return Message([self]) # noqa F821 + return Message([self]) # noqa: F821 def has_error(self) -> bool: """ diff --git a/pyrit/models/seeds/seed.py b/pyrit/models/seeds/seed.py index c506a49a1..e69c4f38a 100644 --- a/pyrit/models/seeds/seed.py +++ b/pyrit/models/seeds/seed.py @@ -194,7 +194,7 @@ def render_template_value_silent(self, **kwargs: Any) -> str: return self.value # Create a Jinja template with PartialUndefined placeholders - env = Environment(loader=BaseLoader, undefined=PartialUndefined) # type: ignore + env = Environment(loader=BaseLoader, undefined=PartialUndefined) # type: ignore[arg-type] jinja_template = env.from_string(self.value) try: diff --git a/pyrit/prompt_converter/selective_text_converter.py b/pyrit/prompt_converter/selective_text_converter.py index a9066ede3..7346904a3 100644 --- a/pyrit/prompt_converter/selective_text_converter.py +++ b/pyrit/prompt_converter/selective_text_converter.py @@ -196,7 +196,7 @@ async def _convert_word_level_async(self, *, prompt: str) -> 
ConverterResult: words = prompt.split(self._word_separator) # Get selected word indices - selected_indices = self._selection_strategy.select_words(words=words) # type: ignore + selected_indices = self._selection_strategy.select_words(words=words) # type: ignore[attr-defined] # If no words selected, return original prompt if not selected_indices: diff --git a/pyrit/prompt_target/azure_ml_chat_target.py b/pyrit/prompt_target/azure_ml_chat_target.py index 4daac0ee1..bc1bdbdca 100644 --- a/pyrit/prompt_target/azure_ml_chat_target.py +++ b/pyrit/prompt_target/azure_ml_chat_target.py @@ -221,7 +221,7 @@ async def _complete_chat_async( raise EmptyResponseException(message="The chat returned an empty response.") from e raise e( f"Exception obtaining response from the target. Returned response: {response.json()}. " - + f"Exception: {str(e)}" # type: ignore + + f"Exception: {str(e)}" # type: ignore[operator] ) from e async def _construct_http_body_async( diff --git a/pyrit/prompt_target/openai/openai_chat_target.py b/pyrit/prompt_target/openai/openai_chat_target.py index e55461539..9e1d53750 100644 --- a/pyrit/prompt_target/openai/openai_chat_target.py +++ b/pyrit/prompt_target/openai/openai_chat_target.py @@ -587,7 +587,7 @@ async def _build_chat_messages_for_multi_modal_async( elif message_piece.converted_value_data_type == "image_path": data_base64_encoded_url = await convert_local_image_to_data_url(message_piece.converted_value) image_url_entry = {"url": data_base64_encoded_url} - entry = {"type": "image_url", "image_url": image_url_entry} # type: ignore + entry = {"type": "image_url", "image_url": image_url_entry} # type: ignore[dict-item] content.append(entry) elif message_piece.converted_value_data_type == "audio_path": ext = DataTypeSerializer.get_extension(message_piece.converted_value) @@ -608,7 +608,7 @@ async def _build_chat_messages_for_multi_modal_async( base64_data = await audio_serializer.read_data_base64() audio_format = ext.lower().lstrip(".") 
input_audio_entry = {"data": base64_data, "format": audio_format} - entry = {"type": "input_audio", "input_audio": input_audio_entry} # type: ignore + entry = {"type": "input_audio", "input_audio": input_audio_entry} # type: ignore[dict-item] content.append(entry) else: raise ValueError( diff --git a/pyrit/prompt_target/openai/openai_target.py b/pyrit/prompt_target/openai/openai_target.py index bf9c46bf6..aee21688f 100644 --- a/pyrit/prompt_target/openai/openai_target.py +++ b/pyrit/prompt_target/openai/openai_target.py @@ -73,7 +73,7 @@ async def async_token_provider() -> str: Returns: str: The token string from the synchronous provider. """ - return api_key() # type: ignore + return api_key() # type: ignore[return-value] return async_token_provider diff --git a/pyrit/prompt_target/text_target.py b/pyrit/prompt_target/text_target.py index 784309b93..d47e5d665 100644 --- a/pyrit/prompt_target/text_target.py +++ b/pyrit/prompt_target/text_target.py @@ -72,13 +72,13 @@ def import_scores_from_csv(self, csv_file_path: Path) -> list[MessagePiece]: labels = json.loads(labels_str) if labels_str else None message_piece = MessagePiece( - role=row["role"], # type: ignore + role=row["role"], # type: ignore[arg-type] original_value=row["value"], - original_value_data_type=row.get["data_type", None], # type: ignore + original_value_data_type=row.get("data_type", None), # type: ignore[arg-type] conversation_id=row.get("conversation_id", None), sequence=int(sequence_str) if sequence_str else None, labels=labels, - response_error=row.get("response_error", None), # type: ignore + response_error=row.get("response_error", None), # type: ignore[arg-type] prompt_target_identifier=self.get_identifier(), ) message_pieces.append(message_piece) diff --git a/pyrit/scenario/core/scenario.py b/pyrit/scenario/core/scenario.py index a3e260fe3..3c4440f16 100644 --- a/pyrit/scenario/core/scenario.py +++ b/pyrit/scenario/core/scenario.py @@ -175,7 +175,7 @@ def default_dataset_config(cls) -> 
DatasetConfiguration: async def initialize_async( self, *, - objective_target: PromptTarget = REQUIRED_VALUE, # type: ignore + objective_target: PromptTarget = REQUIRED_VALUE, # type: ignore[assignment] scenario_strategies: Optional[Sequence[ScenarioStrategy | ScenarioCompositeStrategy]] = None, dataset_config: Optional[DatasetConfiguration] = None, max_concurrency: int = 10, diff --git a/pyrit/scenario/scenarios/airt/psychosocial_scenario.py b/pyrit/scenario/scenarios/airt/psychosocial_scenario.py index 44c1720f8..405795a44 100644 --- a/pyrit/scenario/scenarios/airt/psychosocial_scenario.py +++ b/pyrit/scenario/scenarios/airt/psychosocial_scenario.py @@ -438,7 +438,7 @@ def _get_scorer(self, subharm: Optional[str] = None) -> FloatScaleThresholdScore ) # Wrap with conversation scorer to evaluate full conversation history - conversation_scorer: FloatScaleScorer = create_conversation_scorer(scorer=psych_scorer) # type: ignore + conversation_scorer: FloatScaleScorer = create_conversation_scorer(scorer=psych_scorer) # type: ignore[assignment] # Wrap with threshold scorer # threshold=1.0 means only raw score 5 counts as attack success (extremely harmful) diff --git a/pyrit/score/conversation_scorer.py b/pyrit/score/conversation_scorer.py index 333a2e812..e2b7a5ce1 100644 --- a/pyrit/score/conversation_scorer.py +++ b/pyrit/score/conversation_scorer.py @@ -183,7 +183,7 @@ def create_conversation_scorer( ) # Dynamically create a class that inherits from both ConversationScorer and the scorer's base class - class DynamicConversationScorer(ConversationScorer, scorer_base_class): # type: ignore + class DynamicConversationScorer(ConversationScorer, scorer_base_class): # type: ignore[misc, valid-type] """Dynamic ConversationScorer that inherits from both ConversationScorer and the wrapped scorer's base class.""" def __init__(self) -> None: diff --git a/tests/integration/ai_recruiter/test_ai_recruiter.py b/tests/integration/ai_recruiter/test_ai_recruiter.py index 
84039a423..929ce8a8e 100644 --- a/tests/integration/ai_recruiter/test_ai_recruiter.py +++ b/tests/integration/ai_recruiter/test_ai_recruiter.py @@ -218,7 +218,7 @@ async def test_ai_recruiter_workflow(): # Execute the XPIA flow. # Step 1: PDF with hidden text is uploaded to /upload/ # Step 2: /search_candidates/ is called automatically afterward. - final_result = await xpia.execute_async( # type: ignore + final_result = await xpia.execute_async( # type: ignore[arg-type] attack_content='{"description": "Hidden PDF Attack"}', processing_prompt="Evaluate all uploaded resumes and pick the best candidate.", ) diff --git a/tests/unit/common/test_helper_functions.py b/tests/unit/common/test_helper_functions.py index e52616c23..583e9e405 100644 --- a/tests/unit/common/test_helper_functions.py +++ b/tests/unit/common/test_helper_functions.py @@ -358,4 +358,4 @@ def test_verify_and_resolve_path_with_path_object(self) -> None: def test_verify_and_resolve_path_invalid_type(self) -> None: """Test that the function raises ValueError for invalid types.""" with pytest.raises(ValueError, match="Path must be a string or Path object"): - verify_and_resolve_path(123) # type: ignore + verify_and_resolve_path(123) # type: ignore[arg-type] diff --git a/tests/unit/converter/test_add_image_text_converter.py b/tests/unit/converter/test_add_image_text_converter.py index 08e1a4710..414bdd36d 100644 --- a/tests/unit/converter/test_add_image_text_converter.py +++ b/tests/unit/converter/test_add_image_text_converter.py @@ -82,7 +82,7 @@ def test_image_text_converter_add_text_to_image(image_text_converter_sample_imag async def test_add_image_text_converter_invalid_input_text(image_text_converter_sample_image) -> None: converter = AddImageTextConverter(img_to_add=image_text_converter_sample_image) with pytest.raises(ValueError): - assert await converter.convert_async(prompt="", input_type="text") # type: ignore + assert await converter.convert_async(prompt="", input_type="text") # type: 
ignore[arg-type] os.remove("test.png") @@ -90,7 +90,7 @@ async def test_add_image_text_converter_invalid_input_text(image_text_converter_ async def test_add_image_text_converter_invalid_file_path(): converter = AddImageTextConverter(img_to_add="nonexistent_image.png", font_name="helvetica.ttf") with pytest.raises(FileNotFoundError): - assert await converter.convert_async(prompt="Sample Text!", input_type="text") # type: ignore + assert await converter.convert_async(prompt="Sample Text!", input_type="text") # type: ignore[arg-type] @pytest.mark.asyncio diff --git a/tests/unit/converter/test_add_text_image_converter.py b/tests/unit/converter/test_add_text_image_converter.py index c76ed7253..f58b42874 100644 --- a/tests/unit/converter/test_add_text_image_converter.py +++ b/tests/unit/converter/test_add_text_image_converter.py @@ -77,7 +77,7 @@ def test_text_image_converter_add_text_to_image(text_image_converter_sample_imag async def test_add_text_image_converter_invalid_input_image() -> None: converter = AddTextImageConverter(text_to_add="test") with pytest.raises(FileNotFoundError): - assert await converter.convert_async(prompt="mock_image.png", input_type="image_path") # type: ignore + assert await converter.convert_async(prompt="mock_image.png", input_type="image_path") # type: ignore[arg-type] @pytest.mark.asyncio diff --git a/tests/unit/converter/test_azure_speech_converter.py b/tests/unit/converter/test_azure_speech_converter.py index ca8a81788..17a5865ec 100644 --- a/tests/unit/converter/test_azure_speech_converter.py +++ b/tests/unit/converter/test_azure_speech_converter.py @@ -67,7 +67,7 @@ async def test_send_prompt_to_audio_file_raises_value_error(self) -> None: # testing empty space string prompt = " " with pytest.raises(ValueError): - await converter.convert_async(prompt=prompt, input_type="text") # type: ignore + await converter.convert_async(prompt=prompt, input_type="text") # type: ignore[arg-type] def 
test_azure_speech_audio_text_converter_input_supported(self): converter = AzureSpeechTextToAudioConverter() diff --git a/tests/unit/converter/test_bin_ascii_converter.py b/tests/unit/converter/test_bin_ascii_converter.py index b14e4dd21..6c3808957 100644 --- a/tests/unit/converter/test_bin_ascii_converter.py +++ b/tests/unit/converter/test_bin_ascii_converter.py @@ -173,7 +173,7 @@ class TestBinAsciiConverterErrorHandling: async def test_invalid_encoding_function(self) -> None: """Test that invalid encoding function raises ValueError.""" converter = BinAsciiConverter(encoding_func="hex") - converter._encoding_func = "invalid" # type: ignore + converter._encoding_func = "invalid" # type: ignore[assignment] with pytest.raises(ValueError, match="Unsupported encoding function"): await converter.convert_async(prompt="test") @@ -182,4 +182,4 @@ async def test_invalid_encoding_function(self) -> None: """Test that invalid encoding function at initialization raises ValueError.""" with pytest.raises(ValueError, match="Invalid encoding_func"): - BinAsciiConverter(encoding_func="invalid") # type: ignore + BinAsciiConverter(encoding_func="invalid") # type: ignore[arg-type] diff --git a/tests/unit/converter/test_image_compression_converter.py b/tests/unit/converter/test_image_compression_converter.py index 413a0fc31..6e7a7d63a 100644 --- a/tests/unit/converter/test_image_compression_converter.py +++ b/tests/unit/converter/test_image_compression_converter.py @@ -40,11 +40,11 @@ def test_image_compression_converter_initialization_output_format_validation(): """Test validation of output_format parameter.""" for unsupported_format in ["GIF", "BMP", "TIFF", "ICO", "WEBM", "SVG", "jpg", "png"]: with pytest.raises(ValueError, match="Output format must be one of 'JPEG', 'PNG', or 'WEBP'"): - ImageCompressionConverter(output_format=unsupported_format) # type: ignore + ImageCompressionConverter(output_format=unsupported_format) 
# type: ignore[arg-type] supported_formats = ["JPEG", "PNG", "WEBP"] for supported_format in supported_formats: - converter = ImageCompressionConverter(output_format=supported_format) # type: ignore + converter = ImageCompressionConverter(output_format=supported_format) # type: ignore[arg-type] assert converter._output_format == supported_format converter = ImageCompressionConverter(output_format=None) @@ -117,7 +117,7 @@ def test_image_compression_converter_initialization_background_color_validation( for invalid_color in invalid_colors: with pytest.raises(ValueError, match="Background color must be a tuple of three integers between 0 and 255"): - ImageCompressionConverter(background_color=invalid_color) # type: ignore + ImageCompressionConverter(background_color=invalid_color) # type: ignore[arg-type] valid_colors = [ (0, 0, 0), diff --git a/tests/unit/converter/test_math_prompt_converter.py b/tests/unit/converter/test_math_prompt_converter.py index 703000a73..c29782851 100644 --- a/tests/unit/converter/test_math_prompt_converter.py +++ b/tests/unit/converter/test_math_prompt_converter.py @@ -144,7 +144,7 @@ async def test_math_prompt_converter_invalid_input_type(): # Test with an invalid input type with pytest.raises(ValueError, match="Input type not supported"): # Use type: ignore to suppress the type error for testing invalid input - await converter.convert_async(prompt="Test prompt", input_type="unsupported") # type: ignore + await converter.convert_async(prompt="Test prompt", input_type="unsupported") # type: ignore[arg-type] @pytest.mark.asyncio diff --git a/tests/unit/converter/test_prompt_converter.py b/tests/unit/converter/test_prompt_converter.py index 60577dedb..f8ce86dd1 100644 --- a/tests/unit/converter/test_prompt_converter.py +++ b/tests/unit/converter/test_prompt_converter.py @@ -195,7 +195,7 @@ async def test_str_join_converter_none_raises() -> None: async def test_str_join_converter_invalid_type_raises() -> None: converter = StringJoinConverter() 
with pytest.raises(ValueError): - assert await converter.convert_async(prompt="test", input_type="invalid") # type: ignore # noqa + assert await converter.convert_async(prompt="test", input_type="invalid") # type: ignore[arg-type] # noqa: PGH003 @pytest.mark.asyncio diff --git a/tests/unit/converter/test_unicode_confusable_converter.py b/tests/unit/converter/test_unicode_confusable_converter.py index 7b95e96f1..3e876ea0b 100644 --- a/tests/unit/converter/test_unicode_confusable_converter.py +++ b/tests/unit/converter/test_unicode_confusable_converter.py @@ -1,6 +1,5 @@ # Copyright (c) Microsoft Corporation. # Licensed under the MIT license. -# flake8: noqa from unittest.mock import patch diff --git a/tests/unit/executor/workflow/test_xpia.py b/tests/unit/executor/workflow/test_xpia.py index 2717e9279..2b90b213f 100644 --- a/tests/unit/executor/workflow/test_xpia.py +++ b/tests/unit/executor/workflow/test_xpia.py @@ -109,7 +109,7 @@ def test_validate_context_missing_attack_content_raises_error( self, workflow: XPIAWorkflow, mock_processing_callback: AsyncMock ) -> None: """Test that validation fails when attack_content is None.""" - context = XPIAContext(attack_content=None, processing_callback=mock_processing_callback) # type: ignore + context = XPIAContext(attack_content=None, processing_callback=mock_processing_callback) # type: ignore[arg-type] with pytest.raises(ValueError, match="attack_content: Message must be provided"): workflow._validate_context(context=context) @@ -118,7 +118,7 @@ def test_validate_context_empty_message_raises_error( self, workflow: XPIAWorkflow, mock_processing_callback: AsyncMock ) -> None: """Test that validation fails when message has no pieces.""" - context = XPIAContext(attack_content=None, processing_callback=mock_processing_callback) # type: ignore + context = XPIAContext(attack_content=None, processing_callback=mock_processing_callback) # type: ignore[arg-type] with pytest.raises(ValueError, match="attack_content: Message must be 
provided"): workflow._validate_context(context=context) @@ -162,7 +162,7 @@ def test_validate_context_missing_processing_callback_raises_error( self, workflow: XPIAWorkflow, valid_message: Message ) -> None: """Test that validation fails when processing_callback is None.""" - context = XPIAContext(attack_content=valid_message, processing_callback=None) # type: ignore + context = XPIAContext(attack_content=valid_message, processing_callback=None) # type: ignore[arg-type] with pytest.raises(ValueError, match="processing_callback is required"): workflow._validate_context(context=context) diff --git a/tests/unit/memory/memory_interface/test_interface_attack_results.py b/tests/unit/memory/memory_interface/test_interface_attack_results.py index 3106409fd..17e2ac199 100644 --- a/tests/unit/memory/memory_interface/test_interface_attack_results.py +++ b/tests/unit/memory/memory_interface/test_interface_attack_results.py @@ -560,8 +560,8 @@ def test_attack_result_with_attack_generation_conversation_ids(sqlite_instance: entry: AttackResultEntry = sqlite_instance._query_entries(AttackResultEntry)[0] - assert set(entry.pruned_conversation_ids) == pruned_ids # type: ignore - assert set(entry.adversarial_chat_conversation_ids) == adversarial_ids # type: ignore + assert set(entry.pruned_conversation_ids) == pruned_ids # type: ignore[arg-type] + assert set(entry.adversarial_chat_conversation_ids) == adversarial_ids # type: ignore[arg-type] retrieved_result = entry.get_attack_result() assert { diff --git a/tests/unit/memory/test_azure_sql_memory.py b/tests/unit/memory/test_azure_sql_memory.py index 431627025..910cadd77 100644 --- a/tests/unit/memory/test_azure_sql_memory.py +++ b/tests/unit/memory/test_azure_sql_memory.py @@ -66,7 +66,7 @@ def test_insert_entries(memory_interface: AzureSQLMemory): ] # Now, get a new session to query the database and verify the entries were inserted - with memory_interface.get_session() as session: # type: ignore + with memory_interface.get_session() 
as session: # type: ignore[arg-type] # Use the insert_entries method to insert multiple entries into the database memory_interface._insert_entries(entries=entries) inserted_entries = session.query(PromptMemoryEntry).order_by(PromptMemoryEntry.conversation_id).all() @@ -88,7 +88,7 @@ def test_insert_embedding_entry(memory_interface: AzureSQLMemory): memory_interface._insert_entry(conversation_entry) # Re-query the ConversationData entry within a new session to ensure it's attached - with memory_interface.get_session() as session: # type: ignore + with memory_interface.get_session() as session: # type: ignore[arg-type] # Assuming uuid is the primary key and is set upon insertion reattached_conversation_entry = session.query(PromptMemoryEntry).filter_by(conversation_id="123").one() uuid = reattached_conversation_entry.id @@ -98,7 +98,7 @@ def test_insert_embedding_entry(memory_interface: AzureSQLMemory): memory_interface._insert_entry(embedding_entry) # Verify the EmbeddingData entry was inserted correctly - with memory_interface.get_session() as session: # type: ignore + with memory_interface.get_session() as session: # type: ignore[arg-type] persisted_embedding_entry = session.query(EmbeddingDataEntry).filter_by(id=uuid).first() assert persisted_embedding_entry is not None assert persisted_embedding_entry.embedding == [1, 2, 3] @@ -174,7 +174,7 @@ def test_get_memories_with_json_properties(memory_interface: AzureSQLMemory): target = TextTarget() # Start a session - with memory_interface.get_session() as session: # type: ignore + with memory_interface.get_session() as session: # type: ignore[arg-type] # Create a ConversationData entry with all attributes filled entry = PromptMemoryEntry( entry=MessagePiece( @@ -238,7 +238,7 @@ def test_update_entries(memory_interface: AzureSQLMemory): memory_interface._update_entries(entries=entries_to_update, update_fields={"original_value": "Updated Hello"}) # Verify the entry was updated - with memory_interface.get_session() as 
session: # type: ignore + with memory_interface.get_session() as session: # type: ignore[arg-type] updated_entry = session.query(PromptMemoryEntry).filter_by(conversation_id="123").first() assert updated_entry.original_value == "Updated Hello" @@ -293,7 +293,7 @@ def test_update_prompt_entries_by_conversation_id(memory_interface: AzureSQLMemo assert update_result is True # Verify the entry was updated - with memory_interface.get_session() as session: # type: ignore + with memory_interface.get_session() as session: # type: ignore[arg-type] updated_entries = session.query(PromptMemoryEntry).filter_by(conversation_id=specific_conversation_id) for entry in updated_entries: assert entry.original_value == "Updated Hello" @@ -318,7 +318,7 @@ def test_update_labels_by_conversation_id(memory_interface: AzureSQLMemory): memory_interface.update_labels_by_conversation_id(conversation_id="123", labels={"test1": "change"}) # Verify the labels were updated - with memory_interface.get_session() as session: # type: ignore + with memory_interface.get_session() as session: # type: ignore[arg-type] updated_entry = session.query(PromptMemoryEntry).filter_by(conversation_id="123").first() assert updated_entry.labels["test1"] == "change" @@ -343,6 +343,6 @@ def test_update_prompt_metadata_by_conversation_id(memory_interface: AzureSQLMem ) # Verify the metadata was updated - with memory_interface.get_session() as session: # type: ignore + with memory_interface.get_session() as session: # type: ignore[arg-type] updated_entry = session.query(PromptMemoryEntry).filter_by(conversation_id="123").first() assert updated_entry.prompt_metadata == {"updated": "updated"} diff --git a/tests/unit/models/test_message_piece.py b/tests/unit/models/test_message_piece.py index 469c94e87..974a57d79 100644 --- a/tests/unit/models/test_message_piece.py +++ b/tests/unit/models/test_message_piece.py @@ -153,7 +153,7 @@ def test_hashes_generated_files_unknown_type(): MessagePiece( role="user", 
original_value="Hello1", - original_value_data_type="new_unknown_type", # type: ignore + original_value_data_type="new_unknown_type", # type: ignore[arg-type] ) @@ -363,7 +363,7 @@ def test_message_piece_no_roles(): Message( message_pieces=[ MessagePiece( - role="", # type: ignore + role="", # type: ignore[arg-type] converted_value_data_type="text", original_value="Hello", converted_value="Hello", diff --git a/tests/unit/prompt_normalizer/test_prompt_normalizer.py b/tests/unit/prompt_normalizer/test_prompt_normalizer.py index c2a414090..6386a1024 100644 --- a/tests/unit/prompt_normalizer/test_prompt_normalizer.py +++ b/tests/unit/prompt_normalizer/test_prompt_normalizer.py @@ -81,7 +81,7 @@ class MockPromptConverter(PromptConverter): def __init__(self) -> None: pass - def convert_async(self, *, prompt: str, input_type: PromptDataType = "text") -> ConverterResult: # type: ignore + def convert_async(self, *, prompt: str, input_type: PromptDataType = "text") -> ConverterResult: # type: ignore[arg-type] return ConverterResult(output_text=prompt, output_type="text") def input_supported(self, input_type: PromptDataType) -> bool: @@ -245,7 +245,7 @@ async def test_send_prompt_async_mixed_sequence_types(mock_memory_instance): piece1 = MessagePiece(role="user", original_value="test1", sequence=1, conversation_id=conv_id) piece2 = MessagePiece(role="user", original_value="test2", sequence=1, conversation_id=conv_id) # Manually set different sequence to test validation - piece2.sequence = None # type: ignore + piece2.sequence = None # type: ignore[arg-type] with pytest.raises(ValueError, match="Inconsistent sequences within the same message entry"): Message(message_pieces=[piece1, piece2]) diff --git a/tests/unit/scenarios/test_cyber.py b/tests/unit/scenarios/test_cyber.py index 749d05d27..afa7f144a 100644 --- a/tests/unit/scenarios/test_cyber.py +++ b/tests/unit/scenarios/test_cyber.py @@ -358,7 +358,7 @@ async def test_no_target_duplication(self, mock_objective_target, 
mock_memory_se # this works because TrueFalseCompositeScorer subclasses TrueFalseScorer, # but TrueFalseScorer itself (the type for ScorerConfig) does not have ._scorers. - scorer_target = scenario._scorer_config.objective_scorer._scorers[0] # type: ignore + scorer_target = scenario._scorer_config.objective_scorer._scorers[0] # type: ignore[arg-type] adversarial_target = scenario._adversarial_chat assert objective_target != scorer_target diff --git a/tests/unit/scenarios/test_jailbreak.py b/tests/unit/scenarios/test_jailbreak.py index f999219ad..666dc5cff 100644 --- a/tests/unit/scenarios/test_jailbreak.py +++ b/tests/unit/scenarios/test_jailbreak.py @@ -438,7 +438,7 @@ async def test_no_target_duplication_async( await scenario.initialize_async(objective_target=mock_objective_target) objective_target = scenario._objective_target - scorer_target = scenario._scorer_config.objective_scorer # type: ignore + scorer_target = scenario._scorer_config.objective_scorer # type: ignore[arg-type] assert objective_target != scorer_target diff --git a/tests/unit/scenarios/test_leakage_scenario.py b/tests/unit/scenarios/test_leakage_scenario.py index 86773e002..53d591f2f 100644 --- a/tests/unit/scenarios/test_leakage_scenario.py +++ b/tests/unit/scenarios/test_leakage_scenario.py @@ -455,7 +455,7 @@ async def test_no_target_duplication(self, mock_objective_target, mock_memory_se # This works because TrueFalseCompositeScorer subclasses TrueFalseScorer, # but TrueFalseScorer itself (the type for ScorerConfig) does not have ._scorers. 
- scorer_target = scenario._scorer_config.objective_scorer._scorers[0] # type: ignore + scorer_target = scenario._scorer_config.objective_scorer._scorers[0] # type: ignore[arg-type] adversarial_target = scenario._adversarial_chat assert objective_target != scorer_target diff --git a/tests/unit/scenarios/test_scam.py b/tests/unit/scenarios/test_scam.py index ef277d1b6..a74046bf9 100644 --- a/tests/unit/scenarios/test_scam.py +++ b/tests/unit/scenarios/test_scam.py @@ -380,7 +380,7 @@ async def test_no_target_duplication_async( await scenario.initialize_async(objective_target=mock_objective_target, dataset_config=mock_dataset_config) objective_target = scenario._objective_target - scorer_target = scenario._scorer_config.objective_scorer # type: ignore + scorer_target = scenario._scorer_config.objective_scorer # type: ignore[arg-type] adversarial_target = scenario._adversarial_chat assert objective_target != scorer_target diff --git a/tests/unit/score/test_conversation_history_scorer.py b/tests/unit/score/test_conversation_history_scorer.py index 3e5d1ac23..60786daeb 100644 --- a/tests/unit/score/test_conversation_history_scorer.py +++ b/tests/unit/score/test_conversation_history_scorer.py @@ -361,7 +361,7 @@ def test_factory_returns_instance_of_true_false_scorer(): def test_factory_preserves_wrapped_scorer(): """Test that factory preserves reference to wrapped scorer.""" original_scorer = MockFloatScaleScorer() - original_scorer.custom_attr = "test_value" # type: ignore + original_scorer.custom_attr = "test_value" # type: ignore[abstract] conv_scorer = create_conversation_scorer(scorer=original_scorer) @@ -371,7 +371,7 @@ def test_factory_preserves_wrapped_scorer(): assert hasattr(conv_scorer, "_wrapped_scorer") wrapped = conv_scorer._wrapped_scorer assert wrapped is original_scorer - assert wrapped.custom_attr == "test_value" # type: ignore + assert wrapped.custom_attr == "test_value" # type: ignore[abstract] def test_factory_with_custom_validator(): diff --git 
a/tests/unit/score/test_true_false_composite_scorer.py b/tests/unit/score/test_true_false_composite_scorer.py index d69faf4e9..3824a9557 100644 --- a/tests/unit/score/test_true_false_composite_scorer.py +++ b/tests/unit/score/test_true_false_composite_scorer.py @@ -167,7 +167,7 @@ async def _score_piece_async( return [] with pytest.raises(ValueError, match="All scorers must be true_false scorers"): - TrueFalseCompositeScorer(aggregator=TrueFalseScoreAggregator.AND, scorers=[InvalidScorer()]) # type: ignore + TrueFalseCompositeScorer(aggregator=TrueFalseScoreAggregator.AND, scorers=[InvalidScorer()]) # type: ignore[arg-type] @pytest.mark.asyncio diff --git a/tests/unit/setup/test_configuration_loader.py b/tests/unit/setup/test_configuration_loader.py index 15be87a45..b4e737bf5 100644 --- a/tests/unit/setup/test_configuration_loader.py +++ b/tests/unit/setup/test_configuration_loader.py @@ -135,7 +135,7 @@ def test_initializer_dict_without_name_raises_error(self): def test_initializer_invalid_type_raises_error(self): """Test that invalid initializer type raises ValueError.""" with pytest.raises(ValueError, match="must be a string or dict"): - ConfigurationLoader(initializers=[123]) # type: ignore + ConfigurationLoader(initializers=[123]) # type: ignore[arg-type] def test_from_dict_with_all_fields(self): """Test from_dict with all configuration fields.""" diff --git a/tests/unit/setup/test_initialization.py b/tests/unit/setup/test_initialization.py index bed550b51..9c386ba68 100644 --- a/tests/unit/setup/test_initialization.py +++ b/tests/unit/setup/test_initialization.py @@ -107,7 +107,7 @@ async def initialize_async(self) -> None: async def test_invalid_memory_type_raises_error(self): """Test that invalid memory type raises ValueError.""" with pytest.raises(ValueError, match="is not a supported type"): - await initialize_pyrit_async(memory_db_type="InvalidType") # type: ignore + await initialize_pyrit_async(memory_db_type="InvalidType") # type: ignore[arg-type] 
class TestLoadEnvironmentFiles: diff --git a/tests/unit/setup/test_pyrit_initializer.py b/tests/unit/setup/test_pyrit_initializer.py index 1fedbee2c..406624783 100644 --- a/tests/unit/setup/test_pyrit_initializer.py +++ b/tests/unit/setup/test_pyrit_initializer.py @@ -32,7 +32,7 @@ def teardown_method(self) -> None: def test_cannot_instantiate_abstract_class(self): """Test that PyRITInitializer cannot be instantiated directly.""" with pytest.raises(TypeError): - PyRITInitializer() # type: ignore + PyRITInitializer() # type: ignore[arg-type] def test_concrete_initializer_can_be_created(self): """Test that concrete subclass can be instantiated.""" @@ -64,7 +64,7 @@ async def initialize_async(self) -> None: pass with pytest.raises(TypeError): - MissingName() # type: ignore + MissingName() # type: ignore[arg-type] def test_initialize_method_is_abstract(self): """Test that initialize method must be implemented.""" @@ -79,7 +79,7 @@ def description(self) -> str: return "Missing initialize" with pytest.raises(TypeError): - MissingInitialize() # type: ignore + MissingInitialize() # type: ignore[arg-type] def test_default_execution_order_is_one(self): """Test that default execution order is 1.""" @@ -287,7 +287,7 @@ async def initialize_async(self) -> None: # Verify the global variable was set assert hasattr(sys.modules["__main__"], "tracked_var") - assert sys.modules["__main__"].tracked_var == "test_value" # type: ignore + assert sys.modules["__main__"].tracked_var == "test_value" # type: ignore[arg-type] class TestGetInfo: @@ -652,7 +652,7 @@ async def test_get_dynamic_info_without_memory_returns_message(self): from pyrit.memory import CentralMemory # Ensure memory is not set - CentralMemory.set_memory_instance(None) # type: ignore + CentralMemory.set_memory_instance(None) # type: ignore[arg-type] class NoMemoryInit(PyRITInitializer): @property diff --git a/tests/unit/target/test_huggingface_chat_target.py b/tests/unit/target/test_huggingface_chat_target.py index 
f24a9b477..77052fb1b 100644 --- a/tests/unit/target/test_huggingface_chat_target.py +++ b/tests/unit/target/test_huggingface_chat_target.py @@ -176,7 +176,7 @@ async def test_send_prompt_async(): message = Message(message_pieces=[message_piece]) # Use await to handle the asynchronous call - response = await hf_chat.send_prompt_async(message=message) # type: ignore + response = await hf_chat.send_prompt_async(message=message) # type: ignore[arg-type] # Access the response text via message_pieces assert len(response) == 1 diff --git a/tests/unit/target/test_openai_chat_target.py b/tests/unit/target/test_openai_chat_target.py index b90ec97f2..846efe353 100644 --- a/tests/unit/target/test_openai_chat_target.py +++ b/tests/unit/target/test_openai_chat_target.py @@ -158,8 +158,8 @@ async def test_build_chat_messages_for_multi_modal(target: OpenAIChatTarget): assert len(messages) == 1 assert messages[0]["role"] == "user" - assert messages[0]["content"][0]["type"] == "text" # type: ignore - assert messages[0]["content"][1]["type"] == "image_url" # type: ignore + assert messages[0]["content"][0]["type"] == "text" # type: ignore[method-assign] + assert messages[0]["content"][1]["type"] == "image_url" # type: ignore[method-assign] os.remove(image_request.original_value) @@ -540,7 +540,7 @@ async def test_send_prompt_async_content_filter_200(target: OpenAIChatTarget): def test_validate_request_unsupported_data_types(target: OpenAIChatTarget): image_piece = get_image_message_piece() - image_piece.converted_value_data_type = "new_unknown_type" # type: ignore + image_piece.converted_value_data_type = "new_unknown_type" # type: ignore[method-assign] message = Message( message_pieces=[ MessagePiece( diff --git a/tests/unit/target/test_openai_response_target.py b/tests/unit/target/test_openai_response_target.py index fc2ab6955..2c6fd598f 100644 --- a/tests/unit/target/test_openai_response_target.py +++ b/tests/unit/target/test_openai_response_target.py @@ -182,13 +182,13 @@ async 
def test_build_input_for_multi_modal(target: OpenAIResponseTarget): assert len(messages) == 3 assert messages[0]["role"] == "user" - assert messages[0]["content"][0]["type"] == "input_text" # type: ignore - assert messages[0]["content"][1]["type"] == "input_image" # type: ignore + assert messages[0]["content"][0]["type"] == "input_text" # type: ignore[method-assign] + assert messages[0]["content"][1]["type"] == "input_image" # type: ignore[method-assign] assert messages[1]["role"] == "assistant" - assert messages[1]["content"][0]["type"] == "output_text" # type: ignore + assert messages[1]["content"][0]["type"] == "output_text" # type: ignore[method-assign] assert messages[2]["role"] == "user" - assert messages[2]["content"][0]["type"] == "input_text" # type: ignore - assert messages[2]["content"][1]["type"] == "input_image" # type: ignore + assert messages[2]["content"][0]["type"] == "input_text" # type: ignore[method-assign] + assert messages[2]["content"][1]["type"] == "input_image" # type: ignore[method-assign] os.remove(image_request.original_value) @@ -570,7 +570,7 @@ async def test_send_prompt_async_content_filter(target: OpenAIResponseTarget): def test_validate_request_unsupported_data_types(target: OpenAIResponseTarget): image_piece = get_image_message_piece() - image_piece.converted_value_data_type = "new_unknown_type" # type: ignore + image_piece.converted_value_data_type = "new_unknown_type" # type: ignore[method-assign] message = Message( message_pieces=[ MessagePiece(