mirror of
https://github.com/HackTricks-wiki/hacktricks.git
synced 2025-10-10 18:36:50 +00:00
Merge pull request #1380 from HackTricks-wiki/research_update_src_generic-methodologies-and-resources_phishing-methodology_detecting-phising_20250904_082429
Research Update Enhanced src/generic-methodologies-and-resou...
This commit is contained in: commit 3efd161da9
.github/workflows/auto_merge_approved_prs.yml (vendored, 32 changes)

```diff
@@ -26,9 +26,6 @@ jobs:
           git config --global user.email "action@github.com"
           git config --global user.name "GitHub Action"
 
-      - name: Make conflict resolution script executable
-        run: chmod +x ./resolve_searchindex_conflicts.sh
-
       - name: Check for running workflows
         id: check_workflows
         run: |
@@ -138,34 +135,7 @@ jobs:
               echo "Failed to merge PR #$pr_number: $pr_title"
             fi
           elif [ "$pr_mergeable" = "CONFLICTED" ] || [ "$pr_mergeable" = "CONFLICTING" ]; then
-            echo "PR #$pr_number has conflicts. Attempting to resolve searchindex.js conflicts..."
-
-            # Try to resolve conflicts by updating the branch
-            export GITHUB_REPOSITORY="$GITHUB_REPOSITORY"
-            export GH_TOKEN="${{ secrets.PAT_TOKEN }}"
-            if ./resolve_searchindex_conflicts.sh "$pr_number" "$head_branch" "$base_branch"; then
-              echo "Successfully resolved searchindex.js conflicts for PR #$pr_number"
-
-              # Wait for GitHub to update the mergeable status after conflict resolution
-              echo "Waiting for GitHub to process the conflict resolution..."
-              sleep 15
-
-              # Check if it's now mergeable
-              pr_mergeable_after=$(gh pr view "$pr_number" --json mergeable --jq '.mergeable' --repo "$GITHUB_REPOSITORY")
-              if [ "$pr_mergeable_after" = "MERGEABLE" ]; then
-                if gh pr merge "$pr_number" --merge --delete-branch --repo "$GITHUB_REPOSITORY"; then
-                  echo "Successfully merged PR #$pr_number after resolving conflicts: $pr_title"
-                  current_count=$(cat /tmp/merged_count)
-                  echo $((current_count + 1)) > /tmp/merged_count
-                else
-                  echo "Failed to merge PR #$pr_number after conflict resolution: $pr_title"
-                fi
-              else
-                echo "PR #$pr_number still not mergeable after conflict resolution (status: $pr_mergeable_after)"
-              fi
-            else
-              echo "Failed to resolve conflicts for PR #$pr_number"
-            fi
+            echo "PR #$pr_number has conflicts. Skipping auto-merge so it can be resolved manually."
           else
             echo "PR #$pr_number is not mergeable (status: $pr_mergeable)"
           fi
```
.github/workflows/build_master.yml (vendored, 63 changes)

```diff
@@ -35,60 +35,33 @@ jobs:
       - name: Build mdBook
         run: MDBOOK_BOOK__LANGUAGE=en mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
 
-      - name: Update searchindex in repo (purge history, keep current on HEAD)
+      - name: Publish search index release asset
         shell: bash
+        env:
+          PAT_TOKEN: ${{ secrets.PAT_TOKEN }}
         run: |
           set -euo pipefail
 
-          ls -la
-          ls -la book
-
-          git config --global --add safe.directory /__w/hacktricks/hacktricks
-          git config --global user.email "build@example.com"
-          git config --global user.name "Build master"
-          git config pull.rebase false
-
-          # Ensure we're on the target branch and up to date
-          git fetch origin
-          git reset --hard origin/master
-
-          # Choose the file to keep at HEAD:
-          # 1) Prefer freshly built version from book/
-          # 2) Fallback to the file currently at HEAD (if it exists)
-          HAS_FILE=0
-          if [ -f "book/searchindex.js" ]; then
-            cp "book/searchindex.js" /tmp/sidx.js
-            HAS_FILE=1
-          elif git cat-file -e "HEAD:searchindex.js" 2>/dev/null; then
-            git show "HEAD:searchindex.js" > /tmp/sidx.js
-            HAS_FILE=1
-          fi
-
-          # Skip if there's nothing to purge AND nothing to keep
-          if [ "$HAS_FILE" = "1" ] || git rev-list -n 1 HEAD -- "searchindex.js" >/dev/null 2>&1; then
-            # Fail early if working tree is dirty (avoid confusing rewrites)
-            git diff --quiet || { echo "Working tree has uncommitted changes; aborting purge." >&2; exit 1; }
-
-            # Install git-filter-repo and ensure it's on PATH
-            python -m pip install --quiet --user git-filter-repo
-            export PATH="$HOME/.local/bin:$PATH"
-
-            # Rewrite ONLY the current branch, dropping all historical blobs of searchindex.js
-            git filter-repo --force --path "searchindex.js" --invert-paths --refs "$(git symbolic-ref -q HEAD)"
-
-            # Re-add the current version on top of rewritten history (keep it in HEAD)
-            if [ "$HAS_FILE" = "1" ]; then
-              mv /tmp/sidx.js "searchindex.js"
-              git add "searchindex.js"
-              git commit -m "Update searchindex (purged history; keep current)"
-            else
-              echo "No current searchindex.js to re-add after purge."
-            fi
-
-            # Safer force push (only updates if remote hasn't advanced)
-            git push --force-with-lease
-          else
-            echo "Nothing to purge; skipping."
-          fi
+          ASSET="book/searchindex.js"
+          TAG="searchindex-en"
+          TITLE="Search Index (en)"
+
+          if [ ! -f "$ASSET" ]; then
+            echo "Expected $ASSET to exist after build" >&2
+            exit 1
+          fi
+
+          TOKEN="${PAT_TOKEN:-${GITHUB_TOKEN:-}}"
+          if [ -z "$TOKEN" ]; then
+            echo "No token available for GitHub CLI" >&2
+            exit 1
+          fi
+          export GH_TOKEN="$TOKEN"
+
+          if ! gh release view "$TAG" >/dev/null 2>&1; then
+            gh release create "$TAG" "$ASSET" --title "$TITLE" --notes "Automated search index build for master" --repo "$GITHUB_REPOSITORY"
+          else
+            gh release upload "$TAG" "$ASSET" --clobber --repo "$GITHUB_REPOSITORY"
+          fi
```
.github/workflows/translate_all.yml (vendored, 60 changes)

```diff
@@ -123,57 +123,35 @@ jobs:
           git pull
           MDBOOK_BOOK__LANGUAGE=$BRANCH mdbook build || (echo "Error logs" && cat hacktricks-preprocessor-error.log && echo "" && echo "" && echo "Debug logs" && (cat hacktricks-preprocessor.log | tail -n 20) && exit 1)
 
-      - name: Update searchindex.js in repo (purge history, keep current on HEAD)
+      - name: Publish search index release asset
         shell: bash
+        env:
+          PAT_TOKEN: ${{ secrets.PAT_TOKEN }}
         run: |
           set -euo pipefail
 
-          # Be explicit about workspace trust (avoids "dubious ownership")
-          git config --global --add safe.directory "$GITHUB_WORKSPACE"
-
-          git checkout "$BRANCH"
-          git fetch origin "$BRANCH" --quiet
-          git pull --ff-only
-
-          # Choose the file to keep at HEAD:
-          # 1) Prefer freshly built version from book/
-          # 2) Fallback to the file currently at HEAD (if it exists)
-          HAS_FILE=0
-          if [ -f "book/searchindex.js" ]; then
-            cp "book/searchindex.js" /tmp/sidx.js
-            HAS_FILE=1
-          elif git cat-file -e "HEAD:searchindex.js" 2>/dev/null; then
-            git show "HEAD:searchindex.js" > /tmp/sidx.js
-            HAS_FILE=1
-          fi
-
-          # Skip if there's nothing to purge AND nothing to keep
-          if [ "$HAS_FILE" = "1" ] || git rev-list -n 1 "$BRANCH" -- "searchindex.js" >/dev/null 2>&1; then
-            # **Fail early if working tree is dirty** (prevents confusing filter results)
-            git diff --quiet || { echo "Working tree has uncommitted changes; aborting purge." >&2; exit 1; }
-
-            # Make sure git-filter-repo is callable via `git filter-repo`
-            python -m pip install --quiet --user git-filter-repo
-            export PATH="$HOME/.local/bin:$PATH"
-
-            # Rewrite ONLY this branch, dropping all historical blobs of searchindex.js
-            git filter-repo --force --path "searchindex.js" --invert-paths --refs "refs/heads/$BRANCH"
-
-            # Re-add the current version on top of rewritten history (keep it in HEAD)
-            if [ "$HAS_FILE" = "1" ]; then
-              mv /tmp/sidx.js "searchindex.js"
-              git add "searchindex.js"
-              git commit -m "Update searchindex (purged history; keep current)"
-            else
-              echo "No current searchindex.js to re-add after purge."
-            fi
-
-            # **Safer force push** (prevents clobbering unexpected remote updates)
-            git push --force-with-lease origin "$BRANCH"
-          else
-            echo "Nothing to purge; skipping."
-          fi
+          ASSET="book/searchindex.js"
+          TAG="searchindex-${BRANCH}"
+          TITLE="Search Index (${BRANCH})"
+
+          if [ ! -f "$ASSET" ]; then
+            echo "Expected $ASSET to exist after build" >&2
+            exit 1
+          fi
+
+          TOKEN="${PAT_TOKEN:-${GITHUB_TOKEN:-}}"
+          if [ -z "$TOKEN" ]; then
+            echo "No token available for GitHub CLI" >&2
+            exit 1
+          fi
+          export GH_TOKEN="$TOKEN"
+
+          if ! gh release view "$TAG" >/dev/null 2>&1; then
+            gh release create "$TAG" "$ASSET" --title "$TITLE" --notes "Automated search index build for $BRANCH" --repo "$GITHUB_REPOSITORY"
+          else
+            gh release upload "$TAG" "$ASSET" --clobber --repo "$GITHUB_REPOSITORY"
+          fi
 
       # Login in AWs
       - name: Configure AWS credentials using OIDC
         uses: aws-actions/configure-aws-credentials@v3
```
.gitignore (vendored, 1 change)

```diff
@@ -11,3 +11,4 @@ book
 book/*
 hacktricks-preprocessor.log
 hacktricks-preprocessor-error.log
+searchindex.js
```
resolve_searchindex_conflicts.sh (deleted, @@ -1,139 +0,0 @@):

```bash
#!/bin/bash

# Script to resolve searchindex.js conflicts by accepting master branch version
# This script is designed to handle merge conflicts that occur when PRs become
# desynchronized due to the auto-generated searchindex.js file.
#
# The searchindex.js file is automatically generated by the build process and
# frequently causes conflicts when multiple PRs are waiting to be merged.
# This script automatically resolves those conflicts by accepting the master
# branch version of the file.
#
# Usage: resolve_searchindex_conflicts.sh <pr_number> <head_branch> <base_branch>

set -euo pipefail

# Validate arguments
if [ $# -ne 3 ]; then
    echo "Usage: $0 <pr_number> <head_branch> <base_branch>"
    exit 1
fi

PR_NUMBER="$1"
HEAD_BRANCH="$2"
BASE_BRANCH="$3"

# Validate required environment variables
if [ -z "${GITHUB_REPOSITORY:-}" ]; then
    echo "Error: GITHUB_REPOSITORY environment variable is required"
    exit 1
fi

if [ -z "${GH_TOKEN:-}" ]; then
    echo "Error: GH_TOKEN environment variable is required"
    exit 1
fi

echo "Resolving conflicts for PR #$PR_NUMBER (branch: $HEAD_BRANCH -> $BASE_BRANCH)"

# Get current directory for safety
ORIGINAL_DIR=$(pwd)

# Create a temporary directory for the operation
TEMP_DIR=$(mktemp -d)
echo "Working in temporary directory: $TEMP_DIR"

cleanup() {
    echo "Cleaning up..."
    cd "$ORIGINAL_DIR"
    rm -rf "$TEMP_DIR"
}
trap cleanup EXIT

# Clone the repository to the temp directory
echo "Cloning repository..."
cd "$TEMP_DIR"
gh repo clone "$GITHUB_REPOSITORY" . -- --branch "$HEAD_BRANCH"

# Configure git
git config user.email "action@github.com"
git config user.name "GitHub Action"

# Fetch all branches
git fetch origin

# Make sure we're on the correct branch
git checkout "$HEAD_BRANCH"

# Try to merge the base branch
echo "Attempting to merge $BASE_BRANCH into $HEAD_BRANCH..."
if git merge "origin/$BASE_BRANCH" --no-edit; then
    echo "No conflicts found, merge successful"

    # Push the updated branch
    echo "Pushing merged branch..."
    git push origin "$HEAD_BRANCH"
    exit 0
fi

# Check what files have conflicts
echo "Checking for conflicts..."
conflicted_files=$(git diff --name-only --diff-filter=U)
echo "Conflicted files: $conflicted_files"

# Check if searchindex.js is the only conflict or if conflicts are only in acceptable files
acceptable_conflicts=true
searchindex_conflict=false

for file in $conflicted_files; do
    case "$file" in
        "searchindex.js")
            searchindex_conflict=true
            echo "Found searchindex.js conflict (acceptable)"
            ;;
        *)
            echo "Found unacceptable conflict in: $file"
            acceptable_conflicts=false
            ;;
    esac
done

if [ "$acceptable_conflicts" = false ]; then
    echo "Cannot auto-resolve: conflicts found in files other than searchindex.js"
    git merge --abort
    exit 1
fi

if [ "$searchindex_conflict" = false ]; then
    echo "No searchindex.js conflicts found, but merge failed for unknown reason"
    git merge --abort
    exit 1
fi

echo "Resolving searchindex.js conflict by accepting $BASE_BRANCH version..."

# Accept the base branch version of searchindex.js (--theirs refers to the branch being merged in)
git checkout --theirs searchindex.js
git add searchindex.js

# Check if there are any other staged changes from the merge
staged_files=$(git diff --cached --name-only || true)
echo "Staged files after resolution: $staged_files"

# Complete the merge
if git commit --no-edit; then
    echo "Successfully resolved merge conflicts"

    # Push the updated branch
    echo "Pushing resolved branch..."
    if git push origin "$HEAD_BRANCH"; then
        echo "Successfully pushed resolved branch"
        exit 0
    else
        echo "Failed to push resolved branch"
        exit 1
    fi
else
    echo "Failed to commit merge resolution"
    exit 1
fi
```
File diff suppressed because one or more lines are too long
@@ -3,6 +3,20 @@

{{#include ../../banners/hacktricks-training.md}}

## iOS Exploit Mitigations

- **Code Signing** in iOS works by requiring every piece of executable code (apps, libraries, extensions, etc.) to be cryptographically signed with a certificate issued by Apple. When code is loaded, iOS verifies the digital signature against Apple's trusted root. If the signature is invalid, missing, or modified, the OS refuses to run it. This prevents attackers from injecting malicious code into legitimate apps or running unsigned binaries, effectively stopping most exploit chains that rely on executing arbitrary or tampered code.
- **CoreTrust** is the iOS subsystem responsible for enforcing code signing at runtime. It directly verifies signatures using Apple's root certificate without relying on cached trust stores, meaning only binaries signed by Apple (or with valid entitlements) can execute. CoreTrust ensures that even if an attacker tampers with an app after installation, modifies system libraries, or tries to load unsigned code, the system will block execution unless the code is still properly signed. This strict enforcement closes many post-exploitation vectors that older iOS versions allowed through weaker or bypassable signature checks.
- **Data Execution Prevention (DEP)** marks memory regions as non-executable unless they explicitly contain code. This stops attackers from injecting shellcode into data regions (like the stack or heap) and running it, forcing them to rely on more complex techniques like ROP (Return-Oriented Programming).
- **ASLR (Address Space Layout Randomization)** randomizes the memory addresses of code, libraries, stack, and heap every time the system runs. This makes it much harder for attackers to predict where useful instructions or gadgets are, breaking many exploit chains that depend on fixed memory layouts.
- **KASLR (Kernel ASLR)** applies the same randomization concept to the iOS kernel. By shuffling the kernel's base address at each boot, it prevents attackers from reliably locating kernel functions or structures, raising the difficulty of kernel-level exploits that would otherwise gain full system control.
- **Kernel Patch Protection (KPP)** continuously monitors the kernel's code pages to ensure they haven't been modified. If any tampering is detected (such as an exploit trying to patch kernel functions or insert malicious code), the device will immediately panic and reboot. This protection makes persistent kernel exploits far harder, as attackers can't simply hook or patch kernel instructions without triggering a system crash.
- **Kernel Text Readonly Region (KTRR)** is a hardware-based security feature introduced on iOS devices. It uses the CPU's memory controller to mark the kernel's code (text) section as permanently read-only after boot. Once locked, even the kernel itself cannot modify this memory region. This prevents attackers (and even privileged code) from patching kernel instructions at runtime, closing off a major class of exploits that relied on modifying kernel code directly.
- **Pointer Authentication Codes (PAC)** use cryptographic signatures embedded into unused bits of pointers to verify their integrity before use. When a pointer (like a return address or function pointer) is created, the CPU signs it with a secret key; before dereferencing, the CPU checks the signature. If the pointer was tampered with, the check fails and execution stops. This prevents attackers from forging or reusing corrupted pointers in memory corruption exploits, making techniques like ROP or JOP much harder to pull off reliably.
- **Privileged Access Never (PAN)** is a hardware feature that prevents the kernel (privileged mode) from directly accessing user-space memory unless it explicitly enables access. This stops attackers who gained kernel code execution from easily reading or writing user memory to escalate exploits or steal sensitive data. By enforcing strict separation, PAN reduces the impact of kernel exploits and blocks many common privilege-escalation techniques.
- **Page Protection Layer (PPL)** is an iOS security mechanism that protects critical kernel-managed memory regions, especially those related to code signing and entitlements. It enforces strict write protections using the MMU (Memory Management Unit) and additional checks, ensuring that even privileged kernel code cannot arbitrarily modify sensitive pages. This prevents attackers who gain kernel-level execution from tampering with security-critical structures, making persistence and code-signing bypasses significantly harder.

## Physical use-after-free

This is a summary of the post at [https://alfiecg.uk/2024/09/24/Kernel-exploit.html](https://alfiecg.uk/2024/09/24/Kernel-exploit.html); further information about exploits using this technique can be found in [https://github.com/felix-pb/kfd](https://github.com/felix-pb/kfd)
@@ -15,11 +15,13 @@ It's enough to **generate a list of the most probable phishing names** that an a…

### Finding suspicious domains

For this purpose, you can use any of the following tools. Note that these tools will also perform DNS requests automatically to check if the domain has any IP assigned to it:

- [**dnstwist**](https://github.com/elceef/dnstwist)
- [**urlcrazy**](https://github.com/urbanadventurer/urlcrazy)

Tip: If you generate a candidate list, also feed it into your DNS resolver logs to detect **NXDOMAIN lookups from inside your org** (users trying to reach a typo before the attacker actually registers it). Sinkhole or pre-block these domains if policy allows. A quick resolution check over the candidates is sketched below.
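If you want to script that check yourself, a minimal stdlib-only sketch could look like this (the candidate list here is hypothetical; in practice, feed in the dnstwist/urlcrazy output):

```python
import socket

# Hypothetical candidates; in practice, feed in dnstwist/urlcrazy output
candidates = ["paypa1.com", "paypal-login.com", "paypall.com"]

for domain in candidates:
    try:
        # Standard A/AAAA lookup through the system resolver
        addrs = {info[4][0] for info in socket.getaddrinfo(domain, None)}
        print(f"[resolves]   {domain} -> {', '.join(sorted(addrs))}")
    except socket.gaierror:
        # NXDOMAIN / no records: likely still unregistered -- keep watching it
        print(f"[unresolved] {domain}")
```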
### Bitflipping

**You can find a short explanation of this technique in the parent page. Or read the original research in** [**https://www.bleepingcomputer.com/news/security/hijacking-traffic-to-microsoft-s-windowscom-with-bitflipping/**](https://www.bleepingcomputer.com/news/security/hijacking-traffic-to-microsoft-s-windowscom-with-bitflipping/)

@@ -29,6 +31,12 @@ For example, a 1 bit modification in the domain microsoft.com can transform it i…

**All possible bit-flipping domain names should also be monitored.** A small generator is sketched below.
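As a rough illustration of the technique (not the original research code), this sketch enumerates single bit-flips of each character and keeps only variants that are still valid DNS names:

```python
import string

ALLOWED = set(string.ascii_lowercase + string.digits + "-")

def bitflips(domain: str) -> list[str]:
    """Return domains that are one bit-flip away from `domain` and still DNS-valid."""
    variants = set()
    for i, ch in enumerate(domain):
        if ch == ".":
            continue  # keep the label separator intact
        for bit in range(8):
            flipped = chr(ord(ch) ^ (1 << bit))
            if flipped in ALLOWED and flipped != ch:
                variants.add(domain[:i] + flipped + domain[i + 1:])
    return sorted(variants)

print(bitflips("microsoft.com"))  # e.g. includes "eicrosoft.com" and "mhcrosoft.com"
```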
If you also need to consider homoglyph/IDN lookalikes (e.g., mixing Latin/Cyrillic characters), check:

{{#ref}}
homograph-attacks.md
{{#endref}}
### Basic checks

Once you have a list of potential suspicious domain names you should **check** them (mainly the HTTP and HTTPS ports) to **see if they are using some login form similar** to one of the victim's domains.\
@@ -42,11 +50,78 @@ If you want to go one step further I would recommend you to **monitor those susp…

In order to **automate this** I would recommend having a list of login forms of the victim's domains, spidering the suspicious web pages and comparing each login form found inside the suspicious domains with each login form of the victim's domain using something like `ssdeep` (see the sketch below).\
If you have located the login forms of the suspicious domains, you can try to **send junk credentials** and **check if they redirect you to the victim's domain**.
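A bare-bones version of that comparison, assuming the `ssdeep` Python binding is installed and using placeholder URLs:

```python
import requests
import ssdeep  # pip install ssdeep (requires the libfuzzy native library)

# Placeholder URLs: your real login page vs. a suspected clone
legit = requests.get("https://login.yourbrand.com/", timeout=10).text
suspect = requests.get("https://login.yourbrand-secure.com/", timeout=10).text

# ssdeep.compare returns a 0-100 similarity score between two fuzzy hashes
score = ssdeep.compare(ssdeep.hash(legit), ssdeep.hash(suspect))
if score > 50:  # arbitrary threshold; tune it against known-good pairs
    print(f"Likely clone (ssdeep similarity {score})")
```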
---

### Hunting by favicon and web fingerprints (Shodan/ZoomEye/Censys)

Many phishing kits reuse favicons from the brand they impersonate. Internet-wide scanners compute a MurmurHash3 of the base64-encoded favicon. You can generate the hash and pivot on it:

Python example (mmh3):

```python
import base64, requests, mmh3

# Fetch the brand favicon, base64-encode it and compute the MurmurHash3 value
# that internet-wide scanners index for favicons
url = "https://www.paypal.com/favicon.ico"  # change to your brand icon
b64 = base64.encodebytes(requests.get(url, timeout=10).content)
print(mmh3.hash(b64))  # e.g., 309020573
```

- Query Shodan: `http.favicon.hash:309020573`
- With tooling: look at community tools like favfreak to generate hashes and dorks for Shodan/ZoomEye/Censys.

Notes:

- Favicons are reused; treat matches as leads and validate content and certs before acting.
- Combine with domain-age and keyword heuristics for better precision.
### URL telemetry hunting (urlscan.io)

`urlscan.io` stores historical screenshots, DOM, requests and TLS metadata of submitted URLs. You can hunt for brand abuse and clones.

Example queries (UI or API):

- Find lookalikes excluding your legit domains: `page.domain:(/.*yourbrand.*/ AND NOT yourbrand.com AND NOT www.yourbrand.com)`
- Find sites hotlinking your assets: `domain:yourbrand.com AND NOT page.domain:yourbrand.com`
- Restrict to recent results: append `AND date:>now-7d`

API example:

```bash
# Search recent scans mentioning your brand
curl -s 'https://urlscan.io/api/v1/search/?q=page.domain:(/.*yourbrand.*/%20AND%20NOT%20yourbrand.com)%20AND%20date:>now-7d' \
  -H 'API-Key: <YOUR_URLSCAN_KEY>' | jq '.results[].page.url'
```

From the JSON, pivot on:

- `page.tlsIssuer`, `page.tlsValidFrom`, `page.tlsAgeDays` to spot very new certs for lookalikes
- `task.source` values like `certstream-suspicious` to tie findings to CT monitoring
### Domain age via RDAP (scriptable)

RDAP returns machine-readable creation events, which makes it useful to flag **newly registered domains (NRDs)**.

```bash
# .com/.net RDAP (Verisign)
curl -s https://rdap.verisign.com/com/v1/domain/suspicious-example.com | \
  jq -r '.events[] | select(.eventAction=="registration") | .eventDate'

# Generic helper using the rdap.net redirector
curl -s https://www.rdap.net/domain/suspicious-example.com | jq
```

Enrich your pipeline by tagging domains with registration age buckets (e.g., <7 days, <30 days) and prioritise triage accordingly, e.g. with the helper sketched below.
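A small helper along those lines (using the rdap.net redirector from above; the bucket thresholds are arbitrary starting points):

```python
from datetime import datetime, timezone

import requests

def rdap_age_days(domain: str):
    """Days since registration according to RDAP, or None if unknown."""
    r = requests.get(f"https://www.rdap.net/domain/{domain}", timeout=10)
    if r.status_code != 200:
        return None
    for event in r.json().get("events", []):
        if event.get("eventAction") == "registration":
            created = datetime.fromisoformat(event["eventDate"].replace("Z", "+00:00"))
            return (datetime.now(timezone.utc) - created).days
    return None

age = rdap_age_days("suspicious-example.com")
bucket = "unknown" if age is None else "<7d" if age < 7 else "<30d" if age < 30 else "older"
print(f"age={age} bucket={bucket}")
```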
### TLS/JAx fingerprints to spot AiTM infrastructure

Modern credential-phishing increasingly uses **Adversary-in-the-Middle (AiTM)** reverse proxies (e.g., Evilginx) to steal session tokens. You can add network-side detections:

- Log TLS/HTTP fingerprints (JA3/JA4/JA4S/JA4H) at egress. Some Evilginx builds have been observed with stable JA4 client/server values. Alert on known-bad fingerprints only as a weak signal and always confirm with content and domain intel.
- Proactively record TLS certificate metadata (issuer, SAN count, wildcard use, validity) for lookalike hosts discovered via CT or urlscan and correlate with DNS age and geolocation.

> Note: Treat fingerprints as enrichment, not as sole blockers; frameworks evolve and may randomise or obfuscate.
### Domain names using keywords

The parent page also mentions a domain name variation technique that consists of putting the **victim's domain name inside a bigger domain** (e.g. paypal-financial.com for paypal.com).

#### Certificate Transparency

It's not possible to take the previous "Brute-Force" approach, but it's actually **possible to uncover such phishing attempts** thanks to certificate transparency. Every time a certificate is emitted by a CA, the details are made public. This means that by reading the certificate transparency logs, or even monitoring them, it's **possible to find domains that are using a keyword inside their name**. For example, if an attacker generates a certificate for [https://paypal-financial.com](https://paypal-financial.com), by inspecting the certificate it's possible to find the keyword "paypal" and know that a suspicious domain is being used.

@@ -62,11 +137,17 @@ Using this last option you can even use the field Matching Identities to see if…

**Another alternative** is the fantastic project called [**CertStream**](https://medium.com/cali-dog-security/introducing-certstream-3fc13bb98067). CertStream provides a real-time stream of newly generated certificates which you can use to detect specified keywords in (near) real-time. In fact, there is a project called [**phishing_catcher**](https://github.com/x0rz/phishing_catcher) that does just that; a minimal version is sketched below.

Practical tip: when triaging CT hits, prioritise NRDs, untrusted/unknown registrars, privacy-proxy WHOIS, and certs with very recent `NotBefore` times. Maintain an allowlist of your owned domains/brands to reduce noise.
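A minimal phishing_catcher-style watcher, assuming the `certstream` Python client (the public calidog endpoint may be unavailable at times, so consider pointing it at your own CT aggregator; the keywords are hypothetical):

```python
import certstream  # pip install certstream

KEYWORDS = ("paypal", "yourbrand")  # hypothetical brand keywords to watch

def callback(message, context):
    # Each message describes a certificate observed in the CT log stream
    if message["message_type"] != "certificate_update":
        return
    for domain in message["data"]["leaf_cert"]["all_domains"]:
        if any(keyword in domain.lower() for keyword in KEYWORDS):
            print(f"[CT hit] {domain}")

# Blocks and invokes `callback` for every certificate seen in the stream
certstream.listen_for_events(callback, url="wss://certstream.calidog.io/")
```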
#### **New domains**

**One last alternative** is to gather a list of **newly registered domains** for some TLDs ([Whoxy](https://www.whoxy.com/newly-registered-domains/) provides such a service) and **check the keywords in these domains**. However, long domains usually use one or more subdomains, therefore the keyword won't appear inside the FLD and you won't be able to find the phishing subdomain.

Additional heuristic: treat certain **file-extension TLDs** (e.g., `.zip`, `.mov`) with extra suspicion in alerting. These are commonly confused for filenames in lures; combine the TLD signal with brand keywords and NRD age for better precision, as in the triage sketch below.
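A simple scoring pass over an NRD feed combining both signals (the keyword list and weights are illustrative):

```python
RISKY_TLDS = {"zip", "mov"}            # file-extension TLDs flagged above
KEYWORDS = {"paypal", "yourbrand"}     # hypothetical brand keywords

def triage_score(domain: str) -> int:
    """Higher score = triage sooner. Weights are arbitrary starting points."""
    labels = domain.lower().rstrip(".").split(".")
    score = 0
    if labels[-1] in RISKY_TLDS:
        score += 2
    if any(kw in label for kw in KEYWORDS for label in labels):
        score += 3
    return score

for d in ("yourbrand-invoice.zip", "cute-cats.mov", "example.com"):
    print(d, triage_score(d))
```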
## References

- urlscan.io – Search API reference: https://urlscan.io/docs/search/
- APNIC Blog – JA4+ network fingerprinting (includes Evilginx example): https://blog.apnic.net/2023/11/22/ja4-network-fingerprinting/

{{#include ../../banners/hacktricks-training.md}}
```diff
@@ -24,13 +24,15 @@
 /* 2 — load a single index (remote → local) */
 async function loadIndex(remote, local, isCloud=false){
   let rawLoaded = false;
-  try {
-    const r = await fetch(remote,{mode:'cors'});
-    if (!r.ok) throw new Error('HTTP '+r.status);
-    importScripts(URL.createObjectURL(new Blob([await r.text()],{type:'application/javascript'})));
-    rawLoaded = true;
-  } catch(e){ console.warn('remote',remote,'failed →',e); }
-  if(!rawLoaded){
+  if(remote){
+    try {
+      const r = await fetch(remote,{mode:'cors'});
+      if (!r.ok) throw new Error('HTTP '+r.status);
+      importScripts(URL.createObjectURL(new Blob([await r.text()],{type:'application/javascript'})));
+      rawLoaded = true;
+    } catch(e){ console.warn('remote',remote,'failed →',e); }
+  }
+  if(!rawLoaded && local){
     try { importScripts(abs(local)); rawLoaded = true; }
     catch(e){ console.error('local',local,'failed →',e); }
   }
@@ -40,13 +42,41 @@
   return data;
 }
 
+async function loadWithFallback(remotes, local, isCloud=false){
+  if(remotes.length){
+    const [primary, ...secondary] = remotes;
+    const primaryData = await loadIndex(primary, null, isCloud);
+    if(primaryData) return primaryData;
+
+    if(local){
+      const localData = await loadIndex(null, local, isCloud);
+      if(localData) return localData;
+    }
+
+    for (const remote of secondary){
+      const data = await loadIndex(remote, null, isCloud);
+      if(data) return data;
+    }
+  }
+
+  return local ? loadIndex(null, local, isCloud) : null;
+}
+
 (async () => {
-  const MAIN_RAW = 'https://raw.githubusercontent.com/HackTricks-wiki/hacktricks/refs/heads/master/searchindex.js';
-  const CLOUD_RAW = 'https://raw.githubusercontent.com/HackTricks-wiki/hacktricks-cloud/refs/heads/master/searchindex.js';
+  const htmlLang = (document.documentElement.lang || 'en').toLowerCase();
+  const lang = htmlLang.split('-')[0];
+  const mainReleaseBase = 'https://github.com/HackTricks-wiki/hacktricks/releases/download';
+  const cloudReleaseBase = 'https://github.com/HackTricks-wiki/hacktricks-cloud/releases/download';
+
+  const mainTags = Array.from(new Set([`searchindex-${lang}`, 'searchindex-en', 'searchindex-master']));
+  const cloudTags = Array.from(new Set([`searchindex-${lang}`, 'searchindex-en', 'searchindex-master']));
+
+  const MAIN_REMOTE_SOURCES = mainTags.map(tag => `${mainReleaseBase}/${tag}/searchindex.js`);
+  const CLOUD_REMOTE_SOURCES = cloudTags.map(tag => `${cloudReleaseBase}/${tag}/searchindex.js`);
 
   const indices = [];
-  const main = await loadIndex(MAIN_RAW , '/searchindex.js', false); if(main) indices.push(main);
-  const cloud= await loadIndex(CLOUD_RAW, '/searchindex-cloud.js', true ); if(cloud) indices.push(cloud);
+  const main = await loadWithFallback(MAIN_REMOTE_SOURCES , '/searchindex.js', false); if(main) indices.push(main);
+  const cloud= await loadWithFallback(CLOUD_REMOTE_SOURCES, '/searchindex-cloud.js', true ); if(cloud) indices.push(cloud);
 
   if(!indices.length){ postMessage({ready:false, error:'no-index'}); return; }
```