2025-12-25 upload
67
venv/Lib/site-packages/mitmproxy/addons/__init__.py
Normal file
@@ -0,0 +1,67 @@
|
||||
from mitmproxy.addons import anticache
|
||||
from mitmproxy.addons import anticomp
|
||||
from mitmproxy.addons import block
|
||||
from mitmproxy.addons import blocklist
|
||||
from mitmproxy.addons import browser
|
||||
from mitmproxy.addons import clientplayback
|
||||
from mitmproxy.addons import command_history
|
||||
from mitmproxy.addons import comment
|
||||
from mitmproxy.addons import core
|
||||
from mitmproxy.addons import cut
|
||||
from mitmproxy.addons import disable_h2c
|
||||
from mitmproxy.addons import dns_resolver
|
||||
from mitmproxy.addons import export
|
||||
from mitmproxy.addons import maplocal
|
||||
from mitmproxy.addons import mapremote
|
||||
from mitmproxy.addons import modifybody
|
||||
from mitmproxy.addons import modifyheaders
|
||||
from mitmproxy.addons import next_layer
|
||||
from mitmproxy.addons import onboarding
|
||||
from mitmproxy.addons import proxyauth
|
||||
from mitmproxy.addons import proxyserver
|
||||
from mitmproxy.addons import save
|
||||
from mitmproxy.addons import savehar
|
||||
from mitmproxy.addons import script
|
||||
from mitmproxy.addons import serverplayback
|
||||
from mitmproxy.addons import stickyauth
|
||||
from mitmproxy.addons import stickycookie
|
||||
from mitmproxy.addons import strip_dns_https_records
|
||||
from mitmproxy.addons import tlsconfig
|
||||
from mitmproxy.addons import update_alt_svc
|
||||
from mitmproxy.addons import upstream_auth
|
||||
|
||||
|
||||
def default_addons():
    """
    Return the list of addon instances that ship enabled with mitmproxy.

    NOTE(review): the order of this list appears significant — addons receive
    hooks in registration order (e.g. Core first, script loading before
    replay/modification addons) — confirm before reordering.
    """
    return [
        core.Core(),
        browser.Browser(),
        block.Block(),
        strip_dns_https_records.StripDnsHttpsRecords(),
        blocklist.BlockList(),
        anticache.AntiCache(),
        anticomp.AntiComp(),
        clientplayback.ClientPlayback(),
        command_history.CommandHistory(),
        comment.Comment(),
        cut.Cut(),
        disable_h2c.DisableH2C(),
        export.Export(),
        onboarding.Onboarding(),
        proxyauth.ProxyAuth(),
        proxyserver.Proxyserver(),
        script.ScriptLoader(),
        dns_resolver.DnsResolver(),
        next_layer.NextLayer(),
        serverplayback.ServerPlayback(),
        mapremote.MapRemote(),
        maplocal.MapLocal(),
        modifybody.ModifyBody(),
        modifyheaders.ModifyHeaders(),
        stickyauth.StickyAuth(),
        stickycookie.StickyCookie(),
        save.Save(),
        savehar.SaveHar(),
        tlsconfig.TlsConfig(),
        upstream_auth.UpstreamAuth(),
        update_alt_svc.UpdateAltSvc(),
    ]
|
||||
18
venv/Lib/site-packages/mitmproxy/addons/anticache.py
Normal file
@@ -0,0 +1,18 @@
|
||||
from mitmproxy import ctx
|
||||
|
||||
|
||||
class AntiCache:
    """Addon gating `Request.anticache()` behind the `anticache` option."""

    def load(self, loader):
        """Register the boolean `anticache` option (disabled by default)."""
        loader.add_option(
            "anticache",
            bool,
            False,
            """
            Strip out request headers that might cause the server to return
            304-not-modified.
            """,
        )

    def request(self, flow):
        """On each request, delegate to `flow.request.anticache()` if enabled."""
        if not ctx.options.anticache:
            return
        flow.request.anticache()
|
||||
15
venv/Lib/site-packages/mitmproxy/addons/anticomp.py
Normal file
@@ -0,0 +1,15 @@
|
||||
from mitmproxy import ctx
|
||||
|
||||
|
||||
class AntiComp:
    """Addon gating `Request.anticomp()` behind the `anticomp` option."""

    def load(self, loader):
        """Register the boolean `anticomp` option (disabled by default)."""
        loader.add_option(
            "anticomp",
            bool,
            False,
            "Try to convince servers to send us un-compressed data.",
        )

    def request(self, flow):
        """On each request, delegate to `flow.request.anticomp()` if enabled."""
        if not ctx.options.anticomp:
            return
        flow.request.anticomp()
|
||||
144
venv/Lib/site-packages/mitmproxy/addons/asgiapp.py
Normal file
@@ -0,0 +1,144 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import urllib.parse
|
||||
|
||||
import asgiref.compatibility
|
||||
import asgiref.wsgi
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import http
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ASGIApp:
    """
    An addon that hosts an ASGI/WSGI HTTP app within mitmproxy, at a specified hostname and port.

    Some important caveats:
    - This implementation will block and wait until the entire HTTP response is completed before sending out data.
    - It currently only implements the HTTP protocol (Lifespan and WebSocket are unimplemented).
    """

    def __init__(self, asgi_app, host: str, port: int | None):
        # Normalize legacy two-callable ASGI apps into single-callable form.
        asgi_app = asgiref.compatibility.guarantee_single_callable(asgi_app)
        self.asgi_app, self.host, self.port = asgi_app, host, port

    @property
    def name(self) -> str:
        # Unique addon name, so several apps can be registered side by side.
        return f"asgiapp:{self.host}:{self.port}"

    def should_serve(self, flow: http.HTTPFlow) -> bool:
        # Serve only live, not-yet-answered, error-free flows addressed to our
        # host; port=None matches any port.
        return bool(
            flow.request.pretty_host == self.host
            and (self.port is None or flow.request.port == self.port)
            and flow.live
            and not flow.error
            and not flow.response
        )

    async def request(self, flow: http.HTTPFlow) -> None:
        # request hook: answer matching flows directly from the embedded app.
        if self.should_serve(flow):
            await serve(self.asgi_app, flow)
|
||||
|
||||
|
||||
class WSGIApp(ASGIApp):
    """Host a WSGI app by first wrapping it into asgiref's WSGI→ASGI adapter."""

    def __init__(self, wsgi_app, host: str, port: int | None):
        # The adapter exposes the WSGI callable through the ASGI interface,
        # so the rest of ASGIApp works unchanged.
        asgi_app = asgiref.wsgi.WsgiToAsgi(wsgi_app)
        super().__init__(asgi_app, host, port)
|
||||
|
||||
|
||||
# Maps mitmproxy's http_version strings onto the values used in an ASGI
# scope's "http_version" field (e.g. "HTTP/2.0" -> "2"); unknown versions
# fall back to "1.1" at the lookup site.
HTTP_VERSION_MAP = {
    "HTTP/1.0": "1.0",
    "HTTP/1.1": "1.1",
    "HTTP/2.0": "2",
}
|
||||
|
||||
|
||||
def make_scope(flow: http.HTTPFlow) -> dict:
    """Build an ASGI HTTP connection scope dict from a mitmproxy HTTP flow."""
    # %3F is a quoted question mark
    quoted_path = urllib.parse.quote_from_bytes(flow.request.data.path).split(
        "%3F", maxsplit=1
    )

    # (Unicode string) – HTTP request target excluding any query string, with percent-encoded
    # sequences and UTF-8 byte sequences decoded into characters.
    path = quoted_path[0]

    # (byte string) – URL portion after the ?, percent-encoded.
    query_string: bytes
    if len(quoted_path) > 1:
        query_string = urllib.parse.unquote(quoted_path[1]).encode()
    else:
        query_string = b""

    return {
        "type": "http",
        "asgi": {
            "version": "3.0",
            "spec_version": "2.1",
        },
        "http_version": HTTP_VERSION_MAP.get(flow.request.http_version, "1.1"),
        "method": flow.request.method,
        # NOTE(review): the ASGI spec defines "scheme" as lowercase
        # ("http"/"https"); .upper() produces "HTTP"/"HTTPS" here — confirm
        # hosted apps tolerate this before changing either way.
        "scheme": flow.request.scheme.upper(),
        "path": path,
        "raw_path": flow.request.path,
        "query_string": query_string,
        # Header names lowercased, as ASGI requires.
        "headers": [
            (name.lower(), value) for (name, value) in flow.request.headers.fields
        ],
        "client": flow.client_conn.peername,
        "extensions": {
            # Expose the mitmproxy master so hosted apps can reach proxy internals.
            "mitmproxy.master": ctx.master,
        },
    }
|
||||
|
||||
|
||||
async def serve(app, flow: http.HTTPFlow):
    """
    Serve one request from `flow` with the (single-callable) ASGI `app`.

    The ASGI receive/send callables are bridged onto the flow: the request
    body is handed over in a single chunk, and the app's response events are
    accumulated into `flow.response`. If the app raises or never completes a
    response, a 500 response is attached instead.
    """

    scope = make_scope(flow)
    done = asyncio.Event()
    received_body = False
    sent_response = False

    async def receive():
        # First call hands over the entire request body at once.
        nonlocal received_body
        if not received_body:
            received_body = True
            return {
                "type": "http.request",
                "body": flow.request.raw_content,
            }
        else:  # pragma: no cover
            # We really don't expect this to be called a second time, but what to do?
            # We just wait until the request is done before we continue here with sending a disconnect.
            await done.wait()
            return {"type": "http.disconnect"}

    async def send(event):
        if event["type"] == "http.response.start":
            flow.response = http.Response.make(
                event["status"], b"", event.get("headers", [])
            )
            # Keep the body decoded so chunks can simply be concatenated below.
            flow.response.decode()
        elif event["type"] == "http.response.body":
            assert flow.response
            flow.response.content += event.get("body", b"")
            if not event.get("more_body", False):
                nonlocal sent_response
                sent_response = True
        else:
            raise AssertionError(f"Unexpected event: {event['type']}")

    try:
        await app(scope, receive, send)
        if not sent_response:
            # (was a pointless f-string with no placeholders)
            raise RuntimeError("no response sent.")
    except Exception as e:
        logger.exception(f"Error in asgi app: {e}")
        flow.response = http.Response.make(500, b"ASGI Error.")
    finally:
        # Unblock any receive() call still waiting for a disconnect.
        done.set()
|
||||
48
venv/Lib/site-packages/mitmproxy/addons/block.py
Normal file
@@ -0,0 +1,48 @@
|
||||
import ipaddress
|
||||
import logging
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy.proxy import mode_specs
|
||||
|
||||
|
||||
class Block:
    """Kill incoming client connections based on their source IP address."""

    def load(self, loader):
        loader.add_option(
            "block_global",
            bool,
            True,
            """
            Block connections from public IP addresses.
            """,
        )
        loader.add_option(
            "block_private",
            bool,
            False,
            """
            Block connections from local (private) IP addresses.
            This option does not affect loopback addresses (connections from the local machine),
            which are always permitted.
            """,
        )

    def client_connected(self, client):
        # peername may carry an IPv6 scope id suffix ("%<zone>"); strip it
        # before parsing the address.
        parts = client.peername[0].rsplit("%", 1)
        address = ipaddress.ip_address(parts[0])
        if isinstance(address, ipaddress.IPv6Address):
            # Classify IPv4-mapped IPv6 addresses (::ffff:a.b.c.d) by their IPv4 form.
            address = address.ipv4_mapped or address

        # Loopback clients and local-mode connections are always allowed.
        if address.is_loopback or isinstance(client.proxy_mode, mode_specs.LocalMode):
            return

        if ctx.options.block_private and address.is_private:
            logging.warning(
                f"Client connection from {client.peername[0]} killed by block_private option."
            )
            # Assigning client.error rejects the connection.
            client.error = "Connection killed by block_private."

        if ctx.options.block_global and address.is_global:
            logging.warning(
                f"Client connection from {client.peername[0]} killed by block_global option."
            )
            client.error = "Connection killed by block_global."
|
||||
81
venv/Lib/site-packages/mitmproxy/addons/blocklist.py
Normal file
@@ -0,0 +1,81 @@
|
||||
from collections.abc import Sequence
|
||||
from typing import NamedTuple
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flowfilter
|
||||
from mitmproxy import http
|
||||
from mitmproxy import version
|
||||
from mitmproxy.net.http.status_codes import NO_RESPONSE
|
||||
|
||||
|
||||
class BlockSpec(NamedTuple):
    # One parsed block_list entry: the filter selecting which flows match,
    # and the HTTP status code to answer matching requests with.
    matches: flowfilter.TFilter
    status_code: int
|
||||
|
||||
|
||||
def parse_spec(option: str) -> BlockSpec:
    """
    Parse a block_list entry of the form ``/flow-filter/status``, where the
    first character of the string defines the separator.

    Raises ValueError if the segment count, status code, or filter is invalid.
    """
    separator = option[0]
    segments = option[1:].split(separator, 2)
    if len(segments) != 2:
        raise ValueError("Invalid number of parameters (2 are expected)")

    pattern, status_text = segments
    try:
        code = int(status_text)
    except ValueError:
        raise ValueError(f"Invalid HTTP status code: {status_text}")

    return BlockSpec(matches=flowfilter.parse(pattern), status_code=code)
|
||||
|
||||
|
||||
class BlockList:
    """Answer requests matching configured filters with a fixed status code."""

    def __init__(self) -> None:
        # Parsed block_list entries; rebuilt whenever the option changes.
        self.items: list[BlockSpec] = []

    def load(self, loader):
        loader.add_option(
            "block_list",
            Sequence[str],
            [],
            """
            Block matching requests and return an empty response with the specified HTTP status.
            Option syntax is "/flow-filter/status-code", where flow-filter describes
            which requests this rule should be applied to and status-code is the HTTP status code to return for
            blocked requests. The separator ("/" in the example) can be any character.
            Setting a non-standard status code of 444 will close the connection without sending a response.
            """,
        )

    def configure(self, updated):
        if "block_list" in updated:
            self.items = []
            for option in ctx.options.block_list:
                try:
                    spec = parse_spec(option)
                except ValueError as e:
                    # Surface parse failures as option errors so they are
                    # reported to the user instead of crashing the addon.
                    raise exceptions.OptionsError(
                        f"Cannot parse block_list option {option}: {e}"
                    ) from e
                self.items.append(spec)

    def request(self, flow: http.HTTPFlow) -> None:
        # Skip flows that already have a response/error or are not live.
        if flow.response or flow.error or not flow.live:
            return

        for spec in self.items:
            if spec.matches(flow):
                flow.metadata["blocklisted"] = True
                if spec.status_code == NO_RESPONSE:
                    # Special 444 status: close without sending any response.
                    flow.kill()
                else:
                    flow.response = http.Response.make(
                        spec.status_code, headers={"Server": version.MITMPROXY}
                    )
|
||||
186
venv/Lib/site-packages/mitmproxy/addons/browser.py
Normal file
@@ -0,0 +1,186 @@
|
||||
import logging
|
||||
import shutil
|
||||
import subprocess
|
||||
import tempfile
|
||||
|
||||
from mitmproxy import command
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy.log import ALERT
|
||||
|
||||
|
||||
def find_executable_cmd(*search_paths) -> list[str] | None:
|
||||
for browser in search_paths:
|
||||
if shutil.which(browser):
|
||||
return [browser]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def find_flatpak_cmd(*search_paths) -> list[str] | None:
|
||||
if shutil.which("flatpak"):
|
||||
for browser in search_paths:
|
||||
if (
|
||||
subprocess.run(
|
||||
["flatpak", "info", browser],
|
||||
stdout=subprocess.DEVNULL,
|
||||
stderr=subprocess.DEVNULL,
|
||||
).returncode
|
||||
== 0
|
||||
):
|
||||
return ["flatpak", "run", "-p", browser]
|
||||
|
||||
return None
|
||||
|
||||
|
||||
class Browser:
    """
    Launch disposable, proxy-configured browser instances (Chrome/Chromium or
    Firefox) pointed at the running mitmproxy instance.
    """

    # NOTE(review): class-level mutable attributes — all Browser instances
    # share these lists until done() rebinds them on the instance. Presumably
    # only one Browser addon is ever registered; confirm before changing.
    browser: list[subprocess.Popen] = []
    tdir: list[tempfile.TemporaryDirectory] = []

    @command.command("browser.start")
    def start(self, browser: str = "chrome") -> None:
        # Dispatch on the requested browser; anything else is reported as invalid.
        if len(self.browser) > 0:
            logging.log(ALERT, "Starting additional browser")

        if browser in ("chrome", "chromium"):
            self.launch_chrome()
        elif browser == "firefox":
            self.launch_firefox()
        else:
            logging.log(ALERT, "Invalid browser name.")

    def launch_chrome(self) -> None:
        """
        Start an isolated instance of Chrome that points to the currently
        running proxy.
        """
        cmd = find_executable_cmd(
            "/Applications/Google Chrome.app/Contents/MacOS/Google Chrome",
            # https://stackoverflow.com/questions/40674914/google-chrome-path-in-windows-10
            r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe",
            r"C:\Program Files (x86)\Google\Application\chrome.exe",
            # Linux binary names from Python's webbrowser module.
            "google-chrome",
            "google-chrome-stable",
            "chrome",
            "chromium",
            "chromium-browser",
            "google-chrome-unstable",
        ) or find_flatpak_cmd(
            "com.google.Chrome",
            "org.chromium.Chromium",
            "com.github.Eloston.UngoogledChromium",
            "com.google.ChromeDev",
        )

        if not cmd:
            logging.log(
                ALERT, "Your platform is not supported yet - please submit a patch."
            )
            return

        # Fresh throwaway profile directory per launch; cleaned up in done().
        tdir = tempfile.TemporaryDirectory()
        self.tdir.append(tdir)
        self.browser.append(
            subprocess.Popen(
                [
                    *cmd,
                    "--user-data-dir=%s" % str(tdir.name),
                    "--proxy-server={}:{}".format(
                        ctx.options.listen_host or "127.0.0.1",
                        ctx.options.listen_port or "8080",
                    ),
                    "--disable-fre",
                    "--no-default-browser-check",
                    "--no-first-run",
                    "--disable-extensions",
                    "about:blank",
                ],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
        )

    def launch_firefox(self) -> None:
        """
        Start an isolated instance of Firefox that points to the currently
        running proxy.
        """
        cmd = find_executable_cmd(
            "/Applications/Firefox.app/Contents/MacOS/firefox",
            r"C:\Program Files\Mozilla Firefox\firefox.exe",
            "firefox",
            "mozilla-firefox",
            "mozilla",
        ) or find_flatpak_cmd("org.mozilla.firefox")

        if not cmd:
            logging.log(
                ALERT, "Your platform is not supported yet - please submit a patch."
            )
            return

        host = ctx.options.listen_host or "127.0.0.1"
        port = ctx.options.listen_port or 8080
        # Profile prefs: route traffic through the proxy and disable
        # first-run/telemetry/update network chatter for a quiet capture.
        prefs = [
            'user_pref("datareporting.policy.firstRunURL", "");',
            'user_pref("network.proxy.type", 1);',
            'user_pref("network.proxy.share_proxy_settings", true);',
            'user_pref("datareporting.healthreport.uploadEnabled", false);',
            'user_pref("app.normandy.enabled", false);',
            'user_pref("app.update.auto", false);',
            'user_pref("app.update.enabled", false);',
            'user_pref("app.update.autoInstallEnabled", false);',
            # BUG FIX: a missing trailing comma here previously caused implicit
            # string concatenation, merging this pref and the next into one entry.
            'user_pref("app.shield.optoutstudies.enabled", false);',
            'user_pref("extensions.blocklist.enabled", false);',
            'user_pref("browser.safebrowsing.downloads.remote.enabled", false);',
            'user_pref("browser.region.network.url", "");',
            'user_pref("browser.region.update.enabled", false);',
            'user_pref("browser.region.local-geocoding", false);',
            'user_pref("extensions.pocket.enabled", false);',
            'user_pref("network.captive-portal-service.enabled", false);',
            'user_pref("network.connectivity-service.enabled", false);',
            'user_pref("toolkit.telemetry.server", "");',
            'user_pref("dom.push.serverURL", "");',
            'user_pref("services.settings.enabled", false);',
            'user_pref("browser.newtab.preload", false);',
            'user_pref("browser.safebrowsing.provider.google4.updateURL", "");',
            'user_pref("browser.safebrowsing.provider.mozilla.updateURL", "");',
            'user_pref("browser.newtabpage.activity-stream.feeds.topsites", false);',
            'user_pref("browser.newtabpage.activity-stream.default.sites", "");',
            'user_pref("browser.newtabpage.activity-stream.showSponsoredTopSites", false);',
            'user_pref("browser.bookmarks.restore_default_bookmarks", false);',
            'user_pref("browser.bookmarks.file", "");',
        ]
        for service in ("http", "ssl"):
            prefs += [
                f'user_pref("network.proxy.{service}", "{host}");',
                f'user_pref("network.proxy.{service}_port", {port});',
            ]

        tdir = tempfile.TemporaryDirectory()

        with open(tdir.name + "/prefs.js", "w") as file:
            # NOTE(review): writelines() inserts no newlines, so all prefs end
            # up on one line; each statement ends with ";" and presumably the
            # prefs parser accepts this — confirm before "fixing".
            file.writelines(prefs)

        self.tdir.append(tdir)
        self.browser.append(
            subprocess.Popen(
                [
                    *cmd,
                    "--profile",
                    str(tdir.name),
                    "--new-window",
                    "about:blank",
                ],
                stdout=subprocess.DEVNULL,
                stderr=subprocess.DEVNULL,
            )
        )

    def done(self):
        """Kill every launched browser and remove its temporary profile."""
        for browser in self.browser:
            browser.kill()
        for tdir in self.tdir:
            tdir.cleanup()
        self.browser = []
        self.tdir = []
|
||||
298
venv/Lib/site-packages/mitmproxy/addons/clientplayback.py
Normal file
@@ -0,0 +1,298 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import time
|
||||
from collections.abc import Sequence
|
||||
from types import TracebackType
|
||||
from typing import cast
|
||||
from typing import Literal
|
||||
|
||||
import mitmproxy.types
|
||||
from mitmproxy import command
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flow
|
||||
from mitmproxy import http
|
||||
from mitmproxy import io
|
||||
from mitmproxy.connection import ConnectionState
|
||||
from mitmproxy.connection import Server
|
||||
from mitmproxy.hooks import UpdateHook
|
||||
from mitmproxy.log import ALERT
|
||||
from mitmproxy.options import Options
|
||||
from mitmproxy.proxy import commands
|
||||
from mitmproxy.proxy import events
|
||||
from mitmproxy.proxy import layers
|
||||
from mitmproxy.proxy import server
|
||||
from mitmproxy.proxy.context import Context
|
||||
from mitmproxy.proxy.layer import CommandGenerator
|
||||
from mitmproxy.proxy.layers.http import HTTPMode
|
||||
from mitmproxy.proxy.mode_specs import UpstreamMode
|
||||
from mitmproxy.utils import asyncio_utils
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class MockServer(layers.http.HttpConnection):
    """
    A mock HTTP "server" that just pretends it received a full HTTP request,
    which is then processed by the proxy core.
    """

    flow: http.HTTPFlow

    def __init__(self, flow: http.HTTPFlow, context: Context):
        super().__init__(context, context.client)
        self.flow = flow

    def _handle_event(self, event: events.Event) -> CommandGenerator[None]:
        if isinstance(event, events.Start):
            content = self.flow.request.raw_content
            # Stamp fresh request timestamps on the replayed copy.
            self.flow.request.timestamp_start = self.flow.request.timestamp_end = (
                time.time()
            )
            # Emit the synthetic request events (stream id 1) the HTTP layer
            # would normally derive from wire data: headers, then body,
            # trailers, and end-of-message as applicable.
            yield layers.http.ReceiveHttp(
                layers.http.RequestHeaders(
                    1,
                    self.flow.request,
                    end_stream=not (content or self.flow.request.trailers),
                    replay_flow=self.flow,
                )
            )
            if content:
                yield layers.http.ReceiveHttp(layers.http.RequestData(1, content))
            if self.flow.request.trailers:  # pragma: no cover
                # TODO: Cover this once we support HTTP/1 trailers.
                yield layers.http.ReceiveHttp(
                    layers.http.RequestTrailers(1, self.flow.request.trailers)
                )
            yield layers.http.ReceiveHttp(layers.http.RequestEndOfMessage(1))
        elif isinstance(
            event,
            (
                layers.http.ResponseHeaders,
                layers.http.ResponseData,
                layers.http.ResponseTrailers,
                layers.http.ResponseEndOfMessage,
                layers.http.ResponseProtocolError,
            ),
        ):
            # Response events are deliberately swallowed: the replay only
            # needs the hooks to fire, not to write bytes to a client.
            pass
        else:  # pragma: no cover
            logger.warning(f"Unexpected event during replay: {event}")
|
||||
|
||||
|
||||
class ReplayHandler(server.ConnectionHandler):
    """Drives a single replayed flow through a fresh proxy connection handler."""

    layer: layers.HttpLayer

    def __init__(self, flow: http.HTTPFlow, options: Options) -> None:
        # Work on a copy of the original client connection, forced open.
        client = flow.client_conn.copy()
        client.state = ConnectionState.OPEN

        context = Context(client, options)
        context.server = Server(address=(flow.request.host, flow.request.port))
        if flow.request.scheme == "https":
            context.server.tls = True
            context.server.sni = flow.request.pretty_host
        if options.mode and options.mode[0].startswith("upstream:"):
            # Route the replayed request through the configured upstream proxy.
            mode = UpstreamMode.parse(options.mode[0])
            assert isinstance(mode, UpstreamMode)  # remove once mypy supports Self.
            context.server.via = flow.server_conn.via = (mode.scheme, mode.address)

        super().__init__(context)

        if options.mode and options.mode[0].startswith("upstream:"):
            self.layer = layers.HttpLayer(context, HTTPMode.upstream)
        else:
            self.layer = layers.HttpLayer(context, HTTPMode.transparent)
        # A MockServer stands in for the client side and injects the flow.
        self.layer.connections[client] = MockServer(flow, context.fork())
        self.flow = flow
        self.done = asyncio.Event()

    async def replay(self) -> None:
        # Kick the layer state machine off and block until completion.
        await self.server_event(events.Start())
        await self.done.wait()

    def log(
        self,
        message: str,
        level: int = logging.INFO,
        exc_info: Literal[True]
        | tuple[type[BaseException] | None, BaseException | None, TracebackType | None]
        | None = None,
    ) -> None:
        # Prefix all handler log output so replay traffic is distinguishable.
        assert isinstance(level, int)
        logger.log(level=level, msg=f"[replay] {message}")

    async def handle_hook(self, hook: commands.StartHook) -> None:
        (data,) = hook.args()
        await ctx.master.addons.handle_lifecycle(hook)
        if isinstance(data, flow.Flow):
            # Honor interception: wait until the user resumes the flow.
            await data.wait_for_resume()
        if isinstance(hook, (layers.http.HttpResponseHook, layers.http.HttpErrorHook)):
            # Response or error means the replay is finished.
            if self.transports:
                # close server connections
                for x in self.transports.values():
                    if x.handler:
                        x.handler.cancel()
                await asyncio.wait(
                    [x.handler for x in self.transports.values() if x.handler]
                )
            # signal completion
            self.done.set()
|
||||
|
||||
|
||||
class ClientPlayback:
    """Addon that replays recorded client requests against their servers."""

    playback_task: asyncio.Task | None = None
    inflight: http.HTTPFlow | None
    queue: asyncio.Queue
    options: Options
    replay_tasks: set[asyncio.Task]

    def __init__(self):
        self.queue = asyncio.Queue()
        self.inflight = None
        # NOTE(review): the worker task is tracked in `playback_task`; this
        # attribute looks vestigial — kept in case external code reads it.
        self.task = None
        self.replay_tasks = set()

    def running(self):
        self.options = ctx.options
        self.playback_task = asyncio_utils.create_task(
            self.playback(),
            name="client playback",
            keep_ref=False,
        )

    async def done(self):
        # Shut the worker down cleanly on addon teardown.
        if self.playback_task:
            self.playback_task.cancel()
            try:
                await self.playback_task
            except asyncio.CancelledError:
                pass

    async def playback(self):
        """Worker loop: take flows off the queue and replay them one by one
        (or concurrently when client_replay_concurrency == -1)."""
        while True:
            self.inflight = await self.queue.get()
            try:
                assert self.inflight
                h = ReplayHandler(self.inflight, self.options)
                if ctx.options.client_replay_concurrency == -1:
                    t = asyncio_utils.create_task(
                        h.replay(),
                        name="client playback awaiting response",
                        keep_ref=False,
                    )
                    # keep a reference so this is not garbage collected
                    self.replay_tasks.add(t)
                    t.add_done_callback(self.replay_tasks.remove)
                else:
                    await h.replay()
            except Exception:
                # (was a pointless f-string with no placeholders)
                logger.exception("Client replay has crashed!")
            self.queue.task_done()
            self.inflight = None

    def check(self, f: flow.Flow) -> str | None:
        """Return a human-readable reason why `f` cannot be replayed, or None."""
        if f.live or f == self.inflight:
            return "Can't replay live flow."
        if f.intercepted:
            return "Can't replay intercepted flow."
        if isinstance(f, http.HTTPFlow):
            if not f.request:
                return "Can't replay flow with missing request."
            if f.request.raw_content is None:
                return "Can't replay flow with missing content."
            if f.websocket is not None:
                return "Can't replay WebSocket flows."
        else:
            return "Can only replay HTTP flows."
        return None

    def load(self, loader):
        loader.add_option(
            "client_replay",
            Sequence[str],
            [],
            "Replay client requests from a saved file.",
        )
        loader.add_option(
            "client_replay_concurrency",
            int,
            1,
            "Concurrency limit on in-flight client replay requests. Currently the only valid values are 1 and -1 (no limit).",
        )

    def configure(self, updated):
        if "client_replay" in updated and ctx.options.client_replay:
            try:
                flows = io.read_flows_from_paths(ctx.options.client_replay)
            except exceptions.FlowReadException as e:
                raise exceptions.OptionsError(str(e))
            self.start_replay(flows)

        if "client_replay_concurrency" in updated:
            if ctx.options.client_replay_concurrency not in [-1, 1]:
                raise exceptions.OptionsError(
                    "Currently the only valid client_replay_concurrency values are -1 and 1."
                )

    @command.command("replay.client.count")
    def count(self) -> int:
        """
        Approximate number of flows queued for replay.
        """
        return self.queue.qsize() + int(bool(self.inflight))

    @command.command("replay.client.stop")
    def stop_replay(self) -> None:
        """
        Clear the replay queue.
        """
        updated = []
        while True:
            try:
                f = self.queue.get_nowait()
            except asyncio.QueueEmpty:
                break
            else:
                self.queue.task_done()
                # Undo the replay preparation (backup/response/error resets).
                f.revert()
                updated.append(f)

        ctx.master.addons.trigger(UpdateHook(updated))
        logger.log(ALERT, "Client replay queue cleared.")

    @command.command("replay.client")
    def start_replay(self, flows: Sequence[flow.Flow]) -> None:
        """
        Add flows to the replay queue, skipping flows that can't be replayed.
        """
        updated: list[http.HTTPFlow] = []
        for f in flows:
            err = self.check(f)
            if err:
                logger.warning(err)
                continue

            http_flow = cast(http.HTTPFlow, f)

            # Prepare the flow for replay
            http_flow.backup()
            http_flow.is_replay = "request"
            http_flow.response = None
            http_flow.error = None
            self.queue.put_nowait(http_flow)
            updated.append(http_flow)
        ctx.master.addons.trigger(UpdateHook(updated))

    @command.command("replay.client.file")
    def load_file(self, path: mitmproxy.types.Path) -> None:
        """
        Load flows from file, and add them to the replay queue.
        """
        try:
            flows = io.read_flows_from_paths([path])
        except exceptions.FlowReadException as e:
            raise exceptions.CommandError(str(e))
        self.start_replay(flows)
|
||||
96
venv/Lib/site-packages/mitmproxy/addons/command_history.py
Normal file
@@ -0,0 +1,96 @@
|
||||
import logging
|
||||
import os
|
||||
import pathlib
|
||||
from collections.abc import Sequence
|
||||
|
||||
from mitmproxy import command
|
||||
from mitmproxy import ctx
|
||||
|
||||
|
||||
class CommandHistory:
|
||||
VACUUM_SIZE = 1024
|
||||
|
||||
def __init__(self) -> None:
|
||||
self.history: list[str] = []
|
||||
self.filtered_history: list[str] = [""]
|
||||
self.current_index: int = 0
|
||||
|
||||
def load(self, loader):
|
||||
loader.add_option(
|
||||
"command_history",
|
||||
bool,
|
||||
True,
|
||||
"""Persist command history between mitmproxy invocations.""",
|
||||
)
|
||||
|
||||
@property
|
||||
def history_file(self) -> pathlib.Path:
|
||||
return pathlib.Path(os.path.expanduser(ctx.options.confdir)) / "command_history"
|
||||
|
||||
def running(self):
|
||||
# FIXME: We have a weird bug where the contract for configure is not followed and it is never called with
|
||||
# confdir or command_history as updated.
|
||||
self.configure("command_history") # pragma: no cover
|
||||
|
||||
def configure(self, updated):
|
||||
if "command_history" in updated or "confdir" in updated:
|
||||
if ctx.options.command_history and self.history_file.is_file():
|
||||
self.history = self.history_file.read_text().splitlines()
|
||||
self.set_filter("")
|
||||
|
||||
def done(self):
|
||||
if ctx.options.command_history and len(self.history) >= self.VACUUM_SIZE:
|
||||
# vacuum history so that it doesn't grow indefinitely.
|
||||
history_str = "\n".join(self.history[-self.VACUUM_SIZE // 2 :]) + "\n"
|
||||
try:
|
||||
self.history_file.write_text(history_str)
|
||||
except Exception as e:
|
||||
logging.warning(f"Failed writing to {self.history_file}: {e}")
|
||||
|
||||
@command.command("commands.history.add")
|
||||
def add_command(self, command: str) -> None:
|
||||
if not command.strip():
|
||||
return
|
||||
|
||||
self.history.append(command)
|
||||
if ctx.options.command_history:
|
||||
try:
|
||||
with self.history_file.open("a") as f:
|
||||
f.write(f"{command}\n")
|
||||
except Exception as e:
|
||||
logging.warning(f"Failed writing to {self.history_file}: {e}")
|
||||
|
||||
self.set_filter("")
|
||||
|
||||
@command.command("commands.history.get")
|
||||
def get_history(self) -> Sequence[str]:
|
||||
"""Get the entire command history."""
|
||||
return self.history.copy()
|
||||
|
||||
@command.command("commands.history.clear")
|
||||
def clear_history(self):
|
||||
if self.history_file.exists():
|
||||
try:
|
||||
self.history_file.unlink()
|
||||
except Exception as e:
|
||||
logging.warning(f"Failed deleting {self.history_file}: {e}")
|
||||
self.history = []
|
||||
self.set_filter("")
|
||||
|
||||
# Functionality to provide a filtered list that can be iterated through.
|
||||
|
||||
@command.command("commands.history.filter")
def set_filter(self, prefix: str) -> None:
    """Restrict history navigation to commands starting with *prefix*.

    The prefix itself is appended as the final entry so cycling past the
    end returns the user to what they originally typed.
    """
    matches = [c for c in self.history if c.startswith(prefix)]
    matches.append(prefix)
    self.filtered_history = matches
    self.current_index = len(matches) - 1
|
||||
|
||||
@command.command("commands.history.next")
def get_next(self) -> str:
    """Step forward through the filtered history (clamped at the end)."""
    last = len(self.filtered_history) - 1
    if self.current_index < last:
        self.current_index += 1
    return self.filtered_history[self.current_index]
|
||||
|
||||
@command.command("commands.history.prev")
def get_prev(self) -> str:
    """Step backward through the filtered history (clamped at index 0)."""
    if self.current_index > 0:
        self.current_index -= 1
    return self.filtered_history[self.current_index]
|
||||
19
venv/Lib/site-packages/mitmproxy/addons/comment.py
Normal file
@@ -0,0 +1,19 @@
|
||||
from collections.abc import Sequence
|
||||
|
||||
from mitmproxy import command
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import flow
|
||||
from mitmproxy.hooks import UpdateHook
|
||||
|
||||
|
||||
class Comment:
    """Addon exposing the `flow.comment` command."""

    @command.command("flow.comment")
    def comment(self, flow: Sequence[flow.Flow], comment: str) -> None:
        "Add a comment to a flow"
        # Set the comment on every given flow, then notify other addons
        # (e.g. the UI) that these flows changed.
        updated = list(flow)
        for f in updated:
            f.comment = comment
        ctx.master.addons.trigger(UpdateHook(updated))
|
||||
286
venv/Lib/site-packages/mitmproxy/addons/core.py
Normal file
@@ -0,0 +1,286 @@
|
||||
import logging
|
||||
import os
|
||||
from collections.abc import Sequence
|
||||
|
||||
import mitmproxy.types
|
||||
from mitmproxy import command
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flow
|
||||
from mitmproxy import hooks
|
||||
from mitmproxy import optmanager
|
||||
from mitmproxy.log import ALERT
|
||||
from mitmproxy.net.http import status_codes
|
||||
from mitmproxy.utils import emoji
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Default location of mitmproxy's configuration/state directory.
CONF_DIR = "~/.mitmproxy"
# Default proxy listen port.
LISTEN_PORT = 8080
|
||||
|
||||
|
||||
class Core:
    """Core commands: option validation plus basic flow manipulation
    (resume/mark/kill/revert/set/decode/encode) and option load/save."""

    def configure(self, updated):
        """Validate option combinations each time options change.

        Raises:
            exceptions.OptionsError: on invalid/inconsistent option values.
        """
        opts = ctx.options
        if opts.add_upstream_certs_to_client_chain and not opts.upstream_cert:
            raise exceptions.OptionsError(
                "add_upstream_certs_to_client_chain requires the upstream_cert option to be enabled."
            )
        if "client_certs" in updated:
            if opts.client_certs:
                client_certs = os.path.expanduser(opts.client_certs)
                if not os.path.exists(client_certs):
                    raise exceptions.OptionsError(
                        f"Client certificate path does not exist: {opts.client_certs}"
                    )

    @command.command("set")
    def set(self, option: str, *value: str) -> None:
        """
        Set an option. When the value is omitted, booleans are set to true,
        strings and integers are set to None (if permitted), and sequences
        are emptied. Boolean values can be true, false or toggle.
        Multiple values are concatenated with a single space.
        """
        if value:
            specs = [f"{option}={v}" for v in value]
        else:
            specs = [option]
        try:
            ctx.options.set(*specs)
        except exceptions.OptionsError as e:
            raise exceptions.CommandError(e) from e

    @command.command("flow.resume")
    def resume(self, flows: Sequence[flow.Flow]) -> None:
        """
        Resume flows if they are intercepted.
        """
        intercepted = [i for i in flows if i.intercepted]
        for f in intercepted:
            f.resume()
        ctx.master.addons.trigger(hooks.UpdateHook(intercepted))

    # FIXME: this will become view.mark later
    @command.command("flow.mark")
    def mark(self, flows: Sequence[flow.Flow], marker: mitmproxy.types.Marker) -> None:
        """
        Mark flows.
        """
        updated = []
        # An empty marker clears the mark; anything else must be a known emoji.
        # (Fixed F541: was an f-string with no placeholders.)
        if not (marker == "" or marker in emoji.emoji):
            raise exceptions.CommandError("invalid marker value")

        for i in flows:
            i.marked = marker
            updated.append(i)
        ctx.master.addons.trigger(hooks.UpdateHook(updated))

    # FIXME: this will become view.mark.toggle later
    @command.command("flow.mark.toggle")
    def mark_toggle(self, flows: Sequence[flow.Flow]) -> None:
        """
        Toggle mark for flows.
        """
        for i in flows:
            if i.marked:
                i.marked = ""
            else:
                i.marked = ":default:"
        ctx.master.addons.trigger(hooks.UpdateHook(flows))

    @command.command("flow.kill")
    def kill(self, flows: Sequence[flow.Flow]) -> None:
        """
        Kill running flows.
        """
        updated = []
        for f in flows:
            if f.killable:
                f.kill()
                updated.append(f)
        logger.log(ALERT, "Killed %s flows." % len(updated))
        ctx.master.addons.trigger(hooks.UpdateHook(updated))

    # FIXME: this will become view.revert later
    @command.command("flow.revert")
    def revert(self, flows: Sequence[flow.Flow]) -> None:
        """
        Revert flow changes.
        """
        updated = []
        for f in flows:
            if f.modified():
                f.revert()
                updated.append(f)
        logger.log(ALERT, "Reverted %s flows." % len(updated))
        ctx.master.addons.trigger(hooks.UpdateHook(updated))

    @command.command("flow.set.options")
    def flow_set_options(self) -> Sequence[str]:
        """
        The attributes that can be set with `flow.set`.
        """
        return [
            "host",
            "status_code",
            "method",
            "path",
            "url",
            "reason",
        ]

    @command.command("flow.set")
    @command.argument("attr", type=mitmproxy.types.Choice("flow.set.options"))
    def flow_set(self, flows: Sequence[flow.Flow], attr: str, value: str) -> None:
        """
        Quickly set a number of common values on flows.
        """
        val: int | str = value
        if attr == "status_code":
            try:
                val = int(val)  # type: ignore
            except ValueError as v:
                raise exceptions.CommandError(
                    "Status code is not an integer: %s" % val
                ) from v

        updated = []
        for f in flows:
            req = getattr(f, "request", None)
            # Local flags track whether this flow's request/response was
            # actually modified by the chosen attribute.
            rupdate = True
            if req:
                if attr == "method":
                    req.method = val
                elif attr == "host":
                    req.host = val
                elif attr == "path":
                    req.path = val
                elif attr == "url":
                    try:
                        req.url = val
                    except ValueError as e:
                        raise exceptions.CommandError(
                            f"URL {val!r} is invalid: {e}"
                        ) from e
                else:
                    # BUGFIX: this was `self.rupdate = False`, which leaked a
                    # stray attribute onto the addon and left the local flag
                    # True, so flows with a request were always reported as
                    # updated even when only response attributes applied.
                    rupdate = False

            resp = getattr(f, "response", None)
            supdate = True
            if resp:
                if attr == "status_code":
                    resp.status_code = val
                    if val in status_codes.RESPONSES:
                        resp.reason = status_codes.RESPONSES[val]  # type: ignore
                elif attr == "reason":
                    resp.reason = val
                else:
                    supdate = False

            if rupdate or supdate:
                updated.append(f)

        ctx.master.addons.trigger(hooks.UpdateHook(updated))
        logger.log(ALERT, f"Set {attr} on {len(updated)} flows.")

    @command.command("flow.decode")
    def decode(self, flows: Sequence[flow.Flow], part: str) -> None:
        """
        Decode flows.
        """
        updated = []
        for f in flows:
            p = getattr(f, part, None)
            if p:
                # Back up first so the change can be reverted via flow.revert.
                f.backup()
                p.decode()
                updated.append(f)
        ctx.master.addons.trigger(hooks.UpdateHook(updated))
        logger.log(ALERT, "Decoded %s flows." % len(updated))

    @command.command("flow.encode.toggle")
    def encode_toggle(self, flows: Sequence[flow.Flow], part: str) -> None:
        """
        Toggle flow encoding on and off, using deflate for encoding.
        """
        updated = []
        for f in flows:
            p = getattr(f, part, None)
            if p:
                f.backup()
                current_enc = p.headers.get("content-encoding", "identity")
                if current_enc == "identity":
                    p.encode("deflate")
                else:
                    p.decode()
                updated.append(f)
        ctx.master.addons.trigger(hooks.UpdateHook(updated))
        logger.log(ALERT, "Toggled encoding on %s flows." % len(updated))

    @command.command("flow.encode")
    @command.argument("encoding", type=mitmproxy.types.Choice("flow.encode.options"))
    def encode(
        self,
        flows: Sequence[flow.Flow],
        part: str,
        encoding: str,
    ) -> None:
        """
        Encode flows with a specified encoding.
        """
        updated = []
        for f in flows:
            p = getattr(f, part, None)
            if p:
                current_enc = p.headers.get("content-encoding", "identity")
                if current_enc == "identity":
                    # Only encode bodies that aren't already encoded.
                    f.backup()
                    p.encode(encoding)
                    updated.append(f)
        ctx.master.addons.trigger(hooks.UpdateHook(updated))
        logger.log(ALERT, "Encoded %s flows." % len(updated))

    @command.command("flow.encode.options")
    def encode_options(self) -> Sequence[str]:
        """
        The possible values for an encoding specification.
        """
        return ["gzip", "deflate", "br", "zstd"]

    @command.command("options.load")
    def options_load(self, path: mitmproxy.types.Path) -> None:
        """
        Load options from a file.
        """
        try:
            optmanager.load_paths(ctx.options, path)
        except (OSError, exceptions.OptionsError) as e:
            raise exceptions.CommandError("Could not load options - %s" % e) from e

    @command.command("options.save")
    def options_save(self, path: mitmproxy.types.Path) -> None:
        """
        Save options to a file.
        """
        try:
            optmanager.save(ctx.options, path)
        except OSError as e:
            raise exceptions.CommandError("Could not save options - %s" % e) from e

    @command.command("options.reset")
    def options_reset(self) -> None:
        """
        Reset all options to defaults.
        """
        ctx.options.reset()

    @command.command("options.reset.one")
    def options_reset_one(self, name: str) -> None:
        """
        Reset one option to its default value.
        """
        if name not in ctx.options:
            raise exceptions.CommandError("No such option: %s" % name)
        setattr(
            ctx.options,
            name,
            ctx.options.default(name),
        )
|
||||
176
venv/Lib/site-packages/mitmproxy/addons/cut.py
Normal file
@@ -0,0 +1,176 @@
|
||||
import csv
|
||||
import io
|
||||
import logging
|
||||
import os.path
|
||||
from collections.abc import Sequence
|
||||
from typing import Any
|
||||
|
||||
import pyperclip
|
||||
|
||||
import mitmproxy.types
|
||||
from mitmproxy import certs
|
||||
from mitmproxy import command
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flow
|
||||
from mitmproxy import http
|
||||
from mitmproxy.log import ALERT
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def headername(spec: str):
    """Extract the header name from a "header[...]" cut specification.

    Raises:
        exceptions.CommandError: if *spec* is not of the form "header[...]".
    """
    prefix, suffix = "header[", "]"
    if not (spec.startswith(prefix) and spec.endswith(suffix)):
        raise exceptions.CommandError("Invalid header spec: %s" % spec)
    return spec[len(prefix) : -len(suffix)].strip()
|
||||
|
||||
|
||||
def is_addr(v):
    """Heuristic: address tuples are tuples with at least two elements."""
    if not isinstance(v, tuple):
        return False
    return len(v) > 1
|
||||
|
||||
|
||||
def extract(cut: str, f: flow.Flow) -> str | bytes:
    """Resolve a single cut spec (a dotted attribute path) against flow *f*.

    Returns bytes for raw byte-valued parts, PEM text for certificates,
    "true"/"false" for booleans, and str(...) for everything else.
    """
    # Hack for https://github.com/mitmproxy/mitmproxy/issues/6721:
    # Make "save body" keybind work for WebSocket flows.
    # Ideally the keybind would be smarter and this here can get removed.
    if (
        isinstance(f, http.HTTPFlow)
        and f.websocket
        and cut in ("request.content", "response.content")
    ):
        return f.websocket._get_formatted_messages()

    path = cut.split(".")
    current: Any = f
    for i, spec in enumerate(path):
        if spec.startswith("_"):
            # Never expose private/internal attributes to cut specs.
            raise exceptions.CommandError("Can't access internal attribute %s" % spec)

        part = getattr(current, spec, None)
        if i == len(path) - 1:
            # Last path segment: apply the special-case conversions.
            if spec == "port" and is_addr(current):
                return str(current[1])
            if spec == "host" and is_addr(current):
                return str(current[0])
            elif spec.startswith("header["):
                if not current:
                    return ""
                return current.headers.get(headername(spec), "")
            elif isinstance(part, bytes):
                return part
            elif isinstance(part, bool):
                return "true" if part else "false"
            elif isinstance(part, certs.Cert):  # pragma: no cover
                return part.to_pem().decode("ascii")
            elif (
                isinstance(part, list)
                and len(part) > 0
                and isinstance(part[0], certs.Cert)
            ):
                # TODO: currently this extracts only the very first cert as PEM-encoded string.
                return part[0].to_pem().decode("ascii")
        current = part
    # Fallback: stringify whatever the path resolved to (None becomes "").
    return str(current or "")
|
||||
|
||||
|
||||
def extract_str(cut: str, f: flow.Flow) -> str:
    """Like extract(), but renders bytes via repr() so the result is always str."""
    value = extract(cut, f)
    if isinstance(value, bytes):
        return repr(value)
    return value
|
||||
|
||||
|
||||
class Cut:
    """Addon implementing the `cut`, `cut.save` and `cut.clip` commands."""

    @command.command("cut")
    def cut(
        self,
        flows: Sequence[flow.Flow],
        cuts: mitmproxy.types.CutSpec,
    ) -> mitmproxy.types.Data:
        """
        Cut data from a set of flows. Cut specifications are attribute paths
        from the base of the flow object, with a few conveniences - "port"
        and "host" retrieve parts of an address tuple, ".header[key]"
        retrieves a header value. Return values converted to strings or
        bytes: SSL certificates are converted to PEM format, bools are "true"
        or "false", "bytes" are preserved, and all other values are
        converted to strings.
        """
        # One row per flow, one column per cut spec.
        ret: list[list[str | bytes]] = []
        for f in flows:
            ret.append([extract(c, f) for c in cuts])
        return ret  # type: ignore

    @command.command("cut.save")
    def save(
        self,
        flows: Sequence[flow.Flow],
        cuts: mitmproxy.types.CutSpec,
        path: mitmproxy.types.Path,
    ) -> None:
        """
        Save cuts to file. If there are multiple flows or cuts, the format
        is UTF-8 encoded CSV. If there is exactly one row and one column,
        the data is written to file as-is, with raw bytes preserved. If the
        path is prefixed with a "+", values are appended if there is an
        existing file.
        """
        append = False
        if path.startswith("+"):
            # "+" prefix means append; strip it and expand "~".
            append = True
            epath = os.path.expanduser(path[1:])
            path = mitmproxy.types.Path(epath)
        try:
            if len(cuts) == 1 and len(flows) == 1:
                # Single value: write raw (binary mode) so bytes survive intact.
                with open(path, "ab" if append else "wb") as fp:
                    if fp.tell() > 0:
                        # We're appending to a file that already exists and has content
                        fp.write(b"\n")
                    v = extract(cuts[0], flows[0])
                    if isinstance(v, bytes):
                        fp.write(v)
                    else:
                        fp.write(v.encode("utf8"))
                logger.log(ALERT, "Saved single cut.")
            else:
                # Multiple values: emit UTF-8 CSV, one row per flow.
                with open(
                    path, "a" if append else "w", newline="", encoding="utf8"
                ) as tfp:
                    writer = csv.writer(tfp)
                    for f in flows:
                        vals = [extract_str(c, f) for c in cuts]
                        writer.writerow(vals)
                logger.log(
                    ALERT,
                    "Saved %s cuts over %d flows as CSV." % (len(cuts), len(flows)),
                )
        except OSError as e:
            logger.error(str(e))

    @command.command("cut.clip")
    def clip(
        self,
        flows: Sequence[flow.Flow],
        cuts: mitmproxy.types.CutSpec,
    ) -> None:
        """
        Send cuts to the clipboard. If there are multiple flows or cuts, the
        format is UTF-8 encoded CSV. If there is exactly one row and one
        column, the data is written to file as-is, with raw bytes preserved.
        """
        v: str | bytes
        # Build the clipboard payload in memory first, then copy once.
        fp = io.StringIO(newline="")
        if len(cuts) == 1 and len(flows) == 1:
            v = extract_str(cuts[0], flows[0])
            fp.write(v)
            logger.log(ALERT, "Clipped single cut.")
        else:
            writer = csv.writer(fp)
            for f in flows:
                vals = [extract_str(c, f) for c in cuts]
                writer.writerow(vals)
            logger.log(ALERT, "Clipped %s cuts as CSV." % len(cuts))
        try:
            pyperclip.copy(fp.getvalue())
        except pyperclip.PyperclipException as e:
            logger.error(str(e))
|
||||
42
venv/Lib/site-packages/mitmproxy/addons/disable_h2c.py
Normal file
@@ -0,0 +1,42 @@
|
||||
import logging
|
||||
|
||||
|
||||
class DisableH2C:
    """
    Strip h2c upgrade attempts and kill prior-knowledge HTTP/2 flows.

    mitmproxy only supports HTTP/2 over a TLS connection. Cleartext upgrade
    headers are removed so neither endpoint suddenly starts sending HTTP/2
    frames mid-connection, and flows that open with the HTTP/2 connection
    preface are killed.
    """

    def process_flow(self, f):
        headers = f.request.headers
        if headers.get("upgrade", "") == "h2c":
            logging.warning(
                "HTTP/2 cleartext connections (h2c upgrade requests) are currently not supported."
            )
            # Drop every header involved in the upgrade negotiation.
            for name in ("upgrade", "connection", "http2-settings"):
                if name in headers:
                    del headers[name]

        # "PRI * HTTP/2.0" is the HTTP/2 connection preface sent with
        # prior knowledge.
        preface = (
            f.request.method == "PRI"
            and f.request.path == "*"
            and f.request.http_version == "HTTP/2.0"
        )
        if preface:
            if f.killable:
                f.kill()
            logging.warning(
                "Initiating HTTP/2 connections with prior knowledge are currently not supported."
            )

    # Handlers

    def request(self, f):
        self.process_flow(f)
|
||||
182
venv/Lib/site-packages/mitmproxy/addons/dns_resolver.py
Normal file
@@ -0,0 +1,182 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import ipaddress
|
||||
import logging
|
||||
import socket
|
||||
from collections.abc import Sequence
|
||||
from functools import cache
|
||||
from typing import Protocol
|
||||
|
||||
import mitmproxy_rs
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import dns
|
||||
from mitmproxy.flow import Error
|
||||
from mitmproxy.proxy import mode_specs
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class DnsResolver:
    """Addon that answers DNS queries in regular DNS / WireGuard modes."""

    def load(self, loader):
        loader.add_option(
            "dns_use_hosts_file",
            bool,
            True,
            "Use the hosts file for DNS lookups in regular DNS mode/wireguard mode.",
        )

        loader.add_option(
            "dns_name_servers",
            Sequence[str],
            [],
            "Name servers to use for lookups in regular DNS mode/wireguard mode. Default: operating system's name servers",
        )

    def configure(self, updated):
        # Both options feed into the cached name_servers()/resolver() values
        # below, so drop those caches whenever either option changes.
        if "dns_use_hosts_file" in updated or "dns_name_servers" in updated:
            self.resolver.cache_clear()
            self.name_servers.cache_clear()

    @cache
    def name_servers(self) -> list[str]:
        """
        Returns the operating system's name servers unless custom name servers are set.
        On error, an empty list is returned.
        """
        try:
            return (
                ctx.options.dns_name_servers
                or mitmproxy_rs.dns.get_system_dns_servers()
            )
        except RuntimeError as e:
            logger.warning(
                f"Failed to get system dns servers: {e}\n"
                f"The dns_name_servers option needs to be set manually."
            )
            return []

    @cache
    def resolver(self) -> Resolver:
        """
        Returns:
            The DNS resolver to use.
        Raises:
            MissingNameServers, if name servers are unknown and `dns_use_hosts_file` is disabled.
        """
        if ns := self.name_servers():
            # We always want to use our own resolver if name server info is available.
            return mitmproxy_rs.dns.DnsResolver(
                name_servers=ns,
                use_hosts_file=ctx.options.dns_use_hosts_file,
            )
        elif ctx.options.dns_use_hosts_file:
            # Fallback to getaddrinfo as hickory's resolver isn't as reliable
            # as we would like it to be (https://github.com/mitmproxy/mitmproxy/issues/7064).
            return GetaddrinfoFallbackResolver()
        else:
            raise MissingNameServers()

    async def dns_request(self, flow: dns.DNSFlow) -> None:
        """Hook: answer (or forward upstream) a DNS query for flows we own."""
        if self._should_resolve(flow):
            # We only resolve A/AAAA IN queries ourselves.
            all_ip_lookups = (
                flow.request.query
                and flow.request.op_code == dns.op_codes.QUERY
                and flow.request.question
                and flow.request.question.class_ == dns.classes.IN
                and flow.request.question.type in (dns.types.A, dns.types.AAAA)
            )
            if all_ip_lookups:
                try:
                    flow.response = await self.resolve(flow.request)
                except MissingNameServers:
                    flow.error = Error("Cannot resolve, dns_name_servers unknown.")
            elif name_servers := self.name_servers():
                # For other records, the best we can do is to forward the query
                # to an upstream server.
                flow.server_conn.address = (name_servers[0], 53)
            else:
                flow.error = Error("Cannot resolve, dns_name_servers unknown.")

    @staticmethod
    def _should_resolve(flow: dns.DNSFlow) -> bool:
        # Resolve only live, unanswered flows in plain DNS mode, or WireGuard
        # mode queries addressed to the built-in 10.0.0.53 resolver.
        return (
            (
                isinstance(flow.client_conn.proxy_mode, mode_specs.DnsMode)
                or (
                    isinstance(flow.client_conn.proxy_mode, mode_specs.WireGuardMode)
                    and flow.server_conn.address == ("10.0.0.53", 53)
                )
            )
            and flow.live
            and not flow.response
            and not flow.error
        )

    async def resolve(
        self,
        message: dns.DNSMessage,
    ) -> dns.DNSMessage:
        """Resolve an A/AAAA query and build a success/failure response."""
        q = message.question
        assert q
        try:
            if q.type == dns.types.A:
                ip_addrs = await self.resolver().lookup_ipv4(q.name)
            else:
                ip_addrs = await self.resolver().lookup_ipv6(q.name)
        except socket.gaierror as e:
            # Map getaddrinfo failures onto DNS response codes.
            match e.args[0]:
                case socket.EAI_NONAME:
                    return message.fail(dns.response_codes.NXDOMAIN)
                case socket.EAI_NODATA:
                    # Name exists but has no records of this type.
                    ip_addrs = []
                case _:
                    return message.fail(dns.response_codes.SERVFAIL)

        return message.succeed(
            [
                dns.ResourceRecord(
                    name=q.name,
                    type=q.type,
                    class_=q.class_,
                    ttl=dns.ResourceRecord.DEFAULT_TTL,
                    data=ipaddress.ip_address(ip).packed,
                )
                for ip in ip_addrs
            ]
        )
|
||||
|
||||
|
||||
class Resolver(Protocol):
    """Structural interface required of DNS resolvers used by DnsResolver."""

    async def lookup_ip(self, domain: str) -> list[str]:  # pragma: no cover
        ...

    async def lookup_ipv4(self, domain: str) -> list[str]:  # pragma: no cover
        ...

    async def lookup_ipv6(self, domain: str) -> list[str]:  # pragma: no cover
        ...
|
||||
|
||||
|
||||
class GetaddrinfoFallbackResolver(Resolver):
    """Resolver backed by the operating system's getaddrinfo().

    Used when no name servers are known but hosts-file lookups are enabled.
    """

    async def lookup_ip(self, domain: str) -> list[str]:
        return await self._lookup(domain, socket.AF_UNSPEC)

    async def lookup_ipv4(self, domain: str) -> list[str]:
        return await self._lookup(domain, socket.AF_INET)

    async def lookup_ipv6(self, domain: str) -> list[str]:
        return await self._lookup(domain, socket.AF_INET6)

    async def _lookup(self, domain: str, family: socket.AddressFamily) -> list[str]:
        """Run getaddrinfo on the running loop and return the bare IP strings."""
        loop = asyncio.get_running_loop()
        infos = await loop.getaddrinfo(
            host=domain,
            port=None,
            family=family,
            type=socket.SOCK_STREAM,
        )
        # Each addrinfo is (family, type, proto, canonname, sockaddr);
        # the address string is the first element of sockaddr.
        return [sockaddr[0] for *_rest, sockaddr in infos]
|
||||
|
||||
|
||||
class MissingNameServers(RuntimeError):
    """Raised when resolution is impossible because no name servers are known."""
|
||||
423
venv/Lib/site-packages/mitmproxy/addons/dumper.py
Normal file
@@ -0,0 +1,423 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import shutil
|
||||
import sys
|
||||
from typing import IO
|
||||
from typing import Optional
|
||||
|
||||
from wsproto.frame_protocol import CloseReason
|
||||
|
||||
import mitmproxy_rs
|
||||
from mitmproxy import contentviews
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import dns
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flow
|
||||
from mitmproxy import flowfilter
|
||||
from mitmproxy import http
|
||||
from mitmproxy.contrib import click as miniclick
|
||||
from mitmproxy.net.dns import response_codes
|
||||
from mitmproxy.options import CONTENT_VIEW_LINES_CUTOFF
|
||||
from mitmproxy.tcp import TCPFlow
|
||||
from mitmproxy.tcp import TCPMessage
|
||||
from mitmproxy.udp import UDPFlow
|
||||
from mitmproxy.udp import UDPMessage
|
||||
from mitmproxy.utils import human
|
||||
from mitmproxy.utils import strutils
|
||||
from mitmproxy.utils import vt_codes
|
||||
from mitmproxy.websocket import WebSocketData
|
||||
from mitmproxy.websocket import WebSocketMessage
|
||||
|
||||
|
||||
def indent(n: int, text: str) -> str:
    """Strip *text*, then indent every remaining line by *n* spaces."""
    prefix = " " * n
    stripped = str(text).strip()
    return "\n".join(prefix + line for line in stripped.splitlines())
|
||||
|
||||
|
||||
# Maps syntax-highlight token tags to the VT styling kwargs passed to
# Dumper.style() when echoing pretty-printed message content.
CONTENTVIEW_STYLES: dict[str, dict[str, str | bool]] = {
    "name": dict(fg="yellow"),
    "string": dict(fg="green"),
    "number": dict(fg="blue"),
    "boolean": dict(fg="magenta"),
    "comment": dict(dim=True),
    "error": dict(fg="red"),
}
|
||||
|
||||
|
||||
class Dumper:
|
||||
def __init__(self, outfile: IO[str] | None = None):
    """Dump flows to *outfile* (default: stdout)."""
    # Optional flow filter limiting which flows get dumped; set in configure().
    self.filter: flowfilter.TFilter | None = None
    self.outfp: IO[str] = outfile or sys.stdout
    # Whether the output stream supports VT escape codes (colors/styles).
    self.out_has_vt_codes = vt_codes.ensure_supported(self.outfp)
|
||||
|
||||
def load(self, loader):
    """Register the dumper's options."""
    loader.add_option(
        "flow_detail",
        int,
        1,
        f"""
        The display detail level for flows in mitmdump: 0 (quiet) to 4 (very verbose).
        0: no output
        1: shortened request URL with response status code
        2: full request URL with response status code and HTTP headers
        3: 2 + truncated response content, content of WebSocket and TCP messages (content_view_lines_cutoff: {CONTENT_VIEW_LINES_CUTOFF})
        4: 3 + nothing is truncated
        """,
    )
    loader.add_option(
        "dumper_default_contentview",
        str,
        "auto",
        "The default content view mode.",
        choices=contentviews.registry.available_views(),
    )
    loader.add_option(
        "dumper_filter", Optional[str], None, "Limit which flows are dumped."
    )
|
||||
|
||||
def configure(self, updated):
    """Re-parse the dumper_filter option whenever it changes."""
    if "dumper_filter" not in updated:
        return
    expr = ctx.options.dumper_filter
    if not expr:
        self.filter = None
        return
    try:
        self.filter = flowfilter.parse(expr)
    except ValueError as e:
        # Surface bad filter expressions as option errors.
        raise exceptions.OptionsError(str(e)) from e
|
||||
|
||||
def style(self, text: str, **style) -> str:
    """Apply VT styling to *text*, but only when the output supports it."""
    if not style or not self.out_has_vt_codes:
        return text
    return miniclick.style(text, **style)
|
||||
|
||||
def echo(self, text: str, ident=None, **style):
    """Style *text* (optionally indenting it by *ident* spaces) and print it."""
    if ident:
        text = indent(ident, text)
    print(self.style(text, **style), file=self.outfp)
|
||||
|
||||
def _echo_headers(self, headers: http.Headers):
    """Print raw header fields, one per line, with blue-styled names."""
    for raw_name, raw_value in headers.fields:
        name = self.style(strutils.bytes_to_escaped_str(raw_name), fg="blue")
        value = strutils.bytes_to_escaped_str(raw_value)
        self.echo(f"{name}: {value}", ident=4)
|
||||
|
||||
def _echo_trailers(self, trailers: http.Headers | None):
    """Print HTTP trailers (if any) beneath a magenta separator line."""
    if trailers:
        self.echo("--- HTTP Trailers", fg="magenta", ident=4)
        self._echo_headers(trailers)
|
||||
|
||||
def _echo_message(
    self,
    message: http.Message | TCPMessage | UDPMessage | WebSocketMessage,
    flow: http.HTTPFlow | TCPFlow | UDPFlow,
):
    """Pretty-print a message body, truncating it at flow_detail == 3."""
    pretty = contentviews.prettify_message(
        message,
        flow,
        ctx.options.dumper_default_contentview,
    )

    if ctx.options.flow_detail == 3:
        # Detail level 3 truncates; level 4 prints everything.
        content_to_echo = strutils.cut_after_n_lines(
            pretty.text, ctx.options.content_view_lines_cutoff
        )
    else:
        content_to_echo = pretty.text

    if content_to_echo:
        # BUGFIX: highlight the (possibly truncated) content_to_echo instead
        # of the full pretty.text — previously the cutoff was computed but
        # the untruncated content was highlighted and printed anyway.
        highlighted = mitmproxy_rs.syntax_highlight.highlight(
            content_to_echo, pretty.syntax_highlight
        )
        self.echo("")
        self.echo(
            "".join(
                self.style(chunk, **CONTENTVIEW_STYLES.get(tag, {}))
                for tag, chunk in highlighted
            ),
            ident=4,
        )

        if len(content_to_echo) < len(pretty.text):
            self.echo("(cut off)", ident=4, dim=True)

    if ctx.options.flow_detail >= 2:
        self.echo("")
|
||||
|
||||
def _fmt_client(self, flow: flow.Flow) -> str:
    """Render the client address (or a [replay] marker) for the request line."""
    if flow.is_replay == "request":
        return self.style("[replay]", fg="yellow", bold=True)
    peername = flow.client_conn.peername
    if peername:
        addr = human.format_address(peername)
        return self.style(strutils.escape_control_characters(addr))
    # this should not happen, but we're defensive here.
    return ""  # pragma: no cover
|
||||
|
||||
def _echo_request_line(self, flow: http.HTTPFlow) -> None:
    """Print the one-line request summary: client, method, URL, version."""
    client = self._fmt_client(flow)

    # Mark HTTP/2 server-push streams explicitly.
    pushed = " PUSH_PROMISE" if "h2-pushed-stream" in flow.metadata else ""
    method = flow.request.method + pushed
    method_color = dict(GET="green", DELETE="red").get(method.upper(), "magenta")
    method = self.style(
        strutils.escape_control_characters(method), fg=method_color, bold=True
    )
    if ctx.options.showhost:
        url = flow.request.pretty_url
    else:
        url = flow.request.url

    if ctx.options.flow_detail == 1:
        # We need to truncate before applying styles, so we just focus on the URL.
        terminal_width_limit = max(shutil.get_terminal_size()[0] - 25, 50)
        if len(url) > terminal_width_limit:
            url = url[:terminal_width_limit] + "…"
    url = self.style(strutils.escape_control_characters(url), bold=True)

    http_version = ""
    if not (
        flow.request.is_http10 or flow.request.is_http11
    ) or flow.request.http_version != getattr(
        flow.response, "http_version", "HTTP/1.1"
    ):
        # Hide version for h1 <-> h1 connections.
        http_version = " " + flow.request.http_version

    self.echo(f"{client}: {method} {url}{http_version}")
|
||||
|
||||
def _echo_response_line(self, flow: http.HTTPFlow) -> None:
    """Print the one-line response summary: status code, reason, body size."""
    if flow.is_replay == "response":
        replay_str = "[replay]"
        replay = self.style(replay_str, fg="yellow", bold=True)
    else:
        replay_str = ""
        replay = ""

    assert flow.response
    code_int = flow.response.status_code
    # Color by status class: 2xx green, 3xx magenta, 4xx/5xx red.
    code_color = None
    if 200 <= code_int < 300:
        code_color = "green"
    elif 300 <= code_int < 400:
        code_color = "magenta"
    elif 400 <= code_int < 600:
        code_color = "red"
    code = self.style(
        str(code_int),
        fg=code_color,
        bold=True,
        blink=(code_int == 418),
    )

    if not (flow.response.is_http2 or flow.response.is_http3):
        reason = flow.response.reason
    else:
        # HTTP/2+ carries no reason phrase on the wire; use the canonical one.
        reason = http.status_codes.RESPONSES.get(flow.response.status_code, "")
    reason = self.style(
        strutils.escape_control_characters(reason), fg=code_color, bold=True
    )

    if flow.response.raw_content is None:
        size = "(content missing)"
    else:
        size = human.pretty_size(len(flow.response.raw_content))
    size = self.style(size, bold=True)

    http_version = ""
    if (
        not (flow.response.is_http10 or flow.response.is_http11)
        or flow.request.http_version != flow.response.http_version
    ):
        # Hide version for h1 <-> h1 connections.
        http_version = f"{flow.response.http_version} "

    arrows = self.style(" <<", bold=True)
    if ctx.options.flow_detail == 1:
        # This aligns the HTTP response code with the HTTP request method:
        # 127.0.0.1:59519: GET http://example.com/
        #               << 304 Not Modified 0b
        pad = max(
            0,
            len(human.format_address(flow.client_conn.peername))
            - (2 + len(http_version) + len(replay_str)),
        )
        arrows = " " * pad + arrows

    self.echo(f"{replay}{arrows} {http_version}{code} {reason} {size}")
|
||||
|
||||
def echo_flow(self, f: http.HTTPFlow) -> None:
    """Print a complete flow (request, response, error) at the configured detail level."""
    detail = ctx.options.flow_detail

    if f.request:
        self._echo_request_line(f)
        if detail >= 2:
            self._echo_headers(f.request.headers)
        if detail >= 3:
            self._echo_message(f.request, f)
        if detail >= 2:
            self._echo_trailers(f.request.trailers)

    if f.response:
        self._echo_response_line(f)
        if detail >= 2:
            self._echo_headers(f.response.headers)
        if detail >= 3:
            self._echo_message(f.response, f)
        if detail >= 2:
            self._echo_trailers(f.response.trailers)

    if f.error:
        self.echo(
            f" << {strutils.escape_control_characters(f.error.msg)}",
            bold=True,
            fg="red",
        )

    self.outfp.flush()
def match(self, f):
    """Return True if the flow should be printed, honoring flow_detail and the filter."""
    if ctx.options.flow_detail == 0:
        return False
    if self.filter:
        return bool(flowfilter.match(self.filter, f))
    return True
def response(self, f):
    """Hook: print the flow once its response has arrived."""
    if self.match(f):
        self.echo_flow(f)
def error(self, f):
    """Hook: print the flow when it fails with an error."""
    if self.match(f):
        self.echo_flow(f)
def websocket_message(self, f: http.HTTPFlow):
    """Hook: print the most recently received WebSocket message of a flow."""
    assert f.websocket is not None  # satisfy type checker
    if not self.match(f):
        return
    msg = f.websocket.messages[-1]
    direction = "->" if msg.from_client else "<-"
    self.echo(
        f"{human.format_address(f.client_conn.peername)} "
        f"{direction} WebSocket {msg.type.name.lower()} message "
        f"{direction} {human.format_address(f.server_conn.address)}{f.request.path}"
    )
    if ctx.options.flow_detail >= 3:
        self._echo_message(msg, f)
def websocket_end(self, f: http.HTTPFlow):
    """Hook: report how a WebSocket connection terminated."""
    assert f.websocket is not None  # satisfy type checker
    if not self.match(f):
        return
    # 1000/1001/1005 are normal closure codes; anything else is reported as an error.
    if f.websocket.close_code in {1000, 1001, 1005}:
        closer = "client" if f.websocket.closed_by_client else "server"
        self.echo(
            f"WebSocket connection closed by {closer}: {f.websocket.close_code} {f.websocket.close_reason}"
        )
    else:
        error = flow.Error(
            f"WebSocket Error: {self.format_websocket_error(f.websocket)}"
        )
        self.echo(
            f"Error in WebSocket connection to {human.format_address(f.server_conn.address)}: {error}",
            fg="red",
        )
def format_websocket_error(self, websocket: WebSocketData) -> str:
    """Render a WebSocket close code (and optional reason) as a readable string."""
    try:
        name = CloseReason(websocket.close_code).name  # type: ignore
    except ValueError:
        # Close code outside the well-known set.
        name = f"UNKNOWN_ERROR={websocket.close_code}"
    if websocket.close_reason:
        return f"{name} (reason: {websocket.close_reason})"
    return name
def _proto_error(self, f):
    """Shared printer for TCP/UDP connection errors."""
    if self.match(f):
        self.echo(
            f"Error in {f.type.upper()} connection to {human.format_address(f.server_conn.address)}: {f.error}",
            fg="red",
        )
def tcp_error(self, f):
    """Hook: print TCP connection errors."""
    self._proto_error(f)
def udp_error(self, f):
    """Hook: print UDP connection errors."""
    self._proto_error(f)
def _proto_message(self, f: TCPFlow | UDPFlow) -> None:
    """Shared printer for raw TCP/UDP (and QUIC-tunnelled) messages."""
    if not self.match(f):
        return
    message = f.messages[-1]
    direction = "->" if message.from_client else "<-"
    if f.client_conn.tls_version == "QUICv1":
        quic_type = "stream" if f.type == "tcp" else "dgrams"
        # TODO: This should not be metadata, this should be typed attributes.
        flow_type = (
            f"quic {quic_type} {f.metadata.get('quic_stream_id_client', '')} "
            f"{direction} mitmproxy {direction} "
            f"quic {quic_type} {f.metadata.get('quic_stream_id_server', '')}"
        )
    else:
        flow_type = f.type
    client = human.format_address(f.client_conn.peername)
    server = human.format_address(f.server_conn.address)
    self.echo(f"{client} {direction} {flow_type} {direction} {server}")
    if ctx.options.flow_detail >= 3:
        self._echo_message(message, f)
def tcp_message(self, f):
    """Hook: print a newly received TCP message."""
    self._proto_message(f)
def udp_message(self, f):
    """Hook: print a newly received UDP message."""
    self._proto_message(f)
def _echo_dns_query(self, f: dns.DNSFlow) -> None:
    """Print the one-line summary of a DNS query."""
    client = self._fmt_client(f)
    question = f.request.questions[0]
    opcode = dns.op_codes.to_str(f.request.op_code)
    qtype = dns.types.to_str(question.type)

    # Colour the common address lookups; anything else is flagged red.
    color = {"A": "green", "AAAA": "magenta"}.get(qtype, "red")
    desc = self.style(f"DNS {opcode} ({qtype})", fg=color)

    name = self.style(question.name, bold=True)
    self.echo(f"{client}: {desc} {name}")
def dns_response(self, f: dns.DNSFlow):
    """Hook: print a DNS query together with its answers (or the response code)."""
    assert f.response
    if not self.match(f):
        return
    self._echo_dns_query(f)

    arrows = self.style(" <<", bold=True)
    if f.response.answers:
        answers = ", ".join(
            self.style(str(answer), fg="bright_blue")
            for answer in f.response.answers
        )
    else:
        # No answers: show the response code (e.g. NXDOMAIN) instead.
        answers = self.style(
            response_codes.to_str(f.response.response_code),
            fg="red",
        )
    self.echo(f"{arrows} {answers}")
def dns_error(self, f: dns.DNSFlow):
    """Hook: print a DNS query together with its transport-level error."""
    assert f.error
    if not self.match(f):
        return
    self._echo_dns_query(f)
    msg = strutils.escape_control_characters(f.error.msg)
    self.echo(f" << {msg}", bold=True, fg="red")
55
venv/Lib/site-packages/mitmproxy/addons/errorcheck.py
Normal file
@@ -0,0 +1,55 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import sys
|
||||
|
||||
from mitmproxy import log
|
||||
from mitmproxy.contrib import click as miniclick
|
||||
from mitmproxy.utils import vt_codes
|
||||
|
||||
|
||||
class ErrorCheck:
    """Monitor startup for error log entries, and terminate immediately if there are some."""

    repeat_errors_on_stderr: bool
    """
    Repeat all errors on stderr before exiting.
    This is useful for the console UI, which otherwise swallows all output.
    """

    def __init__(self, repeat_errors_on_stderr: bool = False) -> None:
        self.repeat_errors_on_stderr = repeat_errors_on_stderr
        # Start capturing error-level records as early as possible.
        self.logger = ErrorCheckHandler()
        self.logger.install()

    def finish(self):
        """Stop capturing log records."""
        self.logger.uninstall()

    async def shutdown_if_errored(self):
        """Exit the process if any error records were captured during startup."""
        # don't run immediately, wait for all logging tasks to finish.
        await asyncio.sleep(0)
        errors = self.logger.has_errored
        if not errors:
            return
        plural = "s" if len(errors) > 1 else ""
        if self.repeat_errors_on_stderr:
            message = f"Error{plural} logged during startup:"
            if vt_codes.ensure_supported(sys.stderr):  # pragma: no cover
                message = miniclick.style(message, fg="red")
            details = "\n".join(self.logger.format(r) for r in errors)
            print(f"{message}\n{details}", file=sys.stderr)
        else:
            print(
                f"Error{plural} logged during startup, exiting...", file=sys.stderr
            )

        sys.exit(1)
class ErrorCheckHandler(log.MitmLogHandler):
    """Log handler that records every ERROR-level record it observes."""

    def __init__(self) -> None:
        super().__init__(logging.ERROR)
        # All error records seen so far, in arrival order.
        self.has_errored: list[logging.LogRecord] = []

    def emit(self, record: logging.LogRecord) -> None:
        """Capture the record; no output is produced here."""
        self.has_errored.append(record)
56
venv/Lib/site-packages/mitmproxy/addons/eventstore.py
Normal file
@@ -0,0 +1,56 @@
|
||||
import asyncio
|
||||
import collections
|
||||
import logging
|
||||
from collections.abc import Callable
|
||||
|
||||
from mitmproxy import command
|
||||
from mitmproxy import log
|
||||
from mitmproxy.log import LogEntry
|
||||
from mitmproxy.utils import signals
|
||||
|
||||
|
||||
class EventStore:
    """Bounded ring buffer of recent log entries, with change signals for UIs."""

    def __init__(self, size: int = 10000) -> None:
        # deque(maxlen=...) silently drops the oldest entries once full.
        self.data: collections.deque[LogEntry] = collections.deque(maxlen=size)
        self.sig_add = signals.SyncSignal(lambda entry: None)
        self.sig_refresh = signals.SyncSignal(lambda: None)

        # Forward every log record into this store.
        self.logger = CallbackLogger(self._add_log)
        self.logger.install()

    def done(self):
        """Detach from the logging system."""
        self.logger.uninstall()

    def _add_log(self, entry: LogEntry) -> None:
        self.data.append(entry)
        self.sig_add.send(entry)

    @property
    def size(self) -> int | None:
        """Maximum number of retained entries."""
        return self.data.maxlen

    @command.command("eventstore.clear")
    def clear(self) -> None:
        """
        Clear the event log.
        """
        self.data.clear()
        self.sig_refresh.send()
class CallbackLogger(log.MitmLogHandler):
    """Log handler that forwards formatted entries to a callback on the event loop."""

    def __init__(
        self,
        callback: Callable[[LogEntry], None],
    ):
        super().__init__()
        self.callback = callback
        # Capture the running loop at construction time so emit() can be
        # invoked safely from any thread later on.
        self.event_loop = asyncio.get_running_loop()
        self.formatter = log.MitmFormatter(colorize=False)

    def emit(self, record: logging.LogRecord) -> None:
        entry = LogEntry(
            msg=self.format(record),
            level=log.LOGGING_LEVELS_TO_LOGENTRY.get(record.levelno, "error"),
        )
        # Hand the entry over to the main loop thread-safely.
        self.event_loop.call_soon_threadsafe(self.callback, entry)
232
venv/Lib/site-packages/mitmproxy/addons/export.py
Normal file
@@ -0,0 +1,232 @@
|
||||
import logging
|
||||
import shlex
|
||||
from collections.abc import Callable
|
||||
from collections.abc import Sequence
|
||||
|
||||
import pyperclip
|
||||
|
||||
import mitmproxy.types
|
||||
from mitmproxy import command
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flow
|
||||
from mitmproxy import http
|
||||
from mitmproxy.net.http.http1 import assemble
|
||||
from mitmproxy.utils import strutils
|
||||
|
||||
|
||||
def cleanup_request(f: flow.Flow) -> http.Request:
    """Return a decoded copy of the flow's request, or raise CommandError if absent."""
    if not getattr(f, "request", None):
        raise exceptions.CommandError("Can't export flow with no request.")
    assert isinstance(f, http.HTTPFlow)
    request = f.request.copy()
    # Work on decoded content so exports don't carry transfer encodings.
    request.decode(strict=False)
    return request
|
||||
|
||||
def pop_headers(request: http.Request) -> None:
|
||||
"""Remove some headers that are redundant for curl/httpie export."""
|
||||
request.headers.pop("content-length", None)
|
||||
|
||||
if request.headers.get("host", "") == request.host:
|
||||
request.headers.pop("host")
|
||||
if request.headers.get(":authority", "") == request.host:
|
||||
request.headers.pop(":authority")
|
||||
|
||||
|
||||
def cleanup_response(f: flow.Flow) -> http.Response:
    """Return a decoded copy of the flow's response, or raise CommandError if absent."""
    if not getattr(f, "response", None):
        raise exceptions.CommandError("Can't export flow with no response.")
    assert isinstance(f, http.HTTPFlow)
    response = f.response.copy()  # type: ignore
    # Work on decoded content so exports don't carry transfer encodings.
    response.decode(strict=False)
    return response
|
||||
|
||||
def request_content_for_console(request: http.Request) -> str:
|
||||
try:
|
||||
text = request.get_text(strict=True)
|
||||
assert text
|
||||
except ValueError:
|
||||
# shlex.quote doesn't support a bytes object
|
||||
# see https://github.com/python/cpython/pull/10871
|
||||
raise exceptions.CommandError("Request content must be valid unicode")
|
||||
escape_control_chars = {chr(i): f"\\x{i:02x}" for i in range(32)}
|
||||
escaped_text = "".join(escape_control_chars.get(x, x) for x in text)
|
||||
if any(char in escape_control_chars for char in text):
|
||||
# Escaped chars need to be unescaped by the shell to be properly inperpreted by curl and httpie
|
||||
return f'"$(printf {shlex.quote(escaped_text)})"'
|
||||
|
||||
return shlex.quote(escaped_text)
|
||||
|
||||
|
||||
def curl_command(f: flow.Flow) -> str:
    """Render the flow's request as an equivalent `curl` invocation."""
    request = cleanup_request(f)
    pop_headers(request)

    args = ["curl"]

    server_addr = f.server_conn.peername[0] if f.server_conn.peername else None

    # Optionally pin the hostname to the IP address we actually talked to.
    if (
        ctx.options.export_preserve_original_ip
        and server_addr
        and request.pretty_host != server_addr
    ):
        args += [
            "--resolve",
            f"{request.pretty_host}:{request.port}:[{server_addr}]",
        ]

    for k, v in request.headers.items(multi=True):
        if k.lower() == "accept-encoding":
            # Let curl negotiate (and transparently decode) compression.
            args.append("--compressed")
        else:
            args += ["-H", f"{k}: {v}"]

    if request.method != "GET":
        if not request.content:
            # curl will not calculate content-length if there is no content
            # some server/verb combinations require content-length headers
            # (ex. nginx and POST)
            args += ["-H", "content-length: 0"]
        args += ["-X", request.method]

    args.append(request.pretty_url)

    command = " ".join(shlex.quote(arg) for arg in args)
    if request.content:
        command += f" -d {request_content_for_console(request)}"
    return command
|
||||
|
||||
def httpie_command(f: flow.Flow) -> str:
    """Render the flow's request as an equivalent `http` (httpie) invocation."""
    request = cleanup_request(f)
    pop_headers(request)

    # TODO: Once https://github.com/httpie/httpie/issues/414 is implemented, we
    # should ensure we always connect to the IP address specified in the flow,
    # similar to how it's done in curl_command.
    args = ["http", request.method, request.pretty_url]
    args.extend(f"{k}: {v}" for k, v in request.headers.items(multi=True))
    cmd = " ".join(shlex.quote(arg) for arg in args)
    if request.content:
        cmd += " <<< " + request_content_for_console(request)
    return cmd
|
||||
|
||||
def raw_request(f: flow.Flow) -> bytes:
    """Return the flow's request re-assembled as raw HTTP/1 bytes."""
    request = cleanup_request(f)
    if request.raw_content is None:
        raise exceptions.CommandError("Request content missing.")
    return assemble.assemble_request(request)
|
||||
|
||||
def raw_response(f: flow.Flow) -> bytes:
    """Return the flow's response re-assembled as raw HTTP/1 bytes."""
    response = cleanup_response(f)
    if response.raw_content is None:
        raise exceptions.CommandError("Response content missing.")
    return assemble.assemble_response(response)
|
||||
|
||||
def raw(f: flow.Flow, separator=b"\r\n\r\n") -> bytes:
    """Return either the request or response if only one exists, otherwise return both"""
    is_http = isinstance(f, http.HTTPFlow)
    has_request = bool(is_http and f.request and f.request.raw_content is not None)
    has_response = bool(is_http and f.response and f.response.raw_content is not None)

    if has_request and has_response:
        parts = [raw_request(f), raw_response(f)]
        if isinstance(f, http.HTTPFlow) and f.websocket:
            # Append the formatted WebSocket messages after the HTTP exchange.
            parts.append(f.websocket._get_formatted_messages())
        return separator.join(parts)
    if has_request:
        return raw_request(f)
    if has_response:
        return raw_response(f)
    raise exceptions.CommandError("Can't export flow with no request or response.")
|
||||
|
||||
# Registry of export formats, keyed by the name used on the command line.
formats: dict[str, Callable[[flow.Flow], str | bytes]] = {
    "curl": curl_command,
    "httpie": httpie_command,
    "raw": raw,
    "raw_request": raw_request,
    "raw_response": raw_response,
}
|
||||
|
||||
class Export:
    """Addon exposing commands to export flows as curl/httpie/raw representations."""

    def load(self, loader):
        loader.add_option(
            "export_preserve_original_ip",
            bool,
            False,
            """
            When exporting a request as an external command, make an effort to
            connect to the same IP as in the original request. This helps with
            reproducibility in cases where the behaviour depends on the
            particular host we are connecting to. Currently this only affects
            curl exports.
            """,
        )

    @command.command("export.formats")
    def formats(self) -> Sequence[str]:
        """
        Return a list of the supported export formats.
        """
        # sorted() already returns a list; the previous list(sorted(...)) was redundant.
        return sorted(formats.keys())

    @command.command("export.file")
    def file(self, format: str, flow: flow.Flow, path: mitmproxy.types.Path) -> None:
        """
        Export a flow to path.
        """
        if format not in formats:
            raise exceptions.CommandError("No such export format: %s" % format)
        v = formats[format](flow)
        try:
            with open(path, "wb") as fp:
                if isinstance(v, bytes):
                    fp.write(v)
                else:
                    # Round-trip surrogate-escaped text losslessly.
                    fp.write(v.encode("utf-8", "surrogateescape"))
        except OSError as e:
            logging.error(str(e))

    @command.command("export.clip")
    def clip(self, format: str, f: flow.Flow) -> None:
        """
        Export a flow to the system clipboard.
        """
        content = self.export_str(format, f)
        try:
            pyperclip.copy(content)
        except pyperclip.PyperclipException as e:
            logging.error(str(e))

    @command.command("export")
    def export_str(self, format: str, f: flow.Flow) -> str:
        """
        Export a flow and return the result.
        """
        if format not in formats:
            raise exceptions.CommandError("No such export format: %s" % format)

        content = formats[format](f)
        # The individual formatters may return surrogate-escaped UTF-8, but that may blow up in later steps.
        # For example, pyperclip on macOS does not like surrogates.
        # To fix this, We first surrogate-encode and then backslash-decode.
        content = strutils.always_bytes(content, "utf8", "surrogateescape")
        content = strutils.always_str(content, "utf8", "backslashreplace")
        return content
63
venv/Lib/site-packages/mitmproxy/addons/intercept.py
Normal file
@@ -0,0 +1,63 @@
|
||||
from typing import Optional
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flow
|
||||
from mitmproxy import flowfilter
|
||||
|
||||
|
||||
class Intercept:
    """Addon that pauses flows matching the `intercept` filter expression."""

    filt: flowfilter.TFilter | None = None

    def load(self, loader):
        loader.add_option("intercept_active", bool, False, "Intercept toggle")
        loader.add_option(
            "intercept", Optional[str], None, "Intercept filter expression."
        )

    def configure(self, updated):
        if "intercept" not in updated:
            return
        if ctx.options.intercept:
            try:
                self.filt = flowfilter.parse(ctx.options.intercept)
            except ValueError as e:
                raise exceptions.OptionsError(str(e)) from e
            ctx.options.intercept_active = True
        else:
            self.filt = None
            ctx.options.intercept_active = False

    def should_intercept(self, f: flow.Flow) -> bool:
        """True if the flow matches the active filter and is not a replay."""
        return bool(
            ctx.options.intercept_active
            and self.filt
            and self.filt(f)
            and not f.is_replay
        )

    def process_flow(self, f: flow.Flow) -> None:
        if self.should_intercept(f):
            f.intercept()

    # Handlers: every interceptable event funnels into process_flow().

    def request(self, f):
        self.process_flow(f)

    def response(self, f):
        self.process_flow(f)

    def tcp_message(self, f):
        self.process_flow(f)

    def udp_message(self, f):
        self.process_flow(f)

    def dns_request(self, f):
        self.process_flow(f)

    def dns_response(self, f):
        self.process_flow(f)

    def websocket_message(self, f):
        self.process_flow(f)
54
venv/Lib/site-packages/mitmproxy/addons/keepserving.py
Normal file
@@ -0,0 +1,54 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy.utils import asyncio_utils
|
||||
|
||||
|
||||
class KeepServing:
    """Addon that shuts mitmproxy down once playback/file reading has finished."""

    def load(self, loader):
        loader.add_option(
            "keepserving",
            bool,
            False,
            """
            Continue serving after client playback, server playback or file
            read. This option is ignored by interactive tools, which always keep
            serving.
            """,
        )

    def keepgoing(self) -> bool:
        """True while any replay/read activity (or an open connection) remains."""
        # Checking for proxyserver.active_connections is important for server replay,
        # the addon may report that replay is finished but not the entire response has been sent yet.
        # (https://github.com/mitmproxy/mitmproxy/issues/7569)
        checks = [
            "readfile.reading",
            "replay.client.count",
            "replay.server.count",
            "proxyserver.active_connections",
        ]
        results = [ctx.master.commands.call(c) for c in checks]
        return any(results)

    def shutdown(self):  # pragma: no cover
        ctx.master.shutdown()

    async def watch(self):
        """Poll until all work is done, then trigger shutdown."""
        while True:
            await asyncio.sleep(0.1)
            if not self.keepgoing():
                self.shutdown()

    def running(self):
        # Only watch for completion if one of the one-shot modes is active.
        triggers = [
            ctx.options.client_replay,
            ctx.options.server_replay,
            ctx.options.rfile,
        ]
        if any(triggers) and not ctx.options.keepserving:
            asyncio_utils.create_task(
                self.watch(),
                name="keepserving",
                keep_ref=True,
            )
151
venv/Lib/site-packages/mitmproxy/addons/maplocal.py
Normal file
@@ -0,0 +1,151 @@
|
||||
import logging
|
||||
import mimetypes
|
||||
import re
|
||||
import urllib.parse
|
||||
from collections.abc import Sequence
|
||||
from pathlib import Path
|
||||
from typing import NamedTuple
|
||||
|
||||
from werkzeug.security import safe_join
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flowfilter
|
||||
from mitmproxy import http
|
||||
from mitmproxy import version
|
||||
from mitmproxy.utils.spec import parse_spec
|
||||
|
||||
|
||||
class MapLocalSpec(NamedTuple):
    """A parsed map_local rule: flow filter + URL regex + local filesystem root."""

    matches: flowfilter.TFilter  # flow filter that must match
    regex: str  # regex applied to the request URL
    local_path: Path  # file or directory to serve from
||||
|
||||
def parse_map_local_spec(option: str) -> MapLocalSpec:
    """Parse a "[/flow-filter]/url-regex/path" option string into a MapLocalSpec.

    Raises ValueError if the regex is invalid or the path does not exist.
    """
    flt, regex, replacement = parse_spec(option)

    try:
        re.compile(regex)
    except re.error as e:
        raise ValueError(f"Invalid regular expression {regex!r} ({e})")

    try:
        # resolve(strict=True) verifies the target actually exists.
        path = Path(replacement).expanduser().resolve(strict=True)
    except FileNotFoundError as e:
        raise ValueError(f"Invalid file path: {replacement} ({e})")

    return MapLocalSpec(flt, regex, path)
|
||||
|
||||
def _safe_path_join(root: Path, untrusted: str) -> Path:
    """Join a Path element with an untrusted str.

    This is a convenience wrapper for werkzeug's safe_join,
    raising a ValueError if the path is malformed."""
    parts = Path(untrusted).parts
    combined = safe_join(root.as_posix(), *parts)
    if combined is None:
        raise ValueError("Untrusted paths.")
    return Path(combined)
|
||||
|
||||
def file_candidates(url: str, spec: MapLocalSpec) -> list[Path]:
    """
    Get all potential file candidates given a URL and a mapping spec ordered by preference.
    This function already assumes that the spec regex matches the URL.
    """
    m = re.search(spec.regex, url)
    assert m
    # Prefer the first capture group; otherwise use everything after the match.
    if m.groups():
        suffix = m.group(1)
    else:
        suffix = re.split(spec.regex, url, maxsplit=1)[1]
    suffix = suffix.split("?")[0]  # remove query string
    suffix = suffix.strip("/")

    if not suffix:
        return [spec.local_path / "index.html"]

    decoded = urllib.parse.unquote(suffix)
    suffix_candidates = [decoded, f"{decoded}/index.html"]

    # Also try a sanitized variant with filesystem-unfriendly characters replaced.
    sanitized = re.sub(r"[^0-9a-zA-Z\-_.=(),/]", "_", decoded)
    if decoded != sanitized:
        suffix_candidates.extend([sanitized, f"{sanitized}/index.html"])
    try:
        return [_safe_path_join(spec.local_path, c) for c in suffix_candidates]
    except ValueError:
        return []
|
||||
|
||||
class MapLocal:
    """Addon that answers matching requests from local files instead of the network."""

    def __init__(self) -> None:
        self.replacements: list[MapLocalSpec] = []

    def load(self, loader):
        loader.add_option(
            "map_local",
            Sequence[str],
            [],
            """
            Map remote resources to a local file using a pattern of the form
            "[/flow-filter]/url-regex/file-or-directory-path", where the
            separator can be any character.
            """,
        )

    def configure(self, updated):
        if "map_local" not in updated:
            return
        self.replacements = []
        for option in ctx.options.map_local:
            try:
                spec = parse_map_local_spec(option)
            except ValueError as e:
                raise exceptions.OptionsError(
                    f"Cannot parse map_local option {option}: {e}"
                ) from e
            self.replacements.append(spec)

    def request(self, flow: http.HTTPFlow) -> None:
        # Leave flows alone that already have a response/error or are not live.
        if flow.response or flow.error or not flow.live:
            return

        url = flow.request.pretty_url

        all_candidates = []
        for spec in self.replacements:
            if spec.matches(flow) and re.search(spec.regex, url):
                if spec.local_path.is_file():
                    candidates = [spec.local_path]
                else:
                    candidates = file_candidates(url, spec)
                all_candidates.extend(candidates)

                # First candidate that is an existing file wins.
                local_file = None
                for candidate in candidates:
                    if candidate.is_file():
                        local_file = candidate
                        break

                if local_file:
                    headers = {"Server": version.MITMPROXY}
                    mimetype = mimetypes.guess_type(str(local_file))[0]
                    if mimetype:
                        headers["Content-Type"] = mimetype

                    try:
                        contents = local_file.read_bytes()
                    except OSError as e:
                        logging.warning(f"Could not read file: {e}")
                        continue

                    flow.response = http.Response.make(200, contents, headers)
                    # only set flow.response once, for the first matching rule
                    return
        if all_candidates:
            flow.response = http.Response.make(404)
            logging.info(
                f"None of the local file candidates exist: {', '.join(str(x) for x in all_candidates)}"
            )
68
venv/Lib/site-packages/mitmproxy/addons/mapremote.py
Normal file
@@ -0,0 +1,68 @@
|
||||
import re
|
||||
from collections.abc import Sequence
|
||||
from typing import NamedTuple
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flowfilter
|
||||
from mitmproxy import http
|
||||
from mitmproxy.utils.spec import parse_spec
|
||||
|
||||
|
||||
class MapRemoteSpec(NamedTuple):
    """A parsed map_remote rule: flow filter + URL regex + replacement string."""

    matches: flowfilter.TFilter  # flow filter that must match
    subject: str  # regex applied to the request URL
    replacement: str  # substitution template
||||
|
||||
def parse_map_remote_spec(option: str) -> MapRemoteSpec:
    """Parse a "[/flow-filter]/url-regex/replacement" option string.

    Raises ValueError if the regex does not compile.
    """
    spec = MapRemoteSpec(*parse_spec(option))

    try:
        re.compile(spec.subject)
    except re.error as e:
        raise ValueError(f"Invalid regular expression {spec.subject!r} ({e})")

    return spec
|
||||
|
||||
class MapRemote:
    """Addon that rewrites request URLs according to map_remote rules."""

    def __init__(self) -> None:
        self.replacements: list[MapRemoteSpec] = []

    def load(self, loader):
        loader.add_option(
            "map_remote",
            Sequence[str],
            [],
            """
            Map remote resources to another remote URL using a pattern of the form
            "[/flow-filter]/url-regex/replacement", where the separator can
            be any character.
            """,
        )

    def configure(self, updated):
        if "map_remote" not in updated:
            return
        self.replacements = []
        for option in ctx.options.map_remote:
            try:
                spec = parse_map_remote_spec(option)
            except ValueError as e:
                raise exceptions.OptionsError(
                    f"Cannot parse map_remote option {option}: {e}"
                ) from e
            self.replacements.append(spec)

    def request(self, flow: http.HTTPFlow) -> None:
        if flow.response or flow.error or not flow.live:
            return
        for spec in self.replacements:
            if not spec.matches(flow):
                continue
            url = flow.request.pretty_url
            rewritten = re.sub(spec.subject, spec.replacement, url)
            # this is a bit messy: setting .url also updates the host header,
            # so we really only do that if the replacement affected the URL.
            if rewritten != url:
                flow.request.url = rewritten  # type: ignore
83
venv/Lib/site-packages/mitmproxy/addons/modifybody.py
Normal file
@@ -0,0 +1,83 @@
|
||||
import logging
|
||||
import re
|
||||
from collections.abc import Sequence
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy.addons.modifyheaders import ModifySpec
|
||||
from mitmproxy.addons.modifyheaders import parse_modify_spec
|
||||
from mitmproxy.log import ALERT
|
||||
|
||||
# Module-level logger for the modify_body addon.
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ModifyBody:
    """Addon that rewrites request/response bodies according to modify_body rules."""

    def __init__(self) -> None:
        self.replacements: list[ModifySpec] = []

    def load(self, loader):
        loader.add_option(
            "modify_body",
            Sequence[str],
            [],
            """
            Replacement pattern of the form "[/flow-filter]/regex/[@]replacement", where
            the separator can be any character. The @ allows to provide a file path that
            is used to read the replacement string.
            """,
        )

    def configure(self, updated):
        if "modify_body" in updated:
            self.replacements = []
            for option in ctx.options.modify_body:
                try:
                    spec = parse_modify_spec(option, True)
                except ValueError as e:
                    raise exceptions.OptionsError(
                        f"Cannot parse modify_body option {option}: {e}"
                    ) from e
                self.replacements.append(spec)

        # Warn when streaming would bypass body modification entirely.
        stream_and_modify_conflict = (
            ctx.options.modify_body
            and ctx.options.stream_large_bodies
            and ("modify_body" in updated or "stream_large_bodies" in updated)
        )
        if stream_and_modify_conflict:
            logger.log(
                ALERT,
                "Both modify_body and stream_large_bodies are active. "
                "Streamed bodies will not be modified.",
            )

    def request(self, flow):
        if flow.response or flow.error or not flow.live:
            return
        self.run(flow)

    def response(self, flow):
        if flow.error or not flow.live:
            return
        self.run(flow)

    def run(self, flow):
        """Apply every matching replacement to the response body (or, pre-response, the request body)."""
        for spec in self.replacements:
            if not spec.matches(flow):
                continue
            try:
                replacement = spec.read_replacement()
            except OSError as e:
                logging.warning(f"Could not read replacement file: {e}")
                continue
            target = flow.response if flow.response else flow.request
            target.content = re.sub(
                spec.subject,
                replacement,
                target.content,
                flags=re.DOTALL,
            )
117
venv/Lib/site-packages/mitmproxy/addons/modifyheaders.py
Normal file
@@ -0,0 +1,117 @@
|
||||
import logging
|
||||
import re
|
||||
from collections.abc import Sequence
|
||||
from pathlib import Path
|
||||
from typing import NamedTuple
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flowfilter
|
||||
from mitmproxy import http
|
||||
from mitmproxy.http import Headers
|
||||
from mitmproxy.utils import strutils
|
||||
from mitmproxy.utils.spec import parse_spec
|
||||
|
||||
|
||||
class ModifySpec(NamedTuple):
    """A parsed modify_headers/modify_body rule."""

    matches: flowfilter.TFilter  # flow filter that must match
    subject: bytes  # header name or body regex
    replacement_str: str  # literal value, or "@path" to read from a file

    def read_replacement(self) -> bytes:
        """
        Process the replacement str. This usually just involves converting it to bytes.
        However, if it starts with `@`, we interpret the rest as a file path to read from.

        Raises:
            - IOError if the file cannot be read.
        """
        if self.replacement_str.startswith("@"):
            return Path(self.replacement_str[1:]).expanduser().read_bytes()
        # We could cache this at some point, but unlikely to be a problem.
        return strutils.escaped_str_to_bytes(self.replacement_str)
|
||||
|
||||
def parse_modify_spec(option: str, subject_is_regex: bool) -> ModifySpec:
    """
    Parse a modify option ("[/flow-filter]/subject/replacement") into a ModifySpec.

    Validates the subject as a regex when `subject_is_regex` is set, and
    eagerly checks that an `@file` replacement is readable.

    Raises:
        ValueError on an invalid regular expression or unreadable file path.
    """
    flow_filter, subject_str, replacement = parse_spec(option)
    subject = strutils.escaped_str_to_bytes(subject_str)

    if subject_is_regex:
        try:
            re.compile(subject)
        except re.error as e:
            raise ValueError(f"Invalid regular expression {subject!r} ({e})")

    spec = ModifySpec(flow_filter, subject, replacement)

    # Fail fast at configuration time rather than on the first matching flow.
    try:
        spec.read_replacement()
    except OSError as e:
        raise ValueError(f"Invalid file path: {replacement[1:]} ({e})")

    return spec
|
||||
|
||||
|
||||
class ModifyHeaders:
    """Addon: add, replace, or remove HTTP headers via the `modify_headers` option."""

    def __init__(self) -> None:
        # Parsed specs; rebuilt in configure() whenever the option changes.
        self.replacements: list[ModifySpec] = []

    def load(self, loader):
        """Register the `modify_headers` option."""
        loader.add_option(
            "modify_headers",
            Sequence[str],
            [],
            """
            Header modify pattern of the form "[/flow-filter]/header-name/[@]header-value", where the
            separator can be any character. The @ allows to provide a file path that is used to read
            the header value string. An empty header-value removes existing header-name headers.
            """,
        )

    def configure(self, updated):
        """Re-parse all specs; surface parse failures as OptionsError."""
        if "modify_headers" in updated:
            self.replacements = []
            for option in ctx.options.modify_headers:
                try:
                    # False: header names are matched literally, not as regexes.
                    spec = parse_modify_spec(option, False)
                except ValueError as e:
                    raise exceptions.OptionsError(
                        f"Cannot parse modify_headers option {option}: {e}"
                    ) from e
                self.replacements.append(spec)

    def requestheaders(self, flow):
        """Modify request headers. Skips flows that already carry a response,
        errored out, or are no longer live."""
        if flow.response or flow.error or not flow.live:
            return
        self.run(flow, flow.request.headers)

    def responseheaders(self, flow):
        """Modify response headers for live, error-free flows."""
        if flow.error or not flow.live:
            return
        self.run(flow, flow.response.headers)

    def run(self, flow: http.HTTPFlow, hdrs: Headers) -> None:
        """Apply all matching specs to `hdrs`.

        Two passes: first remove every targeted header, then add back the
        non-empty replacement values, so an empty value acts as a pure removal.
        """
        matches = []

        # first check all the filters against the original, unmodified flow
        for spec in self.replacements:
            matches.append(spec.matches(flow))

        # unset all specified headers
        for i, spec in enumerate(self.replacements):
            if matches[i]:
                hdrs.pop(spec.subject, None)

        # set all specified headers if the replacement string is not empty
        for i, spec in enumerate(self.replacements):
            if matches[i]:
                try:
                    replacement = spec.read_replacement()
                except OSError as e:
                    logging.warning(f"Could not read replacement file: {e}")
                    continue
                else:
                    if replacement:
                        hdrs.add(spec.subject, replacement)
|
||||
474
venv/Lib/site-packages/mitmproxy/addons/next_layer.py
Normal file
@@ -0,0 +1,474 @@
|
||||
"""
|
||||
This addon determines the next protocol layer in our proxy stack.
|
||||
Whenever a protocol layer in the proxy wants to pass a connection to a child layer and isn't sure which protocol comes
|
||||
next, it calls the `next_layer` hook, which ends up here.
|
||||
For example, if mitmproxy runs as a regular proxy, we first need to determine if
|
||||
new clients start with a TLS handshake right away (Secure Web Proxy) or send a plaintext HTTP CONNECT request.
|
||||
This addon here peeks at the incoming bytes and then makes a decision based on proxy mode, mitmproxy options, etc.
|
||||
|
||||
For a typical HTTPS request, this addon is called a couple of times: First to determine that we start with an HTTP layer
|
||||
which processes the `CONNECT` request, a second time to determine that the client then starts negotiating TLS, and a
|
||||
third time when we check if the protocol within that TLS stream is actually HTTP or something else.
|
||||
|
||||
Sometimes it's useful to hardcode specific logic in next_layer when one wants to do fancy things.
|
||||
In that case it's not necessary to modify mitmproxy's source, adding a custom addon with a next_layer event hook
|
||||
that sets nextlayer.layer works just as well.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import re
|
||||
import sys
|
||||
from collections.abc import Iterable
|
||||
from collections.abc import Sequence
|
||||
from typing import Any
|
||||
from typing import cast
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy.connection import Address
|
||||
from mitmproxy.net.tls import starts_like_dtls_record
|
||||
from mitmproxy.net.tls import starts_like_tls_record
|
||||
from mitmproxy.proxy import layer
|
||||
from mitmproxy.proxy import layers
|
||||
from mitmproxy.proxy import mode_specs
|
||||
from mitmproxy.proxy import tunnel
|
||||
from mitmproxy.proxy.context import Context
|
||||
from mitmproxy.proxy.layer import Layer
|
||||
from mitmproxy.proxy.layers import ClientQuicLayer
|
||||
from mitmproxy.proxy.layers import ClientTLSLayer
|
||||
from mitmproxy.proxy.layers import DNSLayer
|
||||
from mitmproxy.proxy.layers import HttpLayer
|
||||
from mitmproxy.proxy.layers import modes
|
||||
from mitmproxy.proxy.layers import RawQuicLayer
|
||||
from mitmproxy.proxy.layers import ServerQuicLayer
|
||||
from mitmproxy.proxy.layers import ServerTLSLayer
|
||||
from mitmproxy.proxy.layers import TCPLayer
|
||||
from mitmproxy.proxy.layers import UDPLayer
|
||||
from mitmproxy.proxy.layers.http import HTTPMode
|
||||
from mitmproxy.proxy.layers.quic import quic_parse_client_hello_from_datagrams
|
||||
from mitmproxy.proxy.layers.tls import dtls_parse_client_hello
|
||||
from mitmproxy.proxy.layers.tls import HTTP_ALPNS
|
||||
from mitmproxy.proxy.layers.tls import parse_client_hello
|
||||
from mitmproxy.tls import ClientHello
|
||||
|
||||
if sys.version_info < (3, 11):
|
||||
from typing_extensions import assert_never
|
||||
else:
|
||||
from typing import assert_never
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def stack_match(
    context: Context, layers: Sequence[type[Layer] | tuple[type[Layer], ...]]
) -> bool:
    """Return True iff the context's current layer stack matches the pattern.

    Each pattern entry is a layer type (or tuple of types) checked with
    isinstance; `typing.Any` acts as a wildcard. Lengths must match exactly.
    """
    if len(layers) != len(context.layers):
        return False
    for actual, expected in zip(context.layers, layers):
        if expected is not Any and not isinstance(actual, expected):
            return False
    return True
|
||||
|
||||
|
||||
class NeedsMoreData(Exception):
    """Raised to defer the next-layer decision inside the NextLayer addon
    until more client data has arrived."""
|
||||
|
||||
|
||||
class NextLayer:
    """Decide which protocol layer handles a connection next (see module docstring)."""

    # Compiled host patterns, refreshed from options in configure().
    ignore_hosts: Sequence[re.Pattern] = ()
    allow_hosts: Sequence[re.Pattern] = ()
    tcp_hosts: Sequence[re.Pattern] = ()
    udp_hosts: Sequence[re.Pattern] = ()

    def configure(self, updated):
        """Recompile the host regexes whenever the corresponding options change."""
        if "tcp_hosts" in updated:
            self.tcp_hosts = [
                re.compile(x, re.IGNORECASE) for x in ctx.options.tcp_hosts
            ]
        if "udp_hosts" in updated:
            self.udp_hosts = [
                re.compile(x, re.IGNORECASE) for x in ctx.options.udp_hosts
            ]
        if "allow_hosts" in updated or "ignore_hosts" in updated:
            self.ignore_hosts = [
                re.compile(x, re.IGNORECASE) for x in ctx.options.ignore_hosts
            ]
            self.allow_hosts = [
                re.compile(x, re.IGNORECASE) for x in ctx.options.allow_hosts
            ]

    def next_layer(self, nextlayer: layer.NextLayer):
        """`next_layer` hook entry point: fill in nextlayer.layer if unset."""
        if nextlayer.layer:
            return  # do not override something another addon has set.
        try:
            nextlayer.layer = self._next_layer(
                nextlayer.context,
                nextlayer.data_client(),
                nextlayer.data_server(),
            )
        except NeedsMoreData:
            # Leave nextlayer.layer unset; we will be called again with more data.
            logger.debug(
                f"Deferring layer decision, not enough data: {nextlayer.data_client().hex()!r}"
            )

    def _next_layer(
        self, context: Context, data_client: bytes, data_server: bytes
    ) -> Layer | None:
        """Pick the next layer for this connection.

        Raises:
            NeedsMoreData, if more client bytes are needed to decide.
        """
        assert context.layers

        def s(*layers):
            # Shorthand: does the current layer stack match this pattern?
            return stack_match(context, layers)

        tcp_based = context.client.transport_protocol == "tcp"
        udp_based = context.client.transport_protocol == "udp"

        # 1) check for --ignore/--allow
        if self._ignore_connection(context, data_client, data_server):
            return (
                layers.TCPLayer(context, ignore=not ctx.options.show_ignored_hosts)
                if tcp_based
                else layers.UDPLayer(context, ignore=not ctx.options.show_ignored_hosts)
            )

        # 2) Handle proxy modes with well-defined next protocol
        # 2a) Reverse proxy: derive from spec
        if s(modes.ReverseProxy):
            return self._setup_reverse_proxy(context, data_client)
        # 2b) Explicit HTTP proxies
        if s((modes.HttpProxy, modes.HttpUpstreamProxy)):
            return self._setup_explicit_http_proxy(context, data_client)

        # 3) Handle security protocols
        # 3a) TLS/DTLS
        is_tls_or_dtls = (
            tcp_based
            and starts_like_tls_record(data_client)
            or udp_based
            and starts_like_dtls_record(data_client)
        )
        if is_tls_or_dtls:
            server_tls = ServerTLSLayer(context)
            server_tls.child_layer = ClientTLSLayer(context)
            return server_tls
        # 3b) QUIC
        if udp_based and _starts_like_quic(data_client, context.server.address):
            server_quic = ServerQuicLayer(context)
            server_quic.child_layer = ClientQuicLayer(context)
            return server_quic

        # 4) Check for --tcp/--udp
        if tcp_based and self._is_destination_in_hosts(context, self.tcp_hosts):
            return layers.TCPLayer(context)
        if udp_based and self._is_destination_in_hosts(context, self.udp_hosts):
            return layers.UDPLayer(context)

        # 5) Handle application protocol
        # 5a) Do we have a known ALPN negotiation?
        if context.client.alpn:
            if context.client.alpn in HTTP_ALPNS:
                return layers.HttpLayer(context, HTTPMode.transparent)
            elif context.client.tls_version == "QUICv1":
                # TODO: Once we support more QUIC-based protocols, relax force_raw here.
                return layers.RawQuicLayer(context, force_raw=True)
        # 5b) Is it DNS?
        if context.server.address and context.server.address[1] in (53, 5353):
            return layers.DNSLayer(context)
        # 5c) We have no other specialized layers for UDP, so we fall back to raw forwarding.
        if udp_based:
            return layers.UDPLayer(context)
        # 5d) Check for raw tcp mode.
        probably_no_http = (
            # the first three bytes should be the HTTP verb, so A-Za-z is expected.
            len(data_client) < 3
            # HTTP would require whitespace...
            or b" " not in data_client
            # ...and that whitespace needs to be in the first line.
            or (data_client.find(b" ") > data_client.find(b"\n"))
            or not data_client[:3].isalpha()
            # a server greeting would be uncharacteristic.
            or data_server
            or data_client.startswith(b"SSH")
        )
        if ctx.options.rawtcp and probably_no_http:
            return layers.TCPLayer(context)
        # 5e) Assume HTTP by default.
        return layers.HttpLayer(context, HTTPMode.transparent)

    def _ignore_connection(
        self,
        context: Context,
        data_client: bytes,
        data_server: bytes,
    ) -> bool | None:
        """
        Returns:
            True, if the connection should be ignored.
            False, if it should not be ignored.

        Raises:
            NeedsMoreData, if we need to wait for more input data.
        """
        if not ctx.options.ignore_hosts and not ctx.options.allow_hosts:
            return False
        # Special handling for wireguard mode: if the hostname is "10.0.0.53", do not ignore the connection
        if isinstance(
            context.client.proxy_mode, mode_specs.WireGuardMode
        ) and context.server.address == ("10.0.0.53", 53):
            return False
        hostnames: list[str] = []
        if context.server.peername:
            host, port, *_ = context.server.peername
            hostnames.append(f"{host}:{port}")
        if context.server.address:
            host, port, *_ = context.server.address
            hostnames.append(f"{host}:{port}")

        # We also want to check for TLS SNI and HTTP host headers, but in order to ignore connections based on that
        # they must have a destination address. If they don't, we don't know how to establish an upstream connection
        # if we ignore.
        # NOTE(review): `port` below is only bound by the peername/address blocks
        # above; if neither is set this would raise NameError — presumably
        # unreachable in practice, but worth confirming.
        if host_header := self._get_host_header(context, data_client, data_server):
            if not re.search(r":\d+$", host_header):
                host_header = f"{host_header}:{port}"
            hostnames.append(host_header)
        if (
            client_hello := self._get_client_hello(context, data_client)
        ) and client_hello.sni:
            hostnames.append(f"{client_hello.sni}:{port}")
        if context.client.sni:
            # Hostname may be allowed, TLS is already established, and we have another next layer decision.
            hostnames.append(f"{context.client.sni}:{port}")

        if not hostnames:
            return False

        if ctx.options.allow_hosts:
            not_allowed = not any(
                re.search(rex, host, re.IGNORECASE)
                for host in hostnames
                for rex in ctx.options.allow_hosts
            )
            if not_allowed:
                return True

        if ctx.options.ignore_hosts:
            ignored = any(
                re.search(rex, host, re.IGNORECASE)
                for host in hostnames
                for rex in ctx.options.ignore_hosts
            )
            if ignored:
                return True

        return False

    @staticmethod
    def _get_host_header(
        context: Context,
        data_client: bytes,
        data_server: bytes,
    ) -> str | None:
        """
        Try to read a host header from data_client.

        Returns:
            The host header value, or None, if no host header was found.

        Raises:
            NeedsMoreData, if the HTTP request is incomplete.
        """
        if context.client.transport_protocol != "tcp" or data_server:
            return None

        # Looks like the start of an HTTP/1.x request line?
        host_header_expected = re.match(
            rb"[A-Z]{3,}.+HTTP/", data_client, re.IGNORECASE
        )
        if host_header_expected:
            if m := re.search(
                rb"\r\n(?:Host:\s+(.+?)\s*)?\r\n", data_client, re.IGNORECASE
            ):
                if host := m.group(1):
                    return host.decode("utf-8", "surrogateescape")
                else:
                    return None  # \r\n\r\n - header end came first.
            else:
                raise NeedsMoreData
        else:
            return None

    @staticmethod
    def _get_client_hello(context: Context, data_client: bytes) -> ClientHello | None:
        """
        Try to read a TLS/DTLS/QUIC ClientHello from data_client.

        Returns:
            A complete ClientHello, or None, if no ClientHello was found.

        Raises:
            NeedsMoreData, if the ClientHello is incomplete.
        """
        match context.client.transport_protocol:
            case "tcp":
                if starts_like_tls_record(data_client):
                    try:
                        ch = parse_client_hello(data_client)
                    except ValueError:
                        pass
                    else:
                        # parse_client_hello returns None for a partial record.
                        if ch is None:
                            raise NeedsMoreData
                        return ch
                return None
            case "udp":
                # Try QUIC first, then fall back to DTLS.
                try:
                    return quic_parse_client_hello_from_datagrams([data_client])
                except ValueError:
                    pass

                try:
                    ch = dtls_parse_client_hello(data_client)
                except ValueError:
                    pass
                else:
                    if ch is None:
                        raise NeedsMoreData
                    return ch
                return None
            case _:  # pragma: no cover
                assert_never(context.client.transport_protocol)

    @staticmethod
    def _setup_reverse_proxy(context: Context, data_client: bytes) -> Layer:
        """Build the layer stack for reverse proxy mode from the scheme in the mode spec."""
        spec = cast(mode_specs.ReverseMode, context.client.proxy_mode)
        stack = tunnel.LayerStack()

        match spec.scheme:
            case "http":
                # Plain-HTTP upstream; client may still speak TLS to us.
                if starts_like_tls_record(data_client):
                    stack /= ClientTLSLayer(context)
                stack /= HttpLayer(context, HTTPMode.transparent)
            case "https":
                if context.client.transport_protocol == "udp":
                    stack /= ServerQuicLayer(context)
                    stack /= ClientQuicLayer(context)
                    stack /= HttpLayer(context, HTTPMode.transparent)
                else:
                    stack /= ServerTLSLayer(context)
                    if starts_like_tls_record(data_client):
                        stack /= ClientTLSLayer(context)
                    stack /= HttpLayer(context, HTTPMode.transparent)

            case "tcp":
                if starts_like_tls_record(data_client):
                    stack /= ClientTLSLayer(context)
                stack /= TCPLayer(context)
            case "tls":
                stack /= ServerTLSLayer(context)
                if starts_like_tls_record(data_client):
                    stack /= ClientTLSLayer(context)
                stack /= TCPLayer(context)

            case "udp":
                if starts_like_dtls_record(data_client):
                    stack /= ClientTLSLayer(context)
                stack /= UDPLayer(context)
            case "dtls":
                stack /= ServerTLSLayer(context)
                if starts_like_dtls_record(data_client):
                    stack /= ClientTLSLayer(context)
                stack /= UDPLayer(context)

            case "dns":
                # TODO: DNS-over-TLS / DNS-over-DTLS
                # is_tls_or_dtls = (
                #     context.client.transport_protocol == "tcp" and starts_like_tls_record(data_client)
                #     or
                #     context.client.transport_protocol == "udp" and starts_like_dtls_record(data_client)
                # )
                # if is_tls_or_dtls:
                #     stack /= ClientTLSLayer(context)
                stack /= DNSLayer(context)

            case "http3":
                stack /= ServerQuicLayer(context)
                stack /= ClientQuicLayer(context)
                stack /= HttpLayer(context, HTTPMode.transparent)
            case "quic":
                stack /= ServerQuicLayer(context)
                stack /= ClientQuicLayer(context)
                stack /= RawQuicLayer(context, force_raw=True)

            case _:  # pragma: no cover
                assert_never(spec.scheme)

        return stack[0]

    @staticmethod
    def _setup_explicit_http_proxy(context: Context, data_client: bytes) -> Layer:
        """Build the layer stack for explicit (regular/upstream) HTTP proxy modes."""
        stack = tunnel.LayerStack()

        if context.client.transport_protocol == "udp":
            stack /= layers.ClientQuicLayer(context)
        elif starts_like_tls_record(data_client):
            # Secure Web Proxy: the client talks TLS to the proxy itself.
            stack /= layers.ClientTLSLayer(context)

        if isinstance(context.layers[0], modes.HttpUpstreamProxy):
            stack /= layers.HttpLayer(context, HTTPMode.upstream)
        else:
            stack /= layers.HttpLayer(context, HTTPMode.regular)

        return stack[0]

    @staticmethod
    def _is_destination_in_hosts(context: Context, hosts: Iterable[re.Pattern]) -> bool:
        """True if the server address or the client's SNI matches any pattern."""
        return any(
            (context.server.address and rex.search(context.server.address[0]))
            or (context.client.sni and rex.search(context.client.sni))
            for rex in hosts
        )
|
||||
|
||||
|
||||
# https://www.iana.org/assignments/quic/quic.xhtml
KNOWN_QUIC_VERSIONS = {
    0x00000001,  # QUIC v1
    0x51303433,  # Google QUIC Q043
    0x51303436,  # Google QUIC Q046
    0x51303530,  # Google QUIC Q050
    0x6B3343CF,  # QUIC v2
    0x709A50C4,  # QUIC v2 draft codepoint
}

# Destination ports at which an unidentified UDP datagram is guessed to be QUIC.
TYPICAL_QUIC_PORTS = {80, 443, 8443}
|
||||
|
||||
|
||||
def _starts_like_quic(data_client: bytes, server_address: Address | None) -> bool:
    """
    Make an educated guess on whether this could be QUIC.
    This turns out to be quite hard in practice as 1-RTT packets are hardly distinguishable from noise.

    Returns:
        True, if the passed bytes could be the start of a QUIC packet.
        False, otherwise.
    """
    # Minimum size: 1 flag byte + 1+ packet number bytes + 16+ bytes encrypted payload;
    # and anything that parses as DTLS is not QUIC.
    if len(data_client) < 18 or starts_like_dtls_record(data_client):
        return False
    # TODO: Add more checks here to detect true negatives.

    # Long header packets carry an explicit version field we can inspect.
    if data_client[0] & 0x80:
        version = int.from_bytes(data_client[1:5], "big")
        # https://www.rfc-editor.org/rfc/rfc9000.html#name-versions
        # 0x?a?a?a?a versions are reserved for forcing version negotiation.
        if version in KNOWN_QUIC_VERSIONS or version & 0x0F0F0F0F == 0x0A0A0A0A:
            return True
    # Short header, or unknown version: ¯\_(ツ)_/¯ — we can't even rely on the
    # QUIC bit (https://datatracker.ietf.org/doc/rfc9287/), so fall back to
    # guessing by destination port.
    return bool(server_address and server_address[1] in TYPICAL_QUIC_PORTS)
|
||||
34
venv/Lib/site-packages/mitmproxy/addons/onboarding.py
Normal file
@@ -0,0 +1,34 @@
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy.addons import asgiapp
|
||||
from mitmproxy.addons.onboardingapp import app
|
||||
|
||||
APP_HOST = "mitm.it"
|
||||
|
||||
|
||||
class Onboarding(asgiapp.WSGIApp):
    """Addon serving the onboarding web app (certificate downloads) at a magic hostname."""

    name = "onboarding"

    def __init__(self):
        # Serve the Flask onboarding app for requests to APP_HOST (any port).
        super().__init__(app, APP_HOST, None)

    def load(self, loader):
        """Register the onboarding options."""
        loader.add_option(
            "onboarding", bool, True, "Toggle the mitmproxy onboarding app."
        )
        loader.add_option(
            "onboarding_host",
            str,
            APP_HOST,
            """
            Onboarding app domain. For transparent mode, use an IP when a DNS
            entry for the app domain is not present.
            """,
        )

    def configure(self, updated):
        """Sync the served hostname and certificate directory with current options."""
        self.host = ctx.options.onboarding_host
        app.config["CONFDIR"] = ctx.options.confdir

    async def request(self, f):
        """Serve the request, unless the onboarding app has been disabled."""
        if ctx.options.onboarding:
            await super().request(f)
|
||||
@@ -0,0 +1,63 @@
|
||||
import os
|
||||
|
||||
from flask import Flask
|
||||
from flask import render_template
|
||||
|
||||
from mitmproxy.options import CONF_BASENAME
|
||||
from mitmproxy.options import CONF_DIR
|
||||
from mitmproxy.utils.magisk import write_magisk_module
|
||||
|
||||
# Flask application backing the mitm.it onboarding site.
app = Flask(__name__)
# will be overridden in the addon, setting this here so that the Flask app can be run standalone.
app.config["CONFDIR"] = CONF_DIR
|
||||
|
||||
|
||||
@app.route("/")
def index():
    """Render the onboarding landing page."""
    page = render_template("index.html")
    return page
|
||||
|
||||
|
||||
@app.route("/cert/pem")
def pem():
    """Download the CA certificate in PEM format."""
    mime = "application/x-x509-ca-cert"
    return read_cert("pem", mime)
|
||||
|
||||
|
||||
@app.route("/cert/p12")
def p12():
    """Download the CA certificate as a PKCS#12 bundle."""
    mime = "application/x-pkcs12"
    return read_cert("p12", mime)
|
||||
|
||||
|
||||
@app.route("/cert/cer")
def cer():
    """Download the CA certificate in CER format."""
    mime = "application/x-x509-ca-cert"
    return read_cert("cer", mime)
|
||||
|
||||
|
||||
@app.route("/cert/magisk")
def magisk():
    """Serve the mitmproxy CA as a Magisk module zip, generating it on first request."""
    # Plain string literal: the previous f-string had no placeholders (ruff F541).
    filename = CONF_BASENAME + "-magisk-module.zip"
    p = os.path.join(app.config["CONFDIR"], filename)
    p = os.path.expanduser(p)

    # Build the module lazily and cache it on disk for subsequent downloads.
    if not os.path.exists(p):
        write_magisk_module(p)

    with open(p, "rb") as f:
        cert = f.read()

    # {filename=!s} renders as `filename=<value>`, producing a standard
    # Content-Disposition header.
    return cert, {
        "Content-Type": "application/zip",
        "Content-Disposition": f"attachment; {filename=!s}",
    }
|
||||
|
||||
|
||||
def read_cert(ext, content_type):
    """Read the CA certificate with the given file extension from the config
    directory and return a (body, headers) Flask response tuple."""
    filename = CONF_BASENAME + f"-ca-cert.{ext}"
    path = os.path.expanduser(os.path.join(app.config["CONFDIR"], filename))

    with open(path, "rb") as fh:
        payload = fh.read()

    headers = {
        "Content-Type": content_type,
        "Content-Disposition": f"attachment; {filename=!s}",
    }
    return payload, headers
|
||||
6
venv/Lib/site-packages/mitmproxy/addons/onboardingapp/static/bootstrap.min.css
vendored
Normal file
|
After Width: | Height: | Size: 5.3 KiB |
|
After Width: | Height: | Size: 121 KiB |
@@ -0,0 +1,44 @@
|
||||
/* Layout for the per-platform entries on the onboarding page. */
.media {
    min-height: 110px;
}
.media svg {
    width: 64px;
    margin-right: 1rem !important;
}

.instructions {
    padding-top: 1rem;
    padding-bottom: 1rem;
}

/* CSS-only collapsible: clicking a "show" link targets it via the URL
   fragment, which reveals the sibling instructions and the "hide" link. */
.show-instructions:target, .hide-instructions, .instructions {
    display: none;
}
.show-instructions:target ~ .hide-instructions {
    display: inline-block;
}
.show-instructions:target ~ .instructions {
    display: inherit;
}

/* Brand colors for the platform/browser icons. */
.fa-apple {
    color: #666;
}

.fa-windows {
    color: #0078D7;
}

.fa-firefox-browser {
    color: #E25821;
}

.fa-android {
    margin-top: 10px;
    color: #3DDC84;
}

.fa-certificate {
    color: #FFBB00;
}
|
||||
@@ -0,0 +1 @@
|
||||
<svg aria-hidden="true" focusable="false" data-prefix="fab" data-icon="android" class="svg-inline--fa fa-android fa-w-18" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 576 512"><path fill="currentColor" d="M420.55,301.93a24,24,0,1,1,24-24,24,24,0,0,1-24,24m-265.1,0a24,24,0,1,1,24-24,24,24,0,0,1-24,24m273.7-144.48,47.94-83a10,10,0,1,0-17.27-10h0l-48.54,84.07a301.25,301.25,0,0,0-246.56,0L116.18,64.45a10,10,0,1,0-17.27,10h0l47.94,83C64.53,202.22,8.24,285.55,0,384H576c-8.24-98.45-64.54-181.78-146.85-226.55"></path></svg>
|
||||
|
After Width: | Height: | Size: 535 B |
@@ -0,0 +1 @@
|
||||
<svg aria-hidden="true" focusable="false" data-prefix="fab" data-icon="apple" class="svg-inline--fa fa-apple fa-w-12" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 384 512"><path fill="currentColor" d="M318.7 268.7c-.2-36.7 16.4-64.4 50-84.8-18.8-26.9-47.2-41.7-84.7-44.6-35.5-2.8-74.3 20.7-88.5 20.7-15 0-49.4-19.7-76.4-19.7C63.3 141.2 4 184.8 4 273.5q0 39.3 14.4 81.2c12.8 36.7 59 126.7 107.2 125.2 25.2-.6 43-17.9 75.8-17.9 31.8 0 48.3 17.9 76.4 17.9 48.6-.7 90.4-82.5 102.6-119.3-65.2-30.7-61.7-90-61.7-91.9zm-56.6-164.2c27.3-32.4 24.8-61.9 24-72.5-24.1 1.4-52 16.4-67.9 34.9-17.5 19.8-27.8 44.3-25.6 71.9 26.1 2 49.9-11.4 69.5-34.3z"></path></svg>
|
||||
|
After Width: | Height: | Size: 665 B |
@@ -0,0 +1 @@
|
||||
<svg aria-hidden="true" focusable="false" data-prefix="fas" data-icon="certificate" class="svg-inline--fa fa-certificate fa-w-16" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M458.622 255.92l45.985-45.005c13.708-12.977 7.316-36.039-10.664-40.339l-62.65-15.99 17.661-62.015c4.991-17.838-11.829-34.663-29.661-29.671l-61.994 17.667-15.984-62.671C337.085.197 313.765-6.276 300.99 7.228L256 53.57 211.011 7.229c-12.63-13.351-36.047-7.234-40.325 10.668l-15.984 62.671-61.995-17.667C74.87 57.907 58.056 74.738 63.046 92.572l17.661 62.015-62.65 15.99C.069 174.878-6.31 197.944 7.392 210.915l45.985 45.005-45.985 45.004c-13.708 12.977-7.316 36.039 10.664 40.339l62.65 15.99-17.661 62.015c-4.991 17.838 11.829 34.663 29.661 29.671l61.994-17.667 15.984 62.671c4.439 18.575 27.696 24.018 40.325 10.668L256 458.61l44.989 46.001c12.5 13.488 35.987 7.486 40.325-10.668l15.984-62.671 61.994 17.667c17.836 4.994 34.651-11.837 29.661-29.671l-17.661-62.015 62.65-15.99c17.987-4.302 24.366-27.367 10.664-40.339l-45.984-45.004z"></path></svg>
|
||||
|
After Width: | Height: | Size: 1.0 KiB |
@@ -0,0 +1 @@
|
||||
<svg aria-hidden="true" focusable="false" data-prefix="fab" data-icon="firefox-browser" class="svg-inline--fa fa-firefox-browser fa-w-16" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M189.37,152.86Zm-58.74-29.37C130.79,123.5,130.71,123.5,130.63,123.49Zm351.42,45.35c-10.61-25.5-32.08-53-48.94-61.73,13.72,26.89,21.67,53.88,24.7,74,0,0,0,.14.05.41-27.58-68.75-74.35-96.47-112.55-156.83-1.93-3.05-3.86-6.11-5.74-9.33-1-1.65-1.86-3.34-2.69-5.05A44.88,44.88,0,0,1,333.24.69a.63.63,0,0,0-.55-.66.9.9,0,0,0-.46,0l-.12.07-.18.1.1-.14c-54.23,31.77-76.72,87.38-82.5,122.78a130,130,0,0,0-48.33,12.33,6.25,6.25,0,0,0-3.09,7.75,6.13,6.13,0,0,0,7.79,3.79l.52-.21a117.84,117.84,0,0,1,42.11-11l1.42-.1c2-.12,4-.2,6-.22A122.61,122.61,0,0,1,291,140c.67.2,1.32.42,2,.63,1.89.57,3.76,1.2,5.62,1.87,1.36.5,2.71,1,4.05,1.58,1.09.44,2.18.88,3.25,1.35q2.52,1.13,5,2.35c.75.37,1.5.74,2.25,1.13q2.4,1.26,4.74,2.63,1.51.87,3,1.8a124.89,124.89,0,0,1,42.66,44.13c-13-9.15-36.35-18.19-58.82-14.28,87.74,43.86,64.18,194.9-57.39,189.2a108.43,108.43,0,0,1-31.74-6.12c-2.42-.91-4.8-1.89-7.16-2.93-1.38-.63-2.76-1.27-4.12-2C174.5,346,149.9,316.92,146.83,281.59c0,0,11.25-41.95,80.62-41.95,7.5,0,28.93-20.92,29.33-27-.09-2-42.54-18.87-59.09-35.18-8.85-8.71-13.05-12.91-16.77-16.06a69.58,69.58,0,0,0-6.31-4.77A113.05,113.05,0,0,1,173.92,97c-25.06,11.41-44.55,29.45-58.71,45.37h-.12c-9.67-12.25-9-52.65-8.43-61.08-.12-.53-7.22,3.68-8.15,4.31a178.54,178.54,0,0,0-23.84,20.43A214,214,0,0,0,51.9,133.36l0,0a.08.08,0,0,1,0,0,205.84,205.84,0,0,0-32.73,73.9c-.06.27-2.33,10.21-4,22.48q-.42,2.87-.78,5.74c-.57,3.69-1,7.71-1.44,14,0,.24,0,.48-.05.72-.18,2.71-.34,5.41-.49,8.12,0,.41,0,.82,0,1.24,0,134.7,109.21,243.89,243.92,243.89,120.64,0,220.82-87.58,240.43-202.62.41-3.12.74-6.26,1.11-9.41,4.85-41.83-.54-85.79-15.82-122.55Z"></path></svg>
|
||||
|
After Width: | Height: | Size: 1.8 KiB |
@@ -0,0 +1 @@
|
||||
<svg aria-hidden="true" focusable="false" data-prefix="fab" data-icon="linux" class="svg-inline--fa fa-linux fa-w-14" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><path fill="currentColor" d="M220.8 123.3c1 .5 1.8 1.7 3 1.7 1.1 0 2.8-.4 2.9-1.5.2-1.4-1.9-2.3-3.2-2.9-1.7-.7-3.9-1-5.5-.1-.4.2-.8.7-.6 1.1.3 1.3 2.3 1.1 3.4 1.7zm-21.9 1.7c1.2 0 2-1.2 3-1.7 1.1-.6 3.1-.4 3.5-1.6.2-.4-.2-.9-.6-1.1-1.6-.9-3.8-.6-5.5.1-1.3.6-3.4 1.5-3.2 2.9.1 1 1.8 1.5 2.8 1.4zM420 403.8c-3.6-4-5.3-11.6-7.2-19.7-1.8-8.1-3.9-16.8-10.5-22.4-1.3-1.1-2.6-2.1-4-2.9-1.3-.8-2.7-1.5-4.1-2 9.2-27.3 5.6-54.5-3.7-79.1-11.4-30.1-31.3-56.4-46.5-74.4-17.1-21.5-33.7-41.9-33.4-72C311.1 85.4 315.7.1 234.8 0 132.4-.2 158 103.4 156.9 135.2c-1.7 23.4-6.4 41.8-22.5 64.7-18.9 22.5-45.5 58.8-58.1 96.7-6 17.9-8.8 36.1-6.2 53.3-6.5 5.8-11.4 14.7-16.6 20.2-4.2 4.3-10.3 5.9-17 8.3s-14 6-18.5 14.5c-2.1 3.9-2.8 8.1-2.8 12.4 0 3.9.6 7.9 1.2 11.8 1.2 8.1 2.5 15.7.8 20.8-5.2 14.4-5.9 24.4-2.2 31.7 3.8 7.3 11.4 10.5 20.1 12.3 17.3 3.6 40.8 2.7 59.3 12.5 19.8 10.4 39.9 14.1 55.9 10.4 11.6-2.6 21.1-9.6 25.9-20.2 12.5-.1 26.3-5.4 48.3-6.6 14.9-1.2 33.6 5.3 55.1 4.1.6 2.3 1.4 4.6 2.5 6.7v.1c8.3 16.7 23.8 24.3 40.3 23 16.6-1.3 34.1-11 48.3-27.9 13.6-16.4 36-23.2 50.9-32.2 7.4-4.5 13.4-10.1 13.9-18.3.4-8.2-4.4-17.3-15.5-29.7zM223.7 87.3c9.8-22.2 34.2-21.8 44-.4 6.5 14.2 3.6 30.9-4.3 40.4-1.6-.8-5.9-2.6-12.6-4.9 1.1-1.2 3.1-2.7 3.9-4.6 4.8-11.8-.2-27-9.1-27.3-7.3-.5-13.9 10.8-11.8 23-4.1-2-9.4-3.5-13-4.4-1-6.9-.3-14.6 2.9-21.8zM183 75.8c10.1 0 20.8 14.2 19.1 33.5-3.5 1-7.1 2.5-10.2 4.6 1.2-8.9-3.3-20.1-9.6-19.6-8.4.7-9.8 21.2-1.8 28.1 1 .8 1.9-.2-5.9 5.5-15.6-14.6-10.5-52.1 8.4-52.1zm-13.6 60.7c6.2-4.6 13.6-10 14.1-10.5 4.7-4.4 13.5-14.2 27.9-14.2 7.1 0 15.6 2.3 25.9 8.9 6.3 4.1 11.3 4.4 22.6 9.3 8.4 3.5 13.7 9.7 10.5 18.2-2.6 7.1-11 14.4-22.7 18.1-11.1 3.6-19.8 16-38.2 14.9-3.9-.2-7-1-9.6-2.1-8-3.5-12.2-10.4-20-15-8.6-4.8-13.2-10.4-14.7-15.3-1.4-4.9 0-9 4.2-12.3zm3.3 334c-2.7 35.1-43.9 34.4-75.3 
18-29.9-15.8-68.6-6.5-76.5-21.9-2.4-4.7-2.4-12.7 2.6-26.4v-.2c2.4-7.6.6-16-.6-23.9-1.2-7.8-1.8-15 .9-20 3.5-6.7 8.5-9.1 14.8-11.3 10.3-3.7 11.8-3.4 19.6-9.9 5.5-5.7 9.5-12.9 14.3-18 5.1-5.5 10-8.1 17.7-6.9 8.1 1.2 15.1 6.8 21.9 16l19.6 35.6c9.5 19.9 43.1 48.4 41 68.9zm-1.4-25.9c-4.1-6.6-9.6-13.6-14.4-19.6 7.1 0 14.2-2.2 16.7-8.9 2.3-6.2 0-14.9-7.4-24.9-13.5-18.2-38.3-32.5-38.3-32.5-13.5-8.4-21.1-18.7-24.6-29.9s-3-23.3-.3-35.2c5.2-22.9 18.6-45.2 27.2-59.2 2.3-1.7.8 3.2-8.7 20.8-8.5 16.1-24.4 53.3-2.6 82.4.6-20.7 5.5-41.8 13.8-61.5 12-27.4 37.3-74.9 39.3-112.7 1.1.8 4.6 3.2 6.2 4.1 4.6 2.7 8.1 6.7 12.6 10.3 12.4 10 28.5 9.2 42.4 1.2 6.2-3.5 11.2-7.5 15.9-9 9.9-3.1 17.8-8.6 22.3-15 7.7 30.4 25.7 74.3 37.2 95.7 6.1 11.4 18.3 35.5 23.6 64.6 3.3-.1 7 .4 10.9 1.4 13.8-35.7-11.7-74.2-23.3-84.9-4.7-4.6-4.9-6.6-2.6-6.5 12.6 11.2 29.2 33.7 35.2 59 2.8 11.6 3.3 23.7.4 35.7 16.4 6.8 35.9 17.9 30.7 34.8-2.2-.1-3.2 0-4.2 0 3.2-10.1-3.9-17.6-22.8-26.1-19.6-8.6-36-8.6-38.3 12.5-12.1 4.2-18.3 14.7-21.4 27.3-2.8 11.2-3.6 24.7-4.4 39.9-.5 7.7-3.6 18-6.8 29-32.1 22.9-76.7 32.9-114.3 7.2zm257.4-11.5c-.9 16.8-41.2 19.9-63.2 46.5-13.2 15.7-29.4 24.4-43.6 25.5s-26.5-4.8-33.7-19.3c-4.7-11.1-2.4-23.1 1.1-36.3 3.7-14.2 9.2-28.8 9.9-40.6.8-15.2 1.7-28.5 4.2-38.7 2.6-10.3 6.6-17.2 13.7-21.1.3-.2.7-.3 1-.5.8 13.2 7.3 26.6 18.8 29.5 12.6 3.3 30.7-7.5 38.4-16.3 9-.3 15.7-.9 22.6 5.1 9.9 8.5 7.1 30.3 17.1 41.6 10.6 11.6 14 19.5 13.7 24.6zM173.3 148.7c2 1.9 4.7 4.5 8 7.1 6.6 5.2 15.8 10.6 27.3 10.6 11.6 0 22.5-5.9 31.8-10.8 4.9-2.6 10.9-7 14.8-10.4s5.9-6.3 3.1-6.6-2.6 2.6-6 5.1c-4.4 3.2-9.7 7.4-13.9 9.8-7.4 4.2-19.5 10.2-29.9 10.2s-18.7-4.8-24.9-9.7c-3.1-2.5-5.7-5-7.7-6.9-1.5-1.4-1.9-4.6-4.3-4.9-1.4-.1-1.8 3.7 1.7 6.5z"></path></svg>
|
||||
|
After Width: | Height: | Size: 3.6 KiB |
@@ -0,0 +1 @@
|
||||
<svg aria-hidden="true" focusable="false" data-prefix="fab" data-icon="windows" class="svg-inline--fa fa-windows fa-w-14" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><path fill="currentColor" d="M0 93.7l183.6-25.3v177.4H0V93.7zm0 324.6l183.6 25.3V268.4H0v149.9zm203.8 28L448 480V268.4H203.8v177.9zm0-380.6v180.1H448V32L203.8 65.7z"></path></svg>
|
||||
|
After Width: | Height: | Size: 369 B |
@@ -0,0 +1,143 @@
|
||||
{% extends "layout.html" %}
|
||||
{% block content %}
|
||||
<div class="row justify-content-md-center">
|
||||
<div class="col-md-9">
|
||||
<!--suppress HtmlUnknownTag -->
|
||||
<h3 class="my-4">Install mitmproxy's Certificate Authority</h3>
|
||||
|
||||
{% macro entry(title, icon, filetype="pem") -%}
|
||||
<li class="media">
|
||||
{% include 'icons/' + icon + '-brands.svg' %}
|
||||
<div class="media-body">
|
||||
<h5 class="mt-0">{{ title | safe }}</h5>
|
||||
<a class="btn btn-sm btn-success" href="/cert/{{ filetype }}" target="_blank">🔏 Get mitmproxy-ca-cert.{{
|
||||
filetype }}</a>
|
||||
<a class="btn btn-sm btn-info show-instructions" href="#{{ title.split(' ')[0] }}" id="{{ title.split(' ')[0] }}">📖
|
||||
Show Instructions</a>
|
||||
<a class="btn btn-sm btn-info hide-instructions" href="#/">📖 Hide Instructions</a>
|
||||
<div class="instructions">{{ caller() }}</div>
|
||||
</div>
|
||||
</li>
|
||||
{%- endmacro %}
|
||||
|
||||
<ul class="list-unstyled">
|
||||
{% call entry('Windows', 'windows', 'p12') %}
|
||||
<h5>Manual Installation</h5>
|
||||
<ol>
|
||||
<li>Double-click the P12 file to start the import wizard.</li>
|
||||
<li>Select a certificate store location. This determines who will trust the certificate – only the current
|
||||
Windows user or everyone on the machine. Click <samp>Next</samp>.
|
||||
</li>
|
||||
<li>Click <samp>Next</samp> again.</li>
|
||||
<li>Leave <samp>Password</samp> blank and click <samp>Next</samp>.</li>
|
||||
<li><span class="text-danger">Select <samp>Place all certificates in the following store</samp></span>,
|
||||
then click <samp>Browse</samp>, and select <samp>Trusted Root Certification Authorities</samp>.<br>
|
||||
Click <samp>OK</samp> and <samp>Next</samp>.
|
||||
</li>
|
||||
<li>Click <samp>Finish</samp>.</li>
|
||||
<li>Click <samp>Yes</samp> to confirm the warning dialog.</li>
|
||||
</ol>
|
||||
<h5>Automated Installation</h5>
|
||||
<ol>
|
||||
<li>Run <code>certutil.exe -addstore root mitmproxy-ca-cert.cer</code>
|
||||
(<a href="https://technet.microsoft.com/en-us/library/cc732443.aspx">details</a>).
|
||||
</li>
|
||||
</ol>
|
||||
{% endcall %}
|
||||
{% call entry('Linux', 'linux') %}
|
||||
<h5>Ubuntu/Debian</h5>
|
||||
<ol>
|
||||
<li><code>mv mitmproxy-ca-cert.pem /usr/local/share/ca-certificates/mitmproxy.crt</code></li>
|
||||
<li><code>sudo update-ca-certificates</code></li>
|
||||
</ol>
|
||||
<h5>Fedora</h5>
|
||||
<ol>
|
||||
<li><code>mv mitmproxy-ca-cert.pem /etc/pki/ca-trust/source/anchors/</code></li>
|
||||
<li><code>sudo update-ca-trust</code></li>
|
||||
</ol>
|
||||
<h5>Arch Linux</h5>
|
||||
<ol>
|
||||
<code>sudo trust anchor --store mitmproxy-ca-cert.pem</code>
|
||||
</ol>
|
||||
{% endcall %}
|
||||
{% call entry('macOS', 'apple') %}
|
||||
<h5>Manual Installation</h5>
|
||||
<ol>
|
||||
<li>Double-click the PEM file to open the <samp>Keychain Access</samp> application.</li>
|
||||
<li>Locate the new certificate "mitmproxy" in the list and double-click it.</li>
|
||||
<li>Change <samp>Secure Socket Layer (SSL)</samp> to <samp>Always Trust</samp>.</li>
|
||||
<li>Close the dialog window and enter your password if prompted.</li>
|
||||
</ol>
|
||||
<h5>Automated Installation</h5>
|
||||
<ol>
|
||||
<li><code>sudo security add-trusted-cert -d -p ssl -p basic -k /Library/Keychains/System.keychain mitmproxy-ca-cert.pem</code></li>
|
||||
</ol>
|
||||
{% endcall %}
|
||||
{% call entry('iOS <small>– please read the instructions!</small>', 'apple') %}
|
||||
<h5>iOS 13+</h5>
|
||||
<ol>
|
||||
<li>Use Safari to download the certificate. Other browsers may not open the proper installation prompt.</li>
|
||||
<li>Install the new Profile (<samp>Settings -> General -> VPN & Device Management</samp>).</li>
|
||||
<li><span class="text-danger"><strong>Important:</strong> Go to <samp>Settings -> General -> About -> Certificate Trust Settings</samp>.
|
||||
Toggle <samp>mitmproxy</samp> to <samp>ON</samp>.</span></li>
|
||||
</ol>
|
||||
{% endcall %}
|
||||
{% call entry('Android', 'android', 'cer') %}
|
||||
<h5>Android 10+</h5>
|
||||
<ol class="mb-2">
|
||||
<li>Open the downloaded CER file.</li>
|
||||
<li>Enter <samp>mitmproxy</samp> (or anything else) as the certificate name.</li>
|
||||
<li>For credential use, select <samp>VPN and apps</samp>.</li>
|
||||
<li>Click OK.</li>
|
||||
</ol>
|
||||
|
||||
<p>Some Android distributions require you to install the certificate via <samp>Settings -> Security -> Advanced ->
|
||||
Encryption and credentials -> Install a certificate -> CA certificate</samp> (or similar) instead.</p>
|
||||
|
||||
<div class="alert alert-warning" role="alert">
|
||||
<p><strong>Warning: </strong>Apps that target Android API Level 24 (introduced in 2016) and above only accept
|
||||
certificates from the system trust store
|
||||
(<a href="https://github.com/mitmproxy/mitmproxy/issues/2054">#2054</a>).
|
||||
User-added CAs are not accepted unless the application manually opts in. Except for browsers, you need to
|
||||
patch most apps manually
|
||||
(<a href="https://developer.android.com/training/articles/security-config">Android network security config</a>).
|
||||
</p>
|
||||
<p>
|
||||
Alternatively, if you have rooted the device and have Magisk installed, you can install <a href="/cert/magisk">this Magisk module</a> via the Magisk Manager app.
|
||||
</p>
|
||||
</div>
|
||||
{% endcall %}
|
||||
{% call entry('Firefox <small>(does not use the OS root certificates)</small>', 'firefox-browser') %}
|
||||
<h5>Firefox</h5>
|
||||
<ol>
|
||||
<li>Open <samp>Options -> Privacy & Security</samp> and click <samp>View Certificates...</samp>
|
||||
at the bottom of the page.</li>
|
||||
<li>Click <samp>Import...</samp> and select the downloaded certificate.</li>
|
||||
<li>Enable <samp>Trust this CA to identify websites</samp> and click <samp>OK</samp>.</li>
|
||||
</ol>
|
||||
{% endcall %}
|
||||
<li class="media">
|
||||
{% include 'icons/certificate-solid.svg' %}
|
||||
<div class="media-body">
|
||||
<h5 class="mt-0">Other Platforms</h5>
|
||||
<a class="btn btn-sm btn-success" href="/cert/pem" target="_blank">🔏 Get mitmproxy-ca-cert.pem</a>
|
||||
<a class="btn btn-sm btn-success" href="/cert/p12" target="_blank">🔏 Get mitmproxy-ca-cert.p12</a>
|
||||
</div>
|
||||
</li>
|
||||
</ul>
|
||||
</div>
|
||||
|
||||
</div>
|
||||
<hr/>
|
||||
|
||||
<p>
|
||||
Other mitmproxy users cannot intercept your connection.
|
||||
<span class="text-muted">
|
||||
This page is served by your local mitmproxy instance. The certificate you are about to install has been uniquely
|
||||
generated on mitmproxy's first run and is not shared
|
||||
between mitmproxy installations.
|
||||
</span>
|
||||
</p>
|
||||
|
||||
|
||||
{% endblock %}
|
||||
@@ -0,0 +1,29 @@
|
||||
<!doctype html>
|
||||
<html lang="en">
|
||||
<head>
|
||||
<meta charset="utf-8">
|
||||
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
|
||||
|
||||
<title>mitmproxy</title>
|
||||
<link href="static/bootstrap.min.css" rel="stylesheet">
|
||||
<link href="static/mitmproxy.css" rel="stylesheet">
|
||||
<link rel="icon" href="static/images/favicon.ico" type="image/x-icon"/>
|
||||
</head>
|
||||
|
||||
<body>
|
||||
<nav class="navbar navbar-dark bg-dark mb-3">
|
||||
<div class="container">
|
||||
<div class="navbar-header">
|
||||
<div class="navbar-brand">
|
||||
<img src="static/images/mitmproxy-long.png" height="30" alt="mitmproxy logo" class="align-top"/>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
</nav>
|
||||
<div class="container">
|
||||
{% block content %}
|
||||
{% endblock %}
|
||||
</div>
|
||||
|
||||
</body>
|
||||
</html>
|
||||
294
venv/Lib/site-packages/mitmproxy/addons/proxyauth.py
Normal file
@@ -0,0 +1,294 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import binascii
|
||||
import pathlib
|
||||
import weakref
|
||||
from abc import ABC
|
||||
from abc import abstractmethod
|
||||
from collections.abc import MutableMapping
|
||||
from typing import Optional
|
||||
|
||||
import ldap3
|
||||
|
||||
from mitmproxy import connection
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import http
|
||||
from mitmproxy.net.http import status_codes
|
||||
from mitmproxy.proxy import mode_specs
|
||||
from mitmproxy.proxy.layers import modes
|
||||
from mitmproxy.utils import htpasswd
|
||||
|
||||
REALM = "mitmproxy"
|
||||
|
||||
|
||||
class ProxyAuth:
    """
    Require authentication before allowing requests through the proxy.

    The concrete credential check is delegated to a `Validator` instance,
    chosen from the `proxyauth` option value in `configure()`.
    """

    # Active credential validator; None disables authentication entirely.
    validator: Validator | None = None

    def __init__(self) -> None:
        self.authenticated: MutableMapping[connection.Client, tuple[str, str]] = (
            weakref.WeakKeyDictionary()
        )
        """Contains all connections that are permanently authenticated after an HTTP CONNECT"""

    def load(self, loader):
        """Register the `proxyauth` option."""
        loader.add_option(
            "proxyauth",
            Optional[str],
            None,
            """
            Require proxy authentication. Format:
            "username:pass",
            "any" to accept any user/pass combination,
            "@path" to use an Apache htpasswd file,
            or "ldap[s]:url_server_ldap[:port]:dn_auth:password:dn_subtree[?search_filter_key=...]" for LDAP authentication.
            """,
        )

    def configure(self, updated):
        """(Re)build `self.validator` from the current `proxyauth` option value."""
        if "proxyauth" in updated:
            auth = ctx.options.proxyauth
            if auth:
                # Dispatch on the spec format documented in load().
                if auth == "any":
                    self.validator = AcceptAll()
                elif auth.startswith("@"):
                    self.validator = Htpasswd(auth)
                elif ctx.options.proxyauth.startswith("ldap"):
                    self.validator = Ldap(auth)
                elif ":" in ctx.options.proxyauth:
                    self.validator = SingleUser(auth)
                else:
                    raise exceptions.OptionsError("Invalid proxyauth specification.")
            else:
                self.validator = None

    def socks5_auth(self, data: modes.Socks5AuthData) -> None:
        """Validate SOCKS5 username/password and remember the authenticated client."""
        if self.validator and self.validator(data.username, data.password):
            data.valid = True
            self.authenticated[data.client_conn] = data.username, data.password

    def http_connect(self, f: http.HTTPFlow) -> None:
        """Authenticate an HTTP CONNECT request and remember the client on success."""
        if self.validator and self.authenticate_http(f):
            # Make a note that all further requests over this connection are ok.
            self.authenticated[f.client_conn] = f.metadata["proxyauth"]

    def requestheaders(self, f: http.HTTPFlow) -> None:
        """Authenticate each request unless the connection is already trusted."""
        if self.validator:
            # Is this connection authenticated by a previous HTTP CONNECT?
            if f.client_conn in self.authenticated:
                f.metadata["proxyauth"] = self.authenticated[f.client_conn]
            elif f.is_replay:
                # Replayed flows are not re-authenticated.
                pass
            else:
                self.authenticate_http(f)

    def authenticate_http(self, f: http.HTTPFlow) -> bool:
        """
        Authenticate an HTTP request, returns if authentication was successful.

        If valid credentials are found, the matching authentication header is removed.
        If no or invalid credentials are found, flow.response is set to an error page.
        """
        assert self.validator
        username = None
        password = None
        is_valid = False

        is_proxy = is_http_proxy(f)
        auth_header = http_auth_header(is_proxy)
        try:
            auth_value = f.request.headers.get(auth_header, "")
            scheme, username, password = parse_http_basic_auth(auth_value)
            is_valid = self.validator(username, password)
        except Exception:
            # Missing/malformed header or a validator failure is treated the
            # same as invalid credentials.
            pass

        if is_valid:
            f.metadata["proxyauth"] = (username, password)
            # Strip the credentials so they are not forwarded upstream.
            del f.request.headers[auth_header]
            return True
        else:
            f.response = make_auth_required_response(is_proxy)
            return False
|
||||
|
||||
|
||||
def make_auth_required_response(is_proxy: bool) -> http.Response:
    """
    Build the error response that prompts the client for credentials.

    Depending on whether mitmproxy acts as a proxy or as a plain HTTP
    server, this is a 407 with `Proxy-Authenticate` or a 401 with
    `WWW-Authenticate`.
    """
    if is_proxy:
        status_code = status_codes.PROXY_AUTH_REQUIRED
        challenge_header = "Proxy-Authenticate"
    else:
        status_code = status_codes.UNAUTHORIZED
        challenge_header = "WWW-Authenticate"

    reason = http.status_codes.RESPONSES[status_code]
    body = (
        f"<html>"
        f"<head><title>{status_code} {reason}</title></head>"
        f"<body><h1>{status_code} {reason}</h1></body>"
        f"</html>"
    )
    return http.Response.make(
        status_code,
        body,
        {challenge_header: f'Basic realm="{REALM}"'},
    )
|
||||
|
||||
|
||||
def http_auth_header(is_proxy: bool) -> str:
    """Return the credential header name matching the authentication mode."""
    return "Proxy-Authorization" if is_proxy else "Authorization"
|
||||
|
||||
|
||||
def is_http_proxy(f: http.HTTPFlow) -> bool:
    """
    Returns:
        - True, if authentication is done as if mitmproxy is a proxy
        - False, if authentication is done as if mitmproxy is an HTTP server
    """
    proxy_like_modes = (mode_specs.RegularMode, mode_specs.UpstreamMode)
    return isinstance(f.client_conn.proxy_mode, proxy_like_modes)
|
||||
|
||||
|
||||
def mkauth(username: str, password: str, scheme: str = "basic") -> str:
    """
    Craft a basic auth string.

    Note that `binascii.b2a_base64` appends a trailing newline; this is
    kept for backwards compatibility with existing callers.
    """
    credentials = f"{username}:{password}".encode("utf8")
    encoded = binascii.b2a_base64(credentials).decode("ascii")
    return f"{scheme} {encoded}"
|
||||
|
||||
|
||||
def parse_http_basic_auth(s: str) -> tuple[str, str, str]:
    """
    Parse a basic auth header value.

    Returns:
        A ``(scheme, username, password)`` tuple; the scheme keeps its
        original casing.

    Raises:
        ValueError: if the input is not a valid basic auth header.
    """
    scheme, authinfo = s.split()
    if scheme.lower() != "basic":
        raise ValueError("Unknown scheme")
    try:
        # Split on the first colon only: RFC 7617 forbids ":" in the
        # user-id but explicitly allows it in the password.
        user, password = (
            binascii.a2b_base64(authinfo.encode())
            .decode("utf8", "replace")
            .split(":", 1)
        )
    except binascii.Error as e:
        raise ValueError(str(e))
    return scheme, user, password
|
||||
|
||||
|
||||
class Validator(ABC):
    """Common interface for all username/password validators."""

    @abstractmethod
    def __call__(self, username: str, password: str) -> bool:
        """Return True iff the given credentials are valid."""
        raise NotImplementedError
|
||||
|
||||
|
||||
class AcceptAll(Validator):
    """Validator that approves any username/password combination."""

    def __call__(self, username: str, password: str) -> bool:
        return True
|
||||
|
||||
|
||||
class SingleUser(Validator):
    """Validate credentials against a single ``username:password`` pair."""

    def __init__(self, proxyauth: str):
        # Split on the first colon only so that passwords may contain ":".
        try:
            self.username, self.password = proxyauth.split(":", 1)
        except ValueError:
            raise exceptions.OptionsError("Invalid single-user auth specification.")

    def __call__(self, username: str, password: str) -> bool:
        return self.username == username and self.password == password
|
||||
|
||||
|
||||
class Htpasswd(Validator):
    """Validate credentials against an Apache htpasswd file ("@path" spec)."""

    def __init__(self, proxyauth: str):
        # Strip the leading "@" marker and expand "~" in the remaining path.
        path = pathlib.Path(proxyauth[1:]).expanduser()
        try:
            self.htpasswd = htpasswd.HtpasswdFile.from_file(path)
        except (ValueError, OSError) as e:
            raise exceptions.OptionsError(f"Could not open htpasswd file: {path}") from e

    def __call__(self, username: str, password: str) -> bool:
        return self.htpasswd.check_password(username, password)
|
||||
|
||||
|
||||
class Ldap(Validator):
    """
    Validate credentials against an LDAP directory.

    Spec format (see `parse_spec`):
        "ldap[s]:url[:port]:dn_auth:password:dn_subtree[?search_filter_key=...]"
    """

    conn: ldap3.Connection
    server: ldap3.Server
    # Subtree under which user entries are searched.
    dn_subtree: str
    # Attribute used to look up users (defaults to "cn").
    filter_key: str

    def __init__(self, proxyauth: str):
        (
            use_ssl,
            url,
            port,
            ldap_user,
            ldap_pass,
            self.dn_subtree,
            self.filter_key,
        ) = self.parse_spec(proxyauth)
        server = ldap3.Server(url, port=port, use_ssl=use_ssl)
        # Bind immediately with the search account so connection/credential
        # errors surface at configuration time.
        conn = ldap3.Connection(server, ldap_user, ldap_pass, auto_bind=True)
        self.conn = conn
        self.server = server

    @staticmethod
    def parse_spec(spec: str) -> tuple[bool, str, int | None, str, str, str, str]:
        """
        Parse an LDAP auth spec into
        (use_ssl, url, port, ldap_user, ldap_pass, dn_subtree, search_filter_key).

        Raises:
            exceptions.OptionsError: if the spec is malformed.
        """
        try:
            if spec.count(":") > 4:
                # Six colon-separated fields: an explicit port is included.
                (
                    security,
                    url,
                    port_str,
                    ldap_user,
                    ldap_pass,
                    dn_subtree,
                ) = spec.split(":")
                port = int(port_str)
            else:
                # Five fields: no port given, use the protocol default.
                security, url, ldap_user, ldap_pass, dn_subtree = spec.split(":")
                port = None

            if "?" in dn_subtree:
                # Optional query suffix, e.g. "?search_filter_key=uid".
                dn_subtree, search_str = dn_subtree.split("?")
                key, value = search_str.split("=")
                if key == "search_filter_key":
                    search_filter_key = value
                else:
                    raise ValueError
            else:
                search_filter_key = "cn"

            if security == "ldaps":
                use_ssl = True
            elif security == "ldap":
                use_ssl = False
            else:
                raise ValueError

            return (
                use_ssl,
                url,
                port,
                ldap_user,
                ldap_pass,
                dn_subtree,
                search_filter_key,
            )
        except ValueError:
            raise exceptions.OptionsError(f"Invalid LDAP specification: {spec}")

    def __call__(self, username: str, password: str) -> bool:
        """Return True iff `username` exists in the subtree and `password` binds."""
        # Reject empty credentials outright; an empty password would
        # otherwise risk an anonymous bind succeeding.
        if not username or not password:
            return False
        # Locate the user's DN below the configured subtree...
        self.conn.search(self.dn_subtree, f"({self.filter_key}={username})")
        if self.conn.response:
            # ...then verify the password by binding as that DN.
            c = ldap3.Connection(
                self.server, self.conn.response[0]["dn"], password, auto_bind=True
            )
            if c:
                return True
        return False
|
||||
393
venv/Lib/site-packages/mitmproxy/addons/proxyserver.py
Normal file
@@ -0,0 +1,393 @@
|
||||
"""
|
||||
This addon is responsible for starting/stopping the proxy server sockets/instances specified by the mode option.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import collections
|
||||
import ipaddress
|
||||
import logging
|
||||
from collections.abc import Iterable
|
||||
from collections.abc import Iterator
|
||||
from contextlib import contextmanager
|
||||
from typing import Optional
|
||||
|
||||
from wsproto.frame_protocol import Opcode
|
||||
|
||||
from mitmproxy import command
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import http
|
||||
from mitmproxy import platform
|
||||
from mitmproxy import tcp
|
||||
from mitmproxy import udp
|
||||
from mitmproxy import websocket
|
||||
from mitmproxy.connection import Address
|
||||
from mitmproxy.flow import Flow
|
||||
from mitmproxy.proxy import events
|
||||
from mitmproxy.proxy import mode_specs
|
||||
from mitmproxy.proxy import server_hooks
|
||||
from mitmproxy.proxy.layers.tcp import TcpMessageInjected
|
||||
from mitmproxy.proxy.layers.udp import UdpMessageInjected
|
||||
from mitmproxy.proxy.layers.websocket import WebSocketMessageInjected
|
||||
from mitmproxy.proxy.mode_servers import ProxyConnectionHandler
|
||||
from mitmproxy.proxy.mode_servers import ServerInstance
|
||||
from mitmproxy.proxy.mode_servers import ServerManager
|
||||
from mitmproxy.utils import asyncio_utils
|
||||
from mitmproxy.utils import human
|
||||
from mitmproxy.utils import signals
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Servers:
    """
    The set of currently managed proxy server instances, keyed by proxy mode.

    `update()` reconciles the running instances against a desired list of
    modes; the `changed` signal fires whenever the instance set is modified.
    """

    def __init__(self, manager: ServerManager):
        # Fired whenever the instance set changes (including before the
        # new servers have finished starting).
        self.changed = signals.AsyncSignal(lambda: None)
        self._instances: dict[mode_specs.ProxyMode, ServerInstance] = dict()
        # Serializes concurrent update() calls.
        self._lock = asyncio.Lock()
        self._manager = manager

    @property
    def is_updating(self) -> bool:
        # True while an update() call is in progress.
        return self._lock.locked()

    async def update(self, modes: Iterable[mode_specs.ProxyMode]) -> bool:
        """
        Start/stop server instances so that exactly `modes` are running
        (or none at all, if the `server` option is disabled).

        Returns:
            True if all starts and stops succeeded, False otherwise.
        """
        all_ok = True

        async with self._lock:
            new_instances: dict[mode_specs.ProxyMode, ServerInstance] = {}

            start_tasks = []
            if ctx.options.server:
                # Create missing modes and keep existing ones.
                for spec in modes:
                    if spec in self._instances:
                        instance = self._instances[spec]
                    else:
                        instance = ServerInstance.make(spec, self._manager)
                        start_tasks.append(instance.start())
                    new_instances[spec] = instance

            # Shutdown modes that have been removed from the list.
            stop_tasks = [
                s.stop()
                for spec, s in self._instances.items()
                if spec not in new_instances
            ]

            if not start_tasks and not stop_tasks:
                return (
                    True  # nothing to do, so we don't need to trigger `self.changed`.
                )

            self._instances = new_instances
            # Notify listeners about the new not-yet-started servers.
            await self.changed.send()

            # We first need to free ports before starting new servers.
            for ret in await asyncio.gather(*stop_tasks, return_exceptions=True):
                if ret:
                    all_ok = False
                    logger.error(str(ret))
            for ret in await asyncio.gather(*start_tasks, return_exceptions=True):
                if ret:
                    all_ok = False
                    logger.error(str(ret))

        # Second notification after all servers have been started/stopped.
        await self.changed.send()
        return all_ok

    def __len__(self) -> int:
        return len(self._instances)

    def __iter__(self) -> Iterator[ServerInstance]:
        return iter(self._instances.values())

    def __getitem__(self, mode: str | mode_specs.ProxyMode) -> ServerInstance:
        # Accept either a raw mode spec string or an already-parsed ProxyMode.
        if isinstance(mode, str):
            mode = mode_specs.ProxyMode.parse(mode)
        return self._instances[mode]
|
||||
|
||||
|
||||
class Proxyserver(ServerManager):
    """
    This addon runs the actual proxy server.
    """

    # Live connection handlers, keyed by client connection id
    # (a string id for most protocols; a (peername, sockname) tuple for UDP).
    connections: dict[tuple | str, ProxyConnectionHandler]
    servers: Servers

    is_running: bool
    # Local (ip, 0) source address for upstream connections, if configured.
    _connect_addr: Address | None = None

    def __init__(self):
        self.connections = {}
        self.servers = Servers(self)
        self.is_running = False

    def __repr__(self):
        return f"Proxyserver({len(self.connections)} active conns)"

    @command.command("proxyserver.active_connections")
    def active_connections(self) -> int:
        """Return the number of active client connections."""
        return len(self.connections)

    @contextmanager
    def register_connection(
        self, connection_id: tuple | str, handler: ProxyConnectionHandler
    ):
        """Track `handler` in `self.connections` for the duration of the context."""
        self.connections[connection_id] = handler
        try:
            yield
        finally:
            del self.connections[connection_id]

    def load(self, loader):
        """Register all proxy-server-related options."""
        loader.add_option(
            "store_streamed_bodies",
            bool,
            False,
            "Store HTTP request and response bodies when streamed (see `stream_large_bodies`). "
            "This increases memory consumption, but makes it possible to inspect streamed bodies.",
        )
        loader.add_option(
            "connection_strategy",
            str,
            "eager",
            "Determine when server connections should be established. When set to lazy, mitmproxy "
            "tries to defer establishing an upstream connection as long as possible. This makes it possible to "
            "use server replay while being offline. When set to eager, mitmproxy can detect protocols with "
            "server-side greetings, as well as accurately mirror TLS ALPN negotiation.",
            choices=("eager", "lazy"),
        )
        loader.add_option(
            "stream_large_bodies",
            Optional[str],
            None,
            """
            Stream data to the client if request or response body exceeds the given
            threshold. If streamed, the body will not be stored in any way,
            and such responses cannot be modified. Understands k/m/g
            suffixes, i.e. 3m for 3 megabytes. To store streamed bodies, see `store_streamed_bodies`.
            """,
        )
        loader.add_option(
            "body_size_limit",
            Optional[str],
            None,
            """
            Byte size limit of HTTP request and response bodies. Understands
            k/m/g suffixes, i.e. 3m for 3 megabytes.
            """,
        )
        loader.add_option(
            "keep_host_header",
            bool,
            False,
            """
            Reverse Proxy: Keep the original host header instead of rewriting it
            to the reverse proxy target.
            """,
        )
        loader.add_option(
            "proxy_debug",
            bool,
            False,
            "Enable debug logs in the proxy core.",
        )
        loader.add_option(
            "normalize_outbound_headers",
            bool,
            True,
            """
            Normalize outgoing HTTP/2 header names, but emit a warning when doing so.
            HTTP/2 does not allow uppercase header names. This option makes sure that HTTP/2 headers set
            in custom scripts are lowercased before they are sent.
            """,
        )
        loader.add_option(
            "validate_inbound_headers",
            bool,
            True,
            """
            Make sure that incoming HTTP requests are not malformed.
            Disabling this option makes mitmproxy vulnerable to HTTP smuggling attacks.
            """,
        )
        loader.add_option(
            "connect_addr",
            Optional[str],
            None,
            """Set the local IP address that mitmproxy should use when connecting to upstream servers.""",
        )

    def running(self):
        # From this point on, configure() schedules server updates asynchronously.
        self.is_running = True

    def configure(self, updated) -> None:
        """Validate option changes and (if running) apply the new server modes."""
        if "stream_large_bodies" in updated:
            try:
                human.parse_size(ctx.options.stream_large_bodies)
            except ValueError:
                raise exceptions.OptionsError(
                    f"Invalid stream_large_bodies specification: "
                    f"{ctx.options.stream_large_bodies}"
                )
        if "body_size_limit" in updated:
            try:
                human.parse_size(ctx.options.body_size_limit)
            except ValueError:
                raise exceptions.OptionsError(
                    f"Invalid body_size_limit specification: "
                    f"{ctx.options.body_size_limit}"
                )
        if "connect_addr" in updated:
            try:
                if ctx.options.connect_addr:
                    # Port 0 lets the OS pick an ephemeral source port.
                    self._connect_addr = (
                        str(ipaddress.ip_address(ctx.options.connect_addr)),
                        0,
                    )
                else:
                    self._connect_addr = None
            except ValueError:
                raise exceptions.OptionsError(
                    f"Invalid value for connect_addr: {ctx.options.connect_addr!r}. Specify a valid IP address."
                )
        if "mode" in updated or "server" in updated:
            # Make sure that all modes are syntactically valid...
            modes: list[mode_specs.ProxyMode] = []
            for mode in ctx.options.mode:
                try:
                    modes.append(mode_specs.ProxyMode.parse(mode))
                except ValueError as e:
                    raise exceptions.OptionsError(
                        f"Invalid proxy mode specification: {mode} ({e})"
                    )

            # ...and don't listen on the same address.
            listen_addrs = []
            for m in modes:
                if m.transport_protocol == "both":
                    protocols = ["tcp", "udp"]
                else:
                    protocols = [m.transport_protocol]
                host = m.listen_host(ctx.options.listen_host)
                port = m.listen_port(ctx.options.listen_port)
                # Modes without a listen port (e.g. non-listening modes) cannot clash.
                if port is None:
                    continue
                for proto in protocols:
                    listen_addrs.append((host, port, proto))
            if len(set(listen_addrs)) != len(listen_addrs):
                # Report the most frequent duplicate address.
                (host, port, _) = collections.Counter(listen_addrs).most_common(1)[0][0]
                dup_addr = human.format_address((host or "0.0.0.0", port))
                raise exceptions.OptionsError(
                    f"Cannot spawn multiple servers on the same address: {dup_addr}"
                )

            if ctx.options.mode and not ctx.master.addons.get("nextlayer"):
                logger.warning("Warning: Running proxyserver without nextlayer addon!")
            if any(isinstance(m, mode_specs.TransparentMode) for m in modes):
                # platform.original_addr is only available where transparent
                # redirection is supported.
                if platform.original_addr:
                    platform.init_transparent_mode()
                else:
                    raise exceptions.OptionsError(
                        "Transparent mode not supported on this platform."
                    )

            if self.is_running:
                asyncio_utils.create_task(
                    self.servers.update(modes),
                    name="update servers",
                    keep_ref=True,
                )

    async def setup_servers(self) -> bool:
        """Setup proxy servers. This may take an indefinite amount of time to complete (e.g. on permission prompts)."""
        return await self.servers.update(
            [mode_specs.ProxyMode.parse(m) for m in ctx.options.mode]
        )

    def listen_addrs(self) -> list[Address]:
        """Return all addresses the running server instances listen on."""
        return [addr for server in self.servers for addr in server.listen_addrs]

    def inject_event(self, event: events.MessageInjected):
        """
        Deliver an injected message event to the live connection handler of
        the event's flow.

        Raises:
            ValueError: if the flow does not belong to a live connection.
        """
        connection_id: str | tuple
        if event.flow.client_conn.transport_protocol != "udp":
            connection_id = event.flow.client_conn.id
        else:  # pragma: no cover
            # temporary workaround: for UDP we don't have persistent client IDs yet.
            connection_id = (
                event.flow.client_conn.peername,
                event.flow.client_conn.sockname,
            )
        if connection_id not in self.connections:
            raise ValueError("Flow is not from a live connection.")

        asyncio_utils.create_task(
            self.connections[connection_id].server_event(event),
            name=f"inject_event",
            keep_ref=True,
            client=event.flow.client_conn.peername,
        )

    @command.command("inject.websocket")
    def inject_websocket(
        self, flow: Flow, to_client: bool, message: bytes, is_text: bool = True
    ):
        """Inject a WebSocket message into a live WebSocket flow."""
        if not isinstance(flow, http.HTTPFlow) or not flow.websocket:
            # NOTE(review): no `return` here — execution continues and
            # inject_event() below will then typically fail with a ValueError
            # that is logged. Confirm this is intended.
            logger.warning("Cannot inject WebSocket messages into non-WebSocket flows.")

        msg = websocket.WebSocketMessage(
            Opcode.TEXT if is_text else Opcode.BINARY, not to_client, message
        )
        event = WebSocketMessageInjected(flow, msg)
        try:
            self.inject_event(event)
        except ValueError as e:
            logger.warning(str(e))

    @command.command("inject.tcp")
    def inject_tcp(self, flow: Flow, to_client: bool, message: bytes):
        """Inject raw data into a live TCP flow."""
        if not isinstance(flow, tcp.TCPFlow):
            # NOTE(review): no `return` after the warning; see inject_websocket.
            logger.warning("Cannot inject TCP messages into non-TCP flows.")

        event = TcpMessageInjected(flow, tcp.TCPMessage(not to_client, message))
        try:
            self.inject_event(event)
        except ValueError as e:
            logger.warning(str(e))

    @command.command("inject.udp")
    def inject_udp(self, flow: Flow, to_client: bool, message: bytes):
        """Inject a datagram into a live UDP flow."""
        if not isinstance(flow, udp.UDPFlow):
            # NOTE(review): no `return` after the warning; see inject_websocket.
            logger.warning("Cannot inject UDP messages into non-UDP flows.")

        event = UdpMessageInjected(flow, udp.UDPMessage(not to_client, message))
        try:
            self.inject_event(event)
        except ValueError as e:
            logger.warning(str(e))

    def server_connect(self, data: server_hooks.ServerConnectionHookData):
        """Apply the configured source address and block self-connections."""
        if data.server.sockname is None:
            data.server.sockname = self._connect_addr

        # Prevent mitmproxy from recursively connecting to itself.
        assert data.server.address
        connect_host, connect_port, *_ = data.server.address

        for server in self.servers:
            for listen_host, listen_port, *_ in server.listen_addrs:
                self_connect = (
                    connect_port == listen_port
                    and connect_host in ("localhost", "127.0.0.1", "::1", listen_host)
                    and server.mode.transport_protocol == data.server.transport_protocol
                )
                if self_connect:
                    data.server.error = (
                        "Request destination unknown. "
                        "Unable to figure out where this request should be forwarded to."
                    )
                    return
|
||||
98
venv/Lib/site-packages/mitmproxy/addons/readfile.py
Normal file
@@ -0,0 +1,98 @@
|
||||
import asyncio
|
||||
import logging
|
||||
import os.path
|
||||
import sys
|
||||
from typing import BinaryIO
|
||||
from typing import Optional
|
||||
|
||||
from mitmproxy import command
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flowfilter
|
||||
from mitmproxy import io
|
||||
from mitmproxy.utils import asyncio_utils
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ReadFile:
    """
    An addon that handles reading from file on startup.
    """

    def __init__(self):
        # Optional flow filter parsed from the readfile_filter option.
        self.filter = None
        # Background task reading the file; used by the readfile.reading command.
        self._read_task: asyncio.Task | None = None

    def load(self, loader):
        loader.add_option("rfile", Optional[str], None, "Read flows from file.")
        loader.add_option(
            "readfile_filter", Optional[str], None, "Read only matching flows."
        )

    def configure(self, updated):
        if "readfile_filter" in updated:
            if ctx.options.readfile_filter:
                try:
                    self.filter = flowfilter.parse(ctx.options.readfile_filter)
                except ValueError as e:
                    raise exceptions.OptionsError(str(e)) from e
            else:
                self.filter = None

    async def load_flows(self, fo: BinaryIO) -> int:
        """
        Load flows from an open binary file object into the master,
        skipping flows rejected by the configured filter.

        Returns the number of flows loaded. Raises FlowReadException if
        the file is corrupted or truncated.
        """
        cnt = 0
        freader = io.FlowReader(fo)
        try:
            for flow in freader.stream():
                if self.filter and not self.filter(flow):
                    continue
                await ctx.master.load_flow(flow)
                cnt += 1
        except (OSError, exceptions.FlowReadException) as e:
            # Consistency fix: use the module-level logger instead of the
            # root logger (`logging.warning`/`logging.error`).
            if cnt:
                logger.warning("Flow file corrupted - loaded %i flows." % cnt)
            else:
                logger.error("Flow file corrupted.")
            raise exceptions.FlowReadException(str(e)) from e
        else:
            return cnt

    async def load_flows_from_path(self, path: str) -> int:
        """Open `path` (with ~ expansion) and load flows from it."""
        path = os.path.expanduser(path)
        try:
            with open(path, "rb") as f:
                return await self.load_flows(f)
        except OSError as e:
            logger.error(f"Cannot load flows: {e}")
            raise exceptions.FlowReadException(str(e)) from e

    async def doread(self, rfile: str) -> None:
        """Read `rfile`, logging (but not propagating) read failures."""
        try:
            await self.load_flows_from_path(rfile)
        except exceptions.FlowReadException as e:
            logger.exception(f"Failed to read {ctx.options.rfile}: {e}")

    def running(self):
        if ctx.options.rfile:
            self._read_task = asyncio_utils.create_task(
                self.doread(ctx.options.rfile),
                name="readfile",
                keep_ref=False,
            )

    @command.command("readfile.reading")
    def reading(self) -> bool:
        """True while the background read task is still running."""
        return bool(self._read_task and not self._read_task.done())
|
||||
|
||||
|
||||
class ReadFileStdin(ReadFile):
    """Support the special case of "-" for reading from stdin"""

    async def load_flows_from_path(self, path: str) -> int:
        if path != "-":
            return await super().load_flows_from_path(path)
        else:  # pragma: no cover
            # Need to think about how to test this. This function is scheduled
            # onto the event loop, where a sys.stdin mock has no effect.
            return await self.load_flows(sys.stdin.buffer)
|
||||
197
venv/Lib/site-packages/mitmproxy/addons/save.py
Normal file
@@ -0,0 +1,197 @@
|
||||
import logging
|
||||
import os.path
|
||||
import sys
|
||||
from collections.abc import Sequence
|
||||
from datetime import datetime
|
||||
from functools import lru_cache
|
||||
from pathlib import Path
|
||||
from typing import Literal
|
||||
from typing import Optional
|
||||
|
||||
import mitmproxy.types
|
||||
from mitmproxy import command
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import dns
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flow
|
||||
from mitmproxy import flowfilter
|
||||
from mitmproxy import http
|
||||
from mitmproxy import io
|
||||
from mitmproxy import tcp
|
||||
from mitmproxy import udp
|
||||
from mitmproxy.log import ALERT
|
||||
|
||||
|
||||
@lru_cache
|
||||
def _path(path: str) -> str:
|
||||
"""Extract the path from a path spec (which may have an extra "+" at the front)"""
|
||||
if path.startswith("+"):
|
||||
path = path[1:]
|
||||
return os.path.expanduser(path)
|
||||
|
||||
|
||||
@lru_cache
|
||||
def _mode(path: str) -> Literal["ab", "wb"]:
|
||||
"""Extract the writing mode (overwrite or append) from a path spec"""
|
||||
if path.startswith("+"):
|
||||
return "ab"
|
||||
else:
|
||||
return "wb"
|
||||
|
||||
|
||||
class Save:
    """
    Stream flows to a file as they complete (`save_stream_file` option,
    with strftime-based rotation) and save flows on demand via the
    `save.file` command.
    """

    def __init__(self) -> None:
        # Writer for the currently open stream file, if any.
        self.stream: io.FilteredFlowWriter | None = None
        # Parsed save_stream_filter, if set.
        self.filt: flowfilter.TFilter | None = None
        # Flows that have started but not yet been written to the stream.
        self.active_flows: set[flow.Flow] = set()
        # Formatted path of the currently open stream file.
        self.current_path: str | None = None

    def load(self, loader):
        loader.add_option(
            "save_stream_file",
            Optional[str],
            None,
            """
            Stream flows to file as they arrive. Prefix path with + to append.
            The full path can use python strftime() formating, missing
            directories are created as needed. A new file is opened every time
            the formatted string changes.
            """,
        )
        loader.add_option(
            "save_stream_filter",
            Optional[str],
            None,
            "Filter which flows are written to file.",
        )

    def configure(self, updated):
        if "save_stream_filter" in updated:
            if ctx.options.save_stream_filter:
                try:
                    self.filt = flowfilter.parse(ctx.options.save_stream_filter)
                except ValueError as e:
                    raise exceptions.OptionsError(str(e)) from e
            else:
                self.filt = None
        if "save_stream_file" in updated or "save_stream_filter" in updated:
            if ctx.options.save_stream_file:
                try:
                    self.maybe_rotate_to_new_file()
                except OSError as e:
                    raise exceptions.OptionsError(str(e)) from e
                assert self.stream
                # Propagate the (possibly new) filter to the open writer.
                self.stream.flt = self.filt
            else:
                self.done()

    def maybe_rotate_to_new_file(self) -> None:
        """
        Open a new stream file if the strftime-formatted target path has
        changed since the last write; otherwise do nothing.
        """
        path = datetime.today().strftime(_path(ctx.options.save_stream_file))
        if self.current_path == path:
            return

        if self.stream:
            self.stream.fo.close()
            self.stream = None

        new_log_file = Path(path)
        new_log_file.parent.mkdir(parents=True, exist_ok=True)

        f = new_log_file.open(_mode(ctx.options.save_stream_file))
        self.stream = io.FilteredFlowWriter(f, self.filt)
        self.current_path = path

    def save_flow(self, flow: flow.Flow) -> None:
        """
        Write the flow to the stream, but first check if we need to rotate to a new file.
        """
        if not self.stream:
            return
        try:
            self.maybe_rotate_to_new_file()
            self.stream.add(flow)
        except OSError as e:
            # If we somehow fail to write flows to a logfile, we really want to crash visibly
            # instead of letting traffic through unrecorded.
            # No normal logging here, that would not be triggered anymore.
            sys.stderr.write(f"Error while writing to {self.current_path}: {e}")
            sys.exit(1)
        else:
            self.active_flows.discard(flow)

    def done(self) -> None:
        """Flush still-active flows and close the stream file."""
        if self.stream:
            for f in self.active_flows:
                self.stream.add(f)
            self.active_flows.clear()

            self.current_path = None
            self.stream.fo.close()
            self.stream = None

    @command.command("save.file")
    def save(self, flows: Sequence[flow.Flow], path: mitmproxy.types.Path) -> None:
        """
        Save flows to a file. If the path starts with a +, flows are
        appended to the file, otherwise it is over-written.
        """
        try:
            with open(_path(path), _mode(path)) as f:
                stream = io.FlowWriter(f)
                for i in flows:
                    stream.add(i)
        except OSError as e:
            raise exceptions.CommandError(e) from e
        if path.endswith((".har", ".zhar")):  # pragma: no cover
            # Fix: dropped a stray f-prefix from this placeholder-free string.
            logging.log(
                ALERT,
                "Saved as mitmproxy dump file. To save HAR files, use the `save.har` command.",
            )
        else:
            logging.log(ALERT, f"Saved {len(flows)} flows.")

    def tcp_start(self, flow: tcp.TCPFlow):
        if self.stream:
            self.active_flows.add(flow)

    def tcp_end(self, flow: tcp.TCPFlow):
        self.save_flow(flow)

    def tcp_error(self, flow: tcp.TCPFlow):
        self.tcp_end(flow)

    def udp_start(self, flow: udp.UDPFlow):
        if self.stream:
            self.active_flows.add(flow)

    def udp_end(self, flow: udp.UDPFlow):
        self.save_flow(flow)

    def udp_error(self, flow: udp.UDPFlow):
        self.udp_end(flow)

    def websocket_end(self, flow: http.HTTPFlow):
        self.save_flow(flow)

    def request(self, flow: http.HTTPFlow):
        if self.stream:
            self.active_flows.add(flow)

    def response(self, flow: http.HTTPFlow):
        # websocket flows will receive a websocket_end,
        # we don't want to persist them here already
        if flow.websocket is None:
            self.save_flow(flow)

    def error(self, flow: http.HTTPFlow):
        self.response(flow)

    def dns_request(self, flow: dns.DNSFlow):
        if self.stream:
            self.active_flows.add(flow)

    def dns_response(self, flow: dns.DNSFlow):
        self.save_flow(flow)

    def dns_error(self, flow: dns.DNSFlow):
        self.save_flow(flow)
|
||||
312
venv/Lib/site-packages/mitmproxy/addons/savehar.py
Normal file
@@ -0,0 +1,312 @@
|
||||
"""Write flow objects to a HAR file"""
|
||||
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
import zlib
|
||||
from collections.abc import Sequence
|
||||
from datetime import datetime
|
||||
from datetime import timezone
|
||||
from typing import Any
|
||||
|
||||
from mitmproxy import command
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flow
|
||||
from mitmproxy import flowfilter
|
||||
from mitmproxy import http
|
||||
from mitmproxy import types
|
||||
from mitmproxy import version
|
||||
from mitmproxy.addonmanager import Loader
|
||||
from mitmproxy.connection import Server
|
||||
from mitmproxy.coretypes.multidict import _MultiDict
|
||||
from mitmproxy.log import ALERT
|
||||
from mitmproxy.utils import human
|
||||
from mitmproxy.utils import strutils
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SaveHar:
    """
    Collect HTTP flows and write them out as a HAR (HTTP Archive) file,
    either on demand (`save.har` command) or on shutdown (`hardump` option).
    """

    def __init__(self) -> None:
        # Flows collected for the final HAR dump (only filled when hardump is set).
        self.flows: list[flow.Flow] = []
        # Parsed save_stream_filter, if set.
        self.filt: flowfilter.TFilter | None = None

    @command.command("save.har")
    def export_har(self, flows: Sequence[flow.Flow], path: types.Path) -> None:
        """Export flows to an HAR (HTTP Archive) file."""

        har = json.dumps(self.make_har(flows), indent=4).encode()

        # A .zhar extension selects zlib-compressed output.
        if path.endswith(".zhar"):
            har = zlib.compress(har, 9)

        with open(path, "wb") as f:
            f.write(har)

        logging.log(ALERT, f"HAR file saved ({human.pretty_size(len(har))} bytes).")

    def make_har(self, flows: Sequence[flow.Flow]) -> dict:
        """Build the top-level HAR dict for the given flows; non-HTTP flows are skipped."""
        entries = []
        skipped = 0
        # A list of server seen till now is maintained so we can avoid
        # using 'connect' time for entries that use an existing connection.
        servers_seen: set[Server] = set()

        for f in flows:
            if isinstance(f, http.HTTPFlow):
                entries.append(self.flow_entry(f, servers_seen))
            else:
                skipped += 1

        if skipped > 0:
            logger.info(f"Skipped {skipped} flows that weren't HTTP flows.")

        return {
            "log": {
                "version": "1.2",
                "creator": {
                    "name": "mitmproxy",
                    "version": version.VERSION,
                    "comment": "",
                },
                "pages": [],
                "entries": entries,
            }
        }

    def load(self, loader: Loader):
        loader.add_option(
            "hardump",
            str,
            "",
            """
            Save a HAR file with all flows on exit.
            You may select particular flows by setting save_stream_filter.
            For mitmdump, enabling this option will mean that flows are kept in memory.
            """,
        )

    def configure(self, updated):
        # Reuses the save addon's save_stream_filter option for flow selection.
        if "save_stream_filter" in updated:
            if ctx.options.save_stream_filter:
                try:
                    self.filt = flowfilter.parse(ctx.options.save_stream_filter)
                except ValueError as e:
                    raise exceptions.OptionsError(str(e)) from e
            else:
                self.filt = None

        if "hardump" in updated:
            # Drop any collected flows when the dump is disabled.
            if not ctx.options.hardump:
                self.flows = []

    def response(self, flow: http.HTTPFlow) -> None:
        # websocket flows will receive a websocket_end,
        # we don't want to persist them here already
        if flow.websocket is None:
            self._save_flow(flow)

    def error(self, flow: http.HTTPFlow) -> None:
        self.response(flow)

    def websocket_end(self, flow: http.HTTPFlow) -> None:
        self._save_flow(flow)

    def _save_flow(self, flow: http.HTTPFlow) -> None:
        # Collect the flow only when hardump is active and the filter matches.
        if ctx.options.hardump:
            flow_matches = self.filt is None or self.filt(flow)
            if flow_matches:
                self.flows.append(flow)

    def done(self):
        # On shutdown: "-" dumps the HAR to stdout, any other value is a file path.
        if ctx.options.hardump:
            if ctx.options.hardump == "-":
                har = self.make_har(self.flows)
                print(json.dumps(har, indent=4))
            else:
                self.export_har(self.flows, ctx.options.hardump)

    def flow_entry(self, flow: http.HTTPFlow, servers_seen: set[Server]) -> dict:
        """Creates HAR entry from flow"""

        # Timing fields use -1.0 to mark phases that do not apply,
        # e.g. connect/ssl on a reused connection.
        if flow.server_conn in servers_seen:
            connect_time = -1.0
            ssl_time = -1.0
        elif flow.server_conn.timestamp_tcp_setup:
            assert flow.server_conn.timestamp_start
            connect_time = 1000 * (
                flow.server_conn.timestamp_tcp_setup - flow.server_conn.timestamp_start
            )

            if flow.server_conn.timestamp_tls_setup:
                ssl_time = 1000 * (
                    flow.server_conn.timestamp_tls_setup
                    - flow.server_conn.timestamp_tcp_setup
                )
            else:
                ssl_time = -1.0
            servers_seen.add(flow.server_conn)
        else:
            connect_time = -1.0
            ssl_time = -1.0

        # All durations below are milliseconds (timestamps are seconds).
        if flow.request.timestamp_end:
            send = 1000 * (flow.request.timestamp_end - flow.request.timestamp_start)
        else:
            send = 0

        if flow.response and flow.request.timestamp_end:
            wait = 1000 * (flow.response.timestamp_start - flow.request.timestamp_end)
        else:
            wait = 0

        if flow.response and flow.response.timestamp_end:
            receive = 1000 * (
                flow.response.timestamp_end - flow.response.timestamp_start
            )

        else:
            receive = 0

        timings: dict[str, float | None] = {
            "connect": connect_time,
            "ssl": ssl_time,
            "send": send,
            "receive": receive,
            "wait": wait,
        }

        if flow.response:
            try:
                content = flow.response.content
            except ValueError:
                # Decoding failed (e.g. bad content-encoding); fall back to raw bytes.
                content = flow.response.raw_content
            response_body_size = (
                len(flow.response.raw_content) if flow.response.raw_content else 0
            )
            response_body_decoded_size = len(content) if content else 0
            response_body_compression = response_body_decoded_size - response_body_size
            response = {
                "status": flow.response.status_code,
                "statusText": flow.response.reason,
                "httpVersion": flow.response.http_version,
                "cookies": self.format_response_cookies(flow.response),
                "headers": self.format_multidict(flow.response.headers),
                "content": {
                    "size": response_body_size,
                    "compression": response_body_compression,
                    "mimeType": flow.response.headers.get("Content-Type", ""),
                },
                "redirectURL": flow.response.headers.get("Location", ""),
                "headersSize": len(str(flow.response.headers)),
                "bodySize": response_body_size,
            }
            # Binary bodies are base64-encoded; text bodies are stored as-is.
            if content and strutils.is_mostly_bin(content):
                response["content"]["text"] = base64.b64encode(content).decode()
                response["content"]["encoding"] = "base64"
            else:
                text_content = flow.response.get_text(strict=False)
                if text_content is None:
                    response["content"]["text"] = ""
                else:
                    response["content"]["text"] = text_content
        else:
            # No response (e.g. connection error): emit a placeholder record.
            response = {
                "status": 0,
                "statusText": "",
                "httpVersion": "",
                "headers": [],
                "cookies": [],
                "content": {},
                "redirectURL": "",
                "headersSize": -1,
                "bodySize": -1,
                "_transferSize": 0,
                "_error": None,
            }
            if flow.error:
                response["_error"] = flow.error.msg

        if flow.request.method == "CONNECT":
            url = f"https://{flow.request.pretty_url}/"
        else:
            url = flow.request.pretty_url

        entry: dict[str, Any] = {
            "startedDateTime": datetime.fromtimestamp(
                flow.request.timestamp_start, timezone.utc
            ).isoformat(),
            # Total time is the sum of the applicable (non-negative) phases.
            "time": sum(v for v in timings.values() if v is not None and v >= 0),
            "request": {
                "method": flow.request.method,
                "url": url,
                "httpVersion": flow.request.http_version,
                "cookies": self.format_multidict(flow.request.cookies),
                "headers": self.format_multidict(flow.request.headers),
                "queryString": self.format_multidict(flow.request.query),
                "headersSize": len(str(flow.request.headers)),
                "bodySize": len(flow.request.raw_content)
                if flow.request.raw_content
                else 0,
            },
            "response": response,
            "cache": {},
            "timings": timings,
        }

        if flow.request.method in ["POST", "PUT", "PATCH"]:
            params = self.format_multidict(flow.request.urlencoded_form)
            entry["request"]["postData"] = {
                "mimeType": flow.request.headers.get("Content-Type", ""),
                "text": flow.request.get_text(strict=False),
                "params": params,
            }

        if flow.server_conn.peername:
            entry["serverIPAddress"] = str(flow.server_conn.peername[0])

        websocket_messages = []
        if flow.websocket:
            for message in flow.websocket.messages:
                if message.is_text:
                    data = message.text
                else:
                    data = base64.b64encode(message.content).decode()
                websocket_message = {
                    "type": "send" if message.from_client else "receive",
                    "time": message.timestamp,
                    "opcode": message.type.value,
                    "data": data,
                }
                websocket_messages.append(websocket_message)

            # Non-standard HAR extensions used by browser devtools.
            entry["_resourceType"] = "websocket"
            entry["_webSocketMessages"] = websocket_messages
        return entry

    def format_response_cookies(self, response: http.Response) -> list[dict]:
        """Formats the response's cookie header to list of cookies"""
        cookie_list = response.cookies.items(multi=True)
        rv = []
        for name, (value, attrs) in cookie_list:
            cookie = {
                "name": name,
                "value": value,
                "path": attrs.get("path", "/"),
                "domain": attrs.get("domain", ""),
                "httpOnly": "httpOnly" in attrs,
                "secure": "secure" in attrs,
            }
            # TODO: handle expires attribute here.
            # This is not quite trivial because we need to parse random date formats.
            # For now, we just ignore the attribute.

            if "sameSite" in attrs:
                cookie["sameSite"] = attrs["sameSite"]

            rv.append(cookie)
        return rv

    def format_multidict(self, obj: _MultiDict[str, str]) -> list[dict]:
        # HAR represents headers/cookies/query params as name/value pair lists.
        return [{"name": k, "value": v} for k, v in obj.items(multi=True)]
|
||||
229
venv/Lib/site-packages/mitmproxy/addons/script.py
Normal file
@@ -0,0 +1,229 @@
|
||||
import asyncio
|
||||
import importlib.machinery
|
||||
import importlib.util
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import types
|
||||
from collections.abc import Sequence
|
||||
|
||||
import mitmproxy.types as mtypes
|
||||
from mitmproxy import addonmanager
|
||||
from mitmproxy import command
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import eventsequence
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flow
|
||||
from mitmproxy import hooks
|
||||
from mitmproxy.utils import asyncio_utils
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def load_script(path: str) -> types.ModuleType | None:
|
||||
fullname = "__mitmproxy_script__.{}".format(
|
||||
os.path.splitext(os.path.basename(path))[0]
|
||||
)
|
||||
# the fullname is not unique among scripts, so if there already is an existing script with said
|
||||
# fullname, remove it.
|
||||
sys.modules.pop(fullname, None)
|
||||
oldpath = sys.path
|
||||
sys.path.insert(0, os.path.dirname(path))
|
||||
|
||||
try:
|
||||
loader = importlib.machinery.SourceFileLoader(fullname, path)
|
||||
spec = importlib.util.spec_from_loader(fullname, loader=loader)
|
||||
assert spec
|
||||
m = importlib.util.module_from_spec(spec)
|
||||
loader.exec_module(m)
|
||||
if not getattr(m, "name", None):
|
||||
m.name = path # type: ignore
|
||||
return m
|
||||
except ImportError as e:
|
||||
if getattr(sys, "frozen", False):
|
||||
e.msg += (
|
||||
f".\n"
|
||||
f"Note that mitmproxy's binaries include their own Python environment. "
|
||||
f"If your addon requires the installation of additional dependencies, "
|
||||
f"please install mitmproxy from PyPI "
|
||||
f"(https://docs.mitmproxy.org/stable/overview-installation/#installation-from-the-python-package-index-pypi)."
|
||||
)
|
||||
script_error_handler(path, e)
|
||||
return None
|
||||
except Exception as e:
|
||||
script_error_handler(path, e)
|
||||
return None
|
||||
finally:
|
||||
sys.path[:] = oldpath
|
||||
|
||||
|
||||
def script_error_handler(path: str, exc: Exception) -> None:
    """
    Log errors during script loading.
    """
    tb = exc.__traceback__
    # Trim frames that belong to mitmproxy's own loading machinery so the
    # logged traceback starts in the user's script:
    # we're calling configure() on load
    tb = addonmanager.cut_traceback(tb, "invoke_addon_sync")
    # module execution from importlib
    tb = addonmanager.cut_traceback(tb, "_call_with_frames_removed")
    logger.error(f"error in script {path}", exc_info=(type(exc), exc, tb))
|
||||
|
||||
|
||||
ReloadInterval = 1
|
||||
|
||||
|
||||
class Script:
    """
    An addon that manages a single script.
    """

    def __init__(self, path: str, reload: bool) -> None:
        self.name = "scriptmanager:" + path
        self.path = path
        # Strip stray quoting/whitespace around the path and expand ~.
        self.fullpath = os.path.expanduser(path.strip("'\" "))
        # The loaded script module, if any.
        self.ns: types.ModuleType | None = None
        self.is_running = False

        if not os.path.isfile(self.fullpath):
            raise exceptions.OptionsError(f"No such script: {self.fullpath}")

        self.reloadtask = None
        if reload:
            # With reload enabled, loading is deferred to the watcher task,
            # which also picks up subsequent file modifications.
            self.reloadtask = asyncio_utils.create_task(
                self.watcher(),
                name=f"script watcher for {path}",
                keep_ref=False,
            )
        else:
            self.loadscript()

    def running(self):
        self.is_running = True

    def done(self):
        if self.reloadtask:
            self.reloadtask.cancel()

    @property
    def addons(self):
        # Expose the loaded module as a sub-addon (empty until loaded).
        return [self.ns] if self.ns else []

    def loadscript(self):
        """(Re)load the script file, replacing any previously loaded module."""
        logger.info("Loading script %s" % self.path)
        if self.ns:
            ctx.master.addons.remove(self.ns)
        self.ns = None
        with addonmanager.safecall():
            ns = load_script(self.fullpath)
            ctx.master.addons.register(ns)
            self.ns = ns
        if self.ns:
            # Replay the configure event so the fresh module sees current options.
            try:
                ctx.master.addons.invoke_addon_sync(
                    self.ns, hooks.ConfigureHook(ctx.options.keys())
                )
            except Exception as e:
                script_error_handler(self.fullpath, e)
            if self.is_running:
                # We're already running, so we call that on the addon now.
                ctx.master.addons.invoke_addon_sync(self.ns, hooks.RunningHook())

    async def watcher(self):
        """Poll the script file's mtime, reloading on change and unloading on deletion."""
        # Script loading is terminally confused at the moment.
        # This here is a stopgap workaround to defer loading.
        await asyncio.sleep(0)
        last_mtime = 0.0
        while True:
            try:
                mtime = os.stat(self.fullpath).st_mtime
            except FileNotFoundError:
                # File is gone: remove ourselves from the scripts option,
                # which triggers ScriptLoader.configure to unload us.
                logger.info("Removing script %s" % self.path)
                scripts = list(ctx.options.scripts)
                scripts.remove(self.path)
                ctx.options.update(scripts=scripts)
                return
            if mtime > last_mtime:
                self.loadscript()
                last_mtime = mtime
            await asyncio.sleep(ReloadInterval)
|
||||
|
||||
|
||||
class ScriptLoader:
    """
    An addon that manages loading scripts from options.
    """

    def __init__(self):
        self.is_running = False
        # One Script instance per entry in the scripts option, in order.
        self.addons = []

    def load(self, loader):
        loader.add_option("scripts", Sequence[str], [], "Execute a script.")

    def running(self):
        self.is_running = True

    @command.command("script.run")
    def script_run(self, flows: Sequence[flow.Flow], path: mtypes.Path) -> None:
        """
        Run a script on the specified flows. The script is configured with
        the current options and all lifecycle events for each flow are
        simulated. Note that the load event is not invoked.
        """
        if not os.path.isfile(path):
            logger.error("No such script: %s" % path)
            return
        mod = load_script(path)
        if mod:
            with addonmanager.safecall():
                ctx.master.addons.invoke_addon_sync(
                    mod,
                    hooks.ConfigureHook(ctx.options.keys()),
                )
                ctx.master.addons.invoke_addon_sync(mod, hooks.RunningHook())
                for f in flows:
                    # Replay the full event sequence for each flow against the script.
                    for evt in eventsequence.iterate(f):
                        ctx.master.addons.invoke_addon_sync(mod, evt)

    def configure(self, updated):
        """Reconcile loaded Script addons with the current scripts option."""
        if "scripts" in updated:
            for s in ctx.options.scripts:
                if ctx.options.scripts.count(s) > 1:
                    raise exceptions.OptionsError("Duplicate script")

            # Unload scripts no longer present in the option
            # (iterate a copy since we mutate self.addons).
            for a in self.addons[:]:
                if a.path not in ctx.options.scripts:
                    logger.info("Un-loading script: %s" % a.path)
                    ctx.master.addons.remove(a)
                    self.addons.remove(a)

            # The machinations below are to ensure that:
            # - Scripts remain in the same order
            # - Scripts are not initialized un-necessarily. If only a
            #   script's order in the script list has changed, it is just
            #   moved.

            current = {}
            for a in self.addons:
                current[a.path] = a

            ordered = []
            newscripts = []
            for s in ctx.options.scripts:
                if s in current:
                    ordered.append(current[s])
                else:
                    sc = Script(s, True)
                    ordered.append(sc)
                    newscripts.append(sc)

            self.addons = ordered

            for s in newscripts:
                ctx.master.addons.register(s)
                if self.is_running:
                    # If we're already running, we configure and tell the addon
                    # we're up and running.
                    ctx.master.addons.invoke_addon_sync(s, hooks.RunningHook())
|
||||
@@ -0,0 +1,23 @@
|
||||
import logging
|
||||
|
||||
from mitmproxy import http
|
||||
|
||||
|
||||
class ServerSideEvents:
    """
    Server-Side Events are currently swallowed if there's no streaming,
    see https://github.com/mitmproxy/mitmproxy/issues/4469.

    Until this bug is fixed, this addon warns the user about this.
    """

    def response(self, flow: http.HTTPFlow):
        assert flow.response
        content_type = flow.response.headers.get("content-type", "")
        if content_type.startswith("text/event-stream") and not flow.response.stream:
            logging.warning(
                "mitmproxy currently does not support server side events. As a workaround, you can enable response "
                "streaming for such flows: https://github.com/mitmproxy/mitmproxy/issues/4469"
            )
|
||||
305
venv/Lib/site-packages/mitmproxy/addons/serverplayback.py
Normal file
@@ -0,0 +1,305 @@
|
||||
import hashlib
|
||||
import logging
|
||||
import urllib
|
||||
from collections.abc import Hashable
|
||||
from collections.abc import Sequence
|
||||
from typing import Any
|
||||
|
||||
import mitmproxy.types
|
||||
from mitmproxy import command
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flow
|
||||
from mitmproxy import hooks
|
||||
from mitmproxy import http
|
||||
from mitmproxy import io
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
HASH_OPTIONS = [
|
||||
"server_replay_ignore_content",
|
||||
"server_replay_ignore_host",
|
||||
"server_replay_ignore_params",
|
||||
"server_replay_ignore_payload_params",
|
||||
"server_replay_ignore_port",
|
||||
"server_replay_use_headers",
|
||||
]
|
||||
|
||||
|
||||
class ServerPlayback:
|
||||
flowmap: dict[Hashable, list[http.HTTPFlow]]
|
||||
configured: bool
|
||||
|
||||
def __init__(self):
|
||||
self.flowmap = {}
|
||||
self.configured = False
|
||||
|
||||
def load(self, loader):
|
||||
loader.add_option(
|
||||
"server_replay_kill_extra",
|
||||
bool,
|
||||
False,
|
||||
"Kill extra requests during replay (for which no replayable response was found)."
|
||||
"[Deprecated, prefer to use server_replay_extra='kill']",
|
||||
)
|
||||
loader.add_option(
|
||||
"server_replay_extra",
|
||||
str,
|
||||
"forward",
|
||||
"Behaviour for extra requests during replay for which no replayable response was found. "
|
||||
"Setting a numeric string value will return an empty HTTP response with the respective status code.",
|
||||
choices=["forward", "kill", "204", "400", "404", "500"],
|
||||
)
|
||||
loader.add_option(
|
||||
"server_replay_reuse",
|
||||
bool,
|
||||
False,
|
||||
"""
|
||||
Don't remove flows from server replay state after use. This makes it
|
||||
possible to replay same response multiple times.
|
||||
""",
|
||||
)
|
||||
loader.add_option(
|
||||
"server_replay_nopop",
|
||||
bool,
|
||||
False,
|
||||
"""
|
||||
Deprecated alias for `server_replay_reuse`.
|
||||
""",
|
||||
)
|
||||
loader.add_option(
|
||||
"server_replay_refresh",
|
||||
bool,
|
||||
True,
|
||||
"""
|
||||
Refresh server replay responses by adjusting date, expires and
|
||||
last-modified headers, as well as adjusting cookie expiration.
|
||||
""",
|
||||
)
|
||||
loader.add_option(
|
||||
"server_replay_use_headers",
|
||||
Sequence[str],
|
||||
[],
|
||||
"""
|
||||
Request headers that need to match while searching for a saved flow
|
||||
to replay.
|
||||
""",
|
||||
)
|
||||
loader.add_option(
|
||||
"server_replay",
|
||||
Sequence[str],
|
||||
[],
|
||||
"Replay server responses from a saved file.",
|
||||
)
|
||||
loader.add_option(
|
||||
"server_replay_ignore_content",
|
||||
bool,
|
||||
False,
|
||||
"Ignore request content while searching for a saved flow to replay.",
|
||||
)
|
||||
loader.add_option(
|
||||
"server_replay_ignore_params",
|
||||
Sequence[str],
|
||||
[],
|
||||
"""
|
||||
Request parameters to be ignored while searching for a saved flow
|
||||
to replay.
|
||||
""",
|
||||
)
|
||||
loader.add_option(
|
||||
"server_replay_ignore_payload_params",
|
||||
Sequence[str],
|
||||
[],
|
||||
"""
|
||||
Request payload parameters (application/x-www-form-urlencoded or
|
||||
multipart/form-data) to be ignored while searching for a saved flow
|
||||
to replay.
|
||||
""",
|
||||
)
|
||||
loader.add_option(
|
||||
"server_replay_ignore_host",
|
||||
bool,
|
||||
False,
|
||||
"""
|
||||
Ignore request destination host while searching for a saved flow
|
||||
to replay.
|
||||
""",
|
||||
)
|
||||
loader.add_option(
|
||||
"server_replay_ignore_port",
|
||||
bool,
|
||||
False,
|
||||
"""
|
||||
Ignore request destination port while searching for a saved flow
|
||||
to replay.
|
||||
""",
|
||||
)
|
||||
|
||||
@command.command("replay.server")
|
||||
def load_flows(self, flows: Sequence[flow.Flow]) -> None:
|
||||
"""
|
||||
Replay server responses from flows.
|
||||
"""
|
||||
self.flowmap = {}
|
||||
self.add_flows(flows)
|
||||
|
||||
@command.command("replay.server.add")
|
||||
def add_flows(self, flows: Sequence[flow.Flow]) -> None:
|
||||
"""
|
||||
Add responses from flows to server replay list.
|
||||
"""
|
||||
for f in flows:
|
||||
if isinstance(f, http.HTTPFlow):
|
||||
lst = self.flowmap.setdefault(self._hash(f), [])
|
||||
lst.append(f)
|
||||
ctx.master.addons.trigger(hooks.UpdateHook([]))
|
||||
|
||||
@command.command("replay.server.file")
|
||||
def load_file(self, path: mitmproxy.types.Path) -> None:
|
||||
try:
|
||||
flows = io.read_flows_from_paths([path])
|
||||
except exceptions.FlowReadException as e:
|
||||
raise exceptions.CommandError(str(e))
|
||||
self.load_flows(flows)
|
||||
|
||||
@command.command("replay.server.stop")
|
||||
def clear(self) -> None:
|
||||
"""
|
||||
Stop server replay.
|
||||
"""
|
||||
self.flowmap = {}
|
||||
ctx.master.addons.trigger(hooks.UpdateHook([]))
|
||||
|
||||
@command.command("replay.server.count")
|
||||
def count(self) -> int:
|
||||
return sum(len(i) for i in self.flowmap.values())
|
||||
|
||||
def _hash(self, flow: http.HTTPFlow) -> Hashable:
|
||||
"""
|
||||
Calculates a loose hash of the flow request.
|
||||
"""
|
||||
r = flow.request
|
||||
_, _, path, _, query, _ = urllib.parse.urlparse(r.url)
|
||||
queriesArray = urllib.parse.parse_qsl(query, keep_blank_values=True)
|
||||
|
||||
key: list[Any] = [str(r.scheme), str(r.method), str(path)]
|
||||
if not ctx.options.server_replay_ignore_content:
|
||||
if ctx.options.server_replay_ignore_payload_params and r.multipart_form:
|
||||
key.extend(
|
||||
(k, v)
|
||||
for k, v in r.multipart_form.items(multi=True)
|
||||
if k.decode(errors="replace")
|
||||
not in ctx.options.server_replay_ignore_payload_params
|
||||
)
|
||||
elif ctx.options.server_replay_ignore_payload_params and r.urlencoded_form:
|
||||
key.extend(
|
||||
(k, v)
|
||||
for k, v in r.urlencoded_form.items(multi=True)
|
||||
if k not in ctx.options.server_replay_ignore_payload_params
|
||||
)
|
||||
else:
|
||||
key.append(str(r.raw_content))
|
||||
|
||||
if not ctx.options.server_replay_ignore_host:
|
||||
key.append(r.pretty_host)
|
||||
if not ctx.options.server_replay_ignore_port:
|
||||
key.append(r.port)
|
||||
|
||||
filtered = []
|
||||
ignore_params = ctx.options.server_replay_ignore_params or []
|
||||
for p in queriesArray:
|
||||
if p[0] not in ignore_params:
|
||||
filtered.append(p)
|
||||
for p in filtered:
|
||||
key.append(p[0])
|
||||
key.append(p[1])
|
||||
|
||||
if ctx.options.server_replay_use_headers:
|
||||
headers = []
|
||||
for i in ctx.options.server_replay_use_headers:
|
||||
v = r.headers.get(i)
|
||||
headers.append((i, v))
|
||||
key.append(headers)
|
||||
return hashlib.sha256(repr(key).encode("utf8", "surrogateescape")).digest()
|
||||
|
||||
def next_flow(self, flow: http.HTTPFlow) -> http.HTTPFlow | None:
|
||||
"""
|
||||
Returns the next flow object, or None if no matching flow was
|
||||
found.
|
||||
"""
|
||||
hash = self._hash(flow)
|
||||
if hash in self.flowmap:
|
||||
if ctx.options.server_replay_reuse or ctx.options.server_replay_nopop:
|
||||
return next(
|
||||
(flow for flow in self.flowmap[hash] if flow.response), None
|
||||
)
|
||||
else:
|
||||
ret = self.flowmap[hash].pop(0)
|
||||
while not ret.response:
|
||||
if self.flowmap[hash]:
|
||||
ret = self.flowmap[hash].pop(0)
|
||||
else:
|
||||
del self.flowmap[hash]
|
||||
return None
|
||||
if not self.flowmap[hash]:
|
||||
del self.flowmap[hash]
|
||||
return ret
|
||||
else:
|
||||
return None
|
||||
|
||||
def configure(self, updated):
|
||||
if ctx.options.server_replay_kill_extra:
|
||||
logger.warning(
|
||||
"server_replay_kill_extra has been deprecated, "
|
||||
"please update your config to use server_replay_extra='kill'."
|
||||
)
|
||||
if ctx.options.server_replay_nopop: # pragma: no cover
|
||||
logger.error(
|
||||
"server_replay_nopop has been renamed to server_replay_reuse, please update your config."
|
||||
)
|
||||
if not self.configured and ctx.options.server_replay:
|
||||
self.configured = True
|
||||
try:
|
||||
flows = io.read_flows_from_paths(ctx.options.server_replay)
|
||||
except exceptions.FlowReadException as e:
|
||||
raise exceptions.OptionsError(str(e))
|
||||
self.load_flows(flows)
|
||||
if any(option in updated for option in HASH_OPTIONS):
|
||||
self.recompute_hashes()
|
||||
|
||||
def recompute_hashes(self) -> None:
|
||||
"""
|
||||
Rebuild flowmap if the hashing method has changed during execution,
|
||||
see https://github.com/mitmproxy/mitmproxy/issues/4506
|
||||
"""
|
||||
flows = [flow for lst in self.flowmap.values() for flow in lst]
|
||||
self.load_flows(flows)
|
||||
|
||||
def request(self, f: http.HTTPFlow) -> None:
|
||||
if self.flowmap:
|
||||
rflow = self.next_flow(f)
|
||||
if rflow:
|
||||
assert rflow.response
|
||||
response = rflow.response.copy()
|
||||
if ctx.options.server_replay_refresh:
|
||||
response.refresh()
|
||||
f.response = response
|
||||
f.is_replay = "response"
|
||||
elif (
|
||||
ctx.options.server_replay_kill_extra
|
||||
or ctx.options.server_replay_extra == "kill"
|
||||
):
|
||||
logging.warning(
|
||||
"server_playback: killed non-replay request {}".format(
|
||||
f.request.url
|
||||
)
|
||||
)
|
||||
f.kill()
|
||||
elif ctx.options.server_replay_extra != "forward":
|
||||
logging.warning(
|
||||
"server_playback: returned {} non-replay request {}".format(
|
||||
ctx.options.server_replay_extra, f.request.url
|
||||
)
|
||||
)
|
||||
f.response = http.Response.make(int(ctx.options.server_replay_extra))
|
||||
f.is_replay = "response"
|
||||
38
venv/Lib/site-packages/mitmproxy/addons/stickyauth.py
Normal file
@@ -0,0 +1,38 @@
|
||||
from typing import Optional
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flowfilter
|
||||
|
||||
|
||||
class StickyAuth:
    """Remember Authorization headers per host and re-attach them to matching requests."""

    def __init__(self):
        # Compiled sticky-auth filter, or None when the option is unset.
        self.flt = None
        # host -> most recent Authorization header value seen for that host.
        self.hosts = {}

    def load(self, loader):
        loader.add_option(
            "stickyauth",
            Optional[str],
            None,
            "Set sticky auth filter. Matched against requests.",
        )

    def configure(self, updated):
        if "stickyauth" not in updated:
            return
        if not ctx.options.stickyauth:
            self.flt = None
            return
        try:
            self.flt = flowfilter.parse(ctx.options.stickyauth)
        except ValueError as e:
            raise exceptions.OptionsError(str(e)) from e

    def request(self, flow):
        if not self.flt:
            return
        host = flow.request.host
        if "authorization" in flow.request.headers:
            # Record the credentials the client just sent for this host.
            self.hosts[host] = flow.request.headers["authorization"]
        elif flowfilter.match(self.flt, flow):
            # Re-attach previously seen credentials, if any.
            if host in self.hosts:
                flow.request.headers["authorization"] = self.hosts[host]
|
||||
97
venv/Lib/site-packages/mitmproxy/addons/stickycookie.py
Normal file
@@ -0,0 +1,97 @@
|
||||
import collections
|
||||
from http import cookiejar
|
||||
from typing import Optional
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flowfilter
|
||||
from mitmproxy import http
|
||||
from mitmproxy.net.http import cookies
|
||||
|
||||
# A cookie origin: (domain, port, path).
TOrigin = tuple[str, int, str]


def ckey(attrs: dict[str, str], f: http.HTTPFlow) -> TOrigin:
    """
    Returns a (domain, port, path) tuple.

    The cookie's own domain/path attributes take precedence over the
    request's host and the default path "/".
    """
    domain = attrs.get("domain", f.request.host)
    path = attrs.get("path", "/")
    return (domain, f.request.port, path)
|
||||
|
||||
|
||||
def domain_match(a: str, b: str) -> bool:
    """True if host *a* domain-matches *b*, also tolerating stray leading/trailing dots on *b*."""
    return bool(
        cookiejar.domain_match(a, b)  # type: ignore
        or cookiejar.domain_match(a, b.strip("."))  # type: ignore
    )
|
||||
|
||||
|
||||
class StickyCookie:
    """Store Set-Cookie values per origin and re-send them on matching requests."""

    def __init__(self) -> None:
        # (domain, port, path) -> {cookie name: cookie value}
        self.jar: collections.defaultdict[TOrigin, dict[str, str]] = (
            collections.defaultdict(dict)
        )
        # Compiled sticky-cookie filter, or None when the option is unset.
        self.flt: flowfilter.TFilter | None = None

    def load(self, loader):
        loader.add_option(
            "stickycookie",
            Optional[str],
            None,
            "Set sticky cookie filter. Matched against requests.",
        )

    def configure(self, updated):
        if "stickycookie" not in updated:
            return
        if not ctx.options.stickycookie:
            self.flt = None
            return
        try:
            self.flt = flowfilter.parse(ctx.options.stickycookie)
        except ValueError as e:
            raise exceptions.OptionsError(str(e)) from e

    def response(self, flow: http.HTTPFlow):
        assert flow.response
        if not self.flt:
            return
        for name, (value, attrs) in flow.response.cookies.items(multi=True):
            # FIXME: We now know that Cookie.py screws up some cookies with
            # valid RFC 822/1123 datetime specifications for expiry. Sigh.
            origin = ckey(attrs, flow)
            if not domain_match(flow.request.host, origin[0]):
                continue
            if cookies.is_expired(attrs):
                # Remove the cookie from jar
                self.jar[origin].pop(name, None)
                # If all cookies of an origin have been removed,
                # remove the origin entry from the jar itself.
                if not self.jar[origin]:
                    self.jar.pop(origin, None)
            else:
                self.jar[origin][name] = value

    def request(self, flow: http.HTTPFlow):
        if not self.flt:
            return
        cookie_list: list[tuple[str, str]] = []
        if flowfilter.match(self.flt, flow):
            for (domain, port, path), stored in self.jar.items():
                applies = (
                    domain_match(flow.request.host, domain)
                    and flow.request.port == port
                    and flow.request.path.startswith(path)
                )
                if applies:
                    cookie_list.extend(stored.items())
        if cookie_list:
            # FIXME: we need to formalise this...
            flow.metadata["stickycookie"] = True
            flow.request.headers["cookie"] = cookies.format_cookie_header(
                cookie_list
            )
|
||||
@@ -0,0 +1,37 @@
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import dns
|
||||
from mitmproxy.net.dns import types
|
||||
|
||||
|
||||
class StripDnsHttpsRecords:
    """Scrub DNS HTTPS records so clients connect in a way mitmproxy can intercept."""

    def load(self, loader):
        loader.add_option(
            "strip_ech",
            bool,
            True,
            "Strip Encrypted ClientHello (ECH) data from DNS HTTPS records so that mitmproxy can generate matching certificates.",
        )

    def dns_response(self, flow: dns.DNSFlow):
        assert flow.response
        if ctx.options.strip_ech:
            # Remove ECH data so the ClientHello stays readable.
            for answer in flow.response.answers:
                if answer.type == types.HTTPS:
                    answer.https_ech = None
        if not ctx.options.http3:
            # HTTP/3 is disabled: strip h3 ALPN hints so clients fall back to TCP.
            for answer in flow.response.answers:
                if answer.type != types.HTTPS or answer.https_alpn is None:
                    continue
                # HTTP/3 or any of the spec drafts (h3-...)?
                advertises_h3 = any(
                    a == b"h3" or a.startswith(b"h3-") for a in answer.https_alpn
                )
                if advertises_h3:
                    remaining = tuple(
                        a
                        for a in answer.https_alpn
                        if a != b"h3" and not a.startswith(b"h3-")
                    )
                    answer.https_alpn = remaining or None
|
||||
50
venv/Lib/site-packages/mitmproxy/addons/termlog.py
Normal file
@@ -0,0 +1,50 @@
|
||||
from __future__ import annotations
|
||||
|
||||
import asyncio
|
||||
import logging
|
||||
import sys
|
||||
from typing import IO
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import log
|
||||
from mitmproxy.utils import vt_codes
|
||||
|
||||
|
||||
class TermLog:
    """Write mitmproxy log messages to the terminal."""

    _teardown_task: asyncio.Task | None = None

    def __init__(self, out: IO[str] | None = None):
        self.logger = TermLogHandler(out)
        self.logger.install()

    def load(self, loader):
        loader.add_option(
            "termlog_verbosity", str, "info", "Log verbosity.", choices=log.LogLevels
        )
        # Default until configure() applies the user's termlog_verbosity.
        self.logger.setLevel(logging.INFO)

    def configure(self, updated):
        if "termlog_verbosity" in updated:
            # Option values are lowercase; logging levels are uppercase names.
            self.logger.setLevel(ctx.options.termlog_verbosity.upper())

    def uninstall(self) -> None:
        # Uninstall the log dumper. This happens at the very very end after
        # done() is completed, because we don't want to uninstall while other
        # addons are still logging.
        self.logger.uninstall()
|
||||
|
||||
|
||||
class TermLogHandler(log.MitmLogHandler):
    """Log handler that prints formatted records to a terminal stream."""

    def __init__(self, out: IO[str] | None = None):
        super().__init__()
        self.file: IO[str] = out or sys.stdout
        # Use colored output only when the stream supports VT escape codes.
        self.has_vt_codes = vt_codes.ensure_supported(self.file)
        self.formatter = log.MitmFormatter(self.has_vt_codes)

    def emit(self, record: logging.LogRecord) -> None:
        try:
            print(self.format(record), file=self.file)
        except OSError:
            # We cannot print, exit immediately.
            # See https://github.com/mitmproxy/mitmproxy/issues/4669
            sys.exit(1)
|
||||
654
venv/Lib/site-packages/mitmproxy/addons/tlsconfig.py
Normal file
@@ -0,0 +1,654 @@
|
||||
import ipaddress
|
||||
import logging
|
||||
import os
|
||||
import ssl
|
||||
import urllib.parse
|
||||
from pathlib import Path
|
||||
from typing import Any
|
||||
from typing import Literal
|
||||
from typing import TypedDict
|
||||
|
||||
from aioquic.h3.connection import H3_ALPN
|
||||
from aioquic.tls import CipherSuite
|
||||
from cryptography import x509
|
||||
from OpenSSL import SSL
|
||||
|
||||
from mitmproxy import certs
|
||||
from mitmproxy import connection
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import http
|
||||
from mitmproxy import tls
|
||||
from mitmproxy.net import tls as net_tls
|
||||
from mitmproxy.options import CONF_BASENAME
|
||||
from mitmproxy.proxy import context
|
||||
from mitmproxy.proxy.layers import modes
|
||||
from mitmproxy.proxy.layers import quic
|
||||
from mitmproxy.proxy.layers import tls as proxy_tls
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# We manually need to specify this, otherwise OpenSSL may select a non-HTTP2 cipher by default.
|
||||
# https://ssl-config.mozilla.org/#config=old
|
||||
|
||||
_DEFAULT_CIPHERS = (
|
||||
"ECDHE-ECDSA-AES128-GCM-SHA256",
|
||||
"ECDHE-RSA-AES128-GCM-SHA256",
|
||||
"ECDHE-ECDSA-AES256-GCM-SHA384",
|
||||
"ECDHE-RSA-AES256-GCM-SHA384",
|
||||
"ECDHE-ECDSA-CHACHA20-POLY1305",
|
||||
"ECDHE-RSA-CHACHA20-POLY1305",
|
||||
"DHE-RSA-AES128-GCM-SHA256",
|
||||
"DHE-RSA-AES256-GCM-SHA384",
|
||||
"DHE-RSA-CHACHA20-POLY1305",
|
||||
"ECDHE-ECDSA-AES128-SHA256",
|
||||
"ECDHE-RSA-AES128-SHA256",
|
||||
"ECDHE-ECDSA-AES128-SHA",
|
||||
"ECDHE-RSA-AES128-SHA",
|
||||
"ECDHE-ECDSA-AES256-SHA384",
|
||||
"ECDHE-RSA-AES256-SHA384",
|
||||
"ECDHE-ECDSA-AES256-SHA",
|
||||
"ECDHE-RSA-AES256-SHA",
|
||||
"DHE-RSA-AES128-SHA256",
|
||||
"DHE-RSA-AES256-SHA256",
|
||||
"AES128-GCM-SHA256",
|
||||
"AES256-GCM-SHA384",
|
||||
"AES128-SHA256",
|
||||
"AES256-SHA256",
|
||||
"AES128-SHA",
|
||||
"AES256-SHA",
|
||||
"DES-CBC3-SHA",
|
||||
)
|
||||
|
||||
_DEFAULT_CIPHERS_WITH_SECLEVEL_0 = ("@SECLEVEL=0", *_DEFAULT_CIPHERS)
|
||||
|
||||
|
||||
def _default_ciphers(
    min_tls_version: net_tls.Version,
) -> tuple[str, ...]:
    """
    Pick the default cipher list for the given minimum TLS version.

    @SECLEVEL=0 is necessary for TLS 1.1 and below to work,
    see https://github.com/pyca/cryptography/issues/9523
    """
    needs_seclevel_0 = min_tls_version in net_tls.INSECURE_TLS_MIN_VERSIONS
    return _DEFAULT_CIPHERS_WITH_SECLEVEL_0 if needs_seclevel_0 else _DEFAULT_CIPHERS
|
||||
|
||||
|
||||
# 2022/05: X509_CHECK_FLAG_NEVER_CHECK_SUBJECT is not available in LibreSSL, ignore gracefully as it's not critical.
# Hostname-check flags applied to upstream certificate verification below.
DEFAULT_HOSTFLAGS = (
    SSL._lib.X509_CHECK_FLAG_NO_PARTIAL_WILDCARDS  # type: ignore
    | getattr(SSL._lib, "X509_CHECK_FLAG_NEVER_CHECK_SUBJECT", 0)  # type: ignore
)
|
||||
|
||||
|
||||
class AppData(TypedDict):
|
||||
client_alpn: bytes | None
|
||||
server_alpn: bytes | None
|
||||
http2: bool
|
||||
|
||||
|
||||
def alpn_select_callback(conn: SSL.Connection, options: list[bytes]) -> Any:
    """Select the ALPN protocol for a client connection.

    Preference order: a forced client ALPN, then the protocol negotiated with
    the upstream server, then the first HTTP protocol the client offers.
    """
    app_data: AppData = conn.get_app_data()
    client_alpn = app_data["client_alpn"]
    server_alpn = app_data["server_alpn"]
    http2 = app_data["http2"]
    if client_alpn is not None:
        # A forced client ALPN is all-or-nothing: no fallback to other options.
        if client_alpn in options:
            return client_alpn
        else:
            return SSL.NO_OVERLAPPING_PROTOCOLS
    if server_alpn and server_alpn in options:
        return server_alpn
    if server_alpn == b"":
        # We do have a server connection, but the remote server refused to negotiate a protocol:
        # We need to mirror this on the client connection.
        return SSL.NO_OVERLAPPING_PROTOCOLS
    http_alpns = proxy_tls.HTTP_ALPNS if http2 else proxy_tls.HTTP1_ALPNS
    # client sends in order of preference, so we are nice and respect that.
    for alpn in options:
        if alpn in http_alpns:
            return alpn
    else:
        return SSL.NO_OVERLAPPING_PROTOCOLS
|
||||
|
||||
|
||||
class TlsConfig:
|
||||
"""
|
||||
This addon supplies the proxy core with the desired OpenSSL connection objects to negotiate TLS.
|
||||
"""
|
||||
|
||||
certstore: certs.CertStore = None # type: ignore
|
||||
|
||||
# TODO: We should support configuring TLS 1.3 cipher suites (https://github.com/mitmproxy/mitmproxy/issues/4260)
|
||||
# TODO: We should re-use SSL.Context options here, if only for TLS session resumption.
|
||||
# This may require patches to pyOpenSSL, as some functionality is only exposed on contexts.
|
||||
# TODO: This addon should manage the following options itself, which are current defined in mitmproxy/options.py:
|
||||
# - upstream_cert
|
||||
# - add_upstream_certs_to_client_chain
|
||||
# - key_size
|
||||
# - certs
|
||||
# - cert_passphrase
|
||||
# - ssl_verify_upstream_trusted_ca
|
||||
# - ssl_verify_upstream_trusted_confdir
|
||||
|
||||
def load(self, loader):
|
||||
insecure_tls_min_versions = (
|
||||
", ".join(x.name for x in net_tls.INSECURE_TLS_MIN_VERSIONS[:-1])
|
||||
+ f" and {net_tls.INSECURE_TLS_MIN_VERSIONS[-1].name}"
|
||||
)
|
||||
loader.add_option(
|
||||
name="tls_version_client_min",
|
||||
typespec=str,
|
||||
default=net_tls.DEFAULT_MIN_VERSION.name,
|
||||
choices=[x.name for x in net_tls.Version],
|
||||
help=f"Set the minimum TLS version for client connections. "
|
||||
f"{insecure_tls_min_versions} are insecure.",
|
||||
)
|
||||
loader.add_option(
|
||||
name="tls_version_client_max",
|
||||
typespec=str,
|
||||
default=net_tls.DEFAULT_MAX_VERSION.name,
|
||||
choices=[x.name for x in net_tls.Version],
|
||||
help=f"Set the maximum TLS version for client connections.",
|
||||
)
|
||||
loader.add_option(
|
||||
name="tls_version_server_min",
|
||||
typespec=str,
|
||||
default=net_tls.DEFAULT_MIN_VERSION.name,
|
||||
choices=[x.name for x in net_tls.Version],
|
||||
help=f"Set the minimum TLS version for server connections. "
|
||||
f"{insecure_tls_min_versions} are insecure.",
|
||||
)
|
||||
loader.add_option(
|
||||
name="tls_version_server_max",
|
||||
typespec=str,
|
||||
default=net_tls.DEFAULT_MAX_VERSION.name,
|
||||
choices=[x.name for x in net_tls.Version],
|
||||
help=f"Set the maximum TLS version for server connections.",
|
||||
)
|
||||
loader.add_option(
|
||||
name="tls_ecdh_curve_client",
|
||||
typespec=str | None,
|
||||
default=None,
|
||||
help="Use a specific elliptic curve for ECDHE key exchange on client connections. "
|
||||
'OpenSSL syntax, for example "prime256v1" (see `openssl ecparam -list_curves`).',
|
||||
)
|
||||
loader.add_option(
|
||||
name="tls_ecdh_curve_server",
|
||||
typespec=str | None,
|
||||
default=None,
|
||||
help="Use a specific elliptic curve for ECDHE key exchange on server connections. "
|
||||
'OpenSSL syntax, for example "prime256v1" (see `openssl ecparam -list_curves`).',
|
||||
)
|
||||
loader.add_option(
|
||||
name="request_client_cert",
|
||||
typespec=bool,
|
||||
default=False,
|
||||
help=f"Requests a client certificate (TLS message 'CertificateRequest') to establish a mutual TLS connection between client and mitmproxy (combined with 'client_certs' option for mitmproxy and upstream).",
|
||||
)
|
||||
loader.add_option(
|
||||
"ciphers_client",
|
||||
str | None,
|
||||
None,
|
||||
"Set supported ciphers for client <-> mitmproxy connections using OpenSSL syntax.",
|
||||
)
|
||||
loader.add_option(
|
||||
"ciphers_server",
|
||||
str | None,
|
||||
None,
|
||||
"Set supported ciphers for mitmproxy <-> server connections using OpenSSL syntax.",
|
||||
)
|
||||
|
||||
def tls_clienthello(self, tls_clienthello: tls.ClientHelloData):
|
||||
conn_context = tls_clienthello.context
|
||||
tls_clienthello.establish_server_tls_first = (
|
||||
conn_context.server.tls and ctx.options.connection_strategy == "eager"
|
||||
)
|
||||
|
||||
    def tls_start_client(self, tls_start: tls.TlsData) -> None:
        """Establish TLS or DTLS between client and proxy."""
        if tls_start.ssl_conn is not None:
            return  # a user addon has already provided the pyOpenSSL context.

        assert isinstance(tls_start.conn, connection.Client)

        client: connection.Client = tls_start.conn
        server: connection.Server = tls_start.context.server

        # Certificate entry (cert, key, chain file) for the requested host.
        entry = self.get_cert(tls_start.context)

        if not client.cipher_list and ctx.options.ciphers_client:
            client.cipher_list = ctx.options.ciphers_client.split(":")
        # don't assign to client.cipher_list, doesn't need to be stored.
        cipher_list = client.cipher_list or _default_ciphers(
            net_tls.Version[ctx.options.tls_version_client_min]
        )

        if ctx.options.add_upstream_certs_to_client_chain:  # pragma: no cover
            # exempted from coverage until https://bugs.python.org/issue18233 is fixed.
            extra_chain_certs = server.certificate_list
        else:
            extra_chain_certs = []

        ssl_ctx = net_tls.create_client_proxy_context(
            method=net_tls.Method.DTLS_SERVER_METHOD
            if tls_start.is_dtls
            else net_tls.Method.TLS_SERVER_METHOD,
            min_version=net_tls.Version[ctx.options.tls_version_client_min],
            max_version=net_tls.Version[ctx.options.tls_version_client_max],
            cipher_list=tuple(cipher_list),
            ecdh_curve=net_tls.get_curve(ctx.options.tls_ecdh_curve_client),
            chain_file=entry.chain_file,
            request_client_cert=ctx.options.request_client_cert,
            alpn_select_callback=alpn_select_callback,
            extra_chain_certs=tuple(extra_chain_certs),
            dhparams=self.certstore.dhparams,
        )
        tls_start.ssl_conn = SSL.Connection(ssl_ctx)

        tls_start.ssl_conn.use_certificate(entry.cert.to_cryptography())
        tls_start.ssl_conn.use_privatekey(entry.privatekey)

        # Force HTTP/1 for secure web proxies, we currently don't support CONNECT over HTTP/2.
        # There is a proof-of-concept branch at https://github.com/mhils/mitmproxy/tree/http2-proxy,
        # but the complexity outweighs the benefits for now.
        if len(tls_start.context.layers) == 2 and isinstance(
            tls_start.context.layers[0], modes.HttpProxy
        ):
            client_alpn: bytes | None = b"http/1.1"
        else:
            client_alpn = client.alpn

        # Stash negotiation inputs for alpn_select_callback.
        tls_start.ssl_conn.set_app_data(
            AppData(
                client_alpn=client_alpn,
                server_alpn=server.alpn,
                http2=ctx.options.http2,
            )
        )
        # We act as the TLS server towards the client.
        tls_start.ssl_conn.set_accept_state()
|
||||
|
||||
    def tls_start_server(self, tls_start: tls.TlsData) -> None:
        """Establish TLS or DTLS between proxy and server."""
        if tls_start.ssl_conn is not None:
            return  # a user addon has already provided the pyOpenSSL context.

        assert isinstance(tls_start.conn, connection.Server)

        client: connection.Client = tls_start.context.client
        # tls_start.conn may be different from tls_start.context.server, e.g. an upstream HTTPS proxy.
        server: connection.Server = tls_start.conn
        assert server.address

        if ctx.options.ssl_insecure:
            verify = net_tls.Verify.VERIFY_NONE
        else:
            verify = net_tls.Verify.VERIFY_PEER

        # Fall back to the client's SNI or the literal destination address.
        if server.sni is None:
            server.sni = client.sni or server.address[0]

        if not server.alpn_offers:
            if client.alpn_offers:
                if ctx.options.http2:
                    # We would perfectly support HTTP/1 -> HTTP/2, but we want to keep things on the same protocol
                    # version. There are some edge cases where we want to mirror the regular server's behavior
                    # accurately, for example header capitalization.
                    server.alpn_offers = tuple(client.alpn_offers)
                else:
                    server.alpn_offers = tuple(
                        x for x in client.alpn_offers if x != b"h2"
                    )
            else:
                # We either have no client TLS or a client without ALPN.
                # - If the client does use TLS but did not send an ALPN extension, we want to mirror that upstream.
                # - If the client does not use TLS, there's no clear-cut answer. As a pragmatic approach, we also do
                #   not send any ALPN extension in this case, which defaults to whatever protocol we are speaking
                #   or falls back to HTTP.
                server.alpn_offers = []

        if not server.cipher_list and ctx.options.ciphers_server:
            server.cipher_list = ctx.options.ciphers_server.split(":")
        # don't assign the computed default back, doesn't need to be stored.
        # NOTE(review): an earlier comment here said "client.cipher_list" — this is the server-side list.
        cipher_list = server.cipher_list or _default_ciphers(
            net_tls.Version[ctx.options.tls_version_server_min]
        )

        # Select a client certificate for mutual TLS with the upstream server:
        # either a single file, or a per-host "<sni>.pem" inside a directory.
        client_cert: str | None = None
        if ctx.options.client_certs:
            client_certs = os.path.expanduser(ctx.options.client_certs)
            if os.path.isfile(client_certs):
                client_cert = client_certs
            else:
                server_name: str = server.sni or server.address[0]
                p = os.path.join(client_certs, f"{server_name}.pem")
                if os.path.isfile(p):
                    client_cert = p

        ssl_ctx = net_tls.create_proxy_server_context(
            method=net_tls.Method.DTLS_CLIENT_METHOD
            if tls_start.is_dtls
            else net_tls.Method.TLS_CLIENT_METHOD,
            min_version=net_tls.Version[ctx.options.tls_version_server_min],
            max_version=net_tls.Version[ctx.options.tls_version_server_max],
            cipher_list=tuple(cipher_list),
            ecdh_curve=net_tls.get_curve(ctx.options.tls_ecdh_curve_server),
            verify=verify,
            ca_path=ctx.options.ssl_verify_upstream_trusted_confdir,
            ca_pemfile=ctx.options.ssl_verify_upstream_trusted_ca,
            client_cert=client_cert,
            legacy_server_connect=ctx.options.ssl_insecure,
        )

        tls_start.ssl_conn = SSL.Connection(ssl_ctx)
        if server.sni:
            # We need to set SNI + enable hostname verification.
            assert isinstance(server.sni, str)
            # Manually enable hostname verification on the context object.
            # https://wiki.openssl.org/index.php/Hostname_validation
            param = SSL._lib.SSL_get0_param(tls_start.ssl_conn._ssl)  # type: ignore
            # Matching on the CN is disabled in both Chrome and Firefox, so we disable it, too.
            # https://www.chromestatus.com/feature/4981025180483584

            SSL._lib.X509_VERIFY_PARAM_set_hostflags(param, DEFAULT_HOSTFLAGS)  # type: ignore

            try:
                ip: bytes = ipaddress.ip_address(server.sni).packed
            except ValueError:
                # SNI is a hostname: send it in the handshake and verify against it.
                host_name = server.sni.encode("idna")
                tls_start.ssl_conn.set_tlsext_host_name(host_name)
                ok = SSL._lib.X509_VERIFY_PARAM_set1_host(  # type: ignore
                    param, host_name, len(host_name)
                )  # type: ignore
                SSL._openssl_assert(ok == 1)  # type: ignore
            else:
                # RFC 6066: Literal IPv4 and IPv6 addresses are not permitted in "HostName",
                # so we don't call set_tlsext_host_name.
                ok = SSL._lib.X509_VERIFY_PARAM_set1_ip(param, ip, len(ip))  # type: ignore
                SSL._openssl_assert(ok == 1)  # type: ignore
        elif verify is not net_tls.Verify.VERIFY_NONE:
            raise ValueError("Cannot validate certificate hostname without SNI")

        if server.alpn_offers:
            tls_start.ssl_conn.set_alpn_protos(list(server.alpn_offers))

        # We act as the TLS client towards the upstream server.
        tls_start.ssl_conn.set_connect_state()
|
||||
|
||||
    def quic_start_client(self, tls_start: quic.QuicTlsData) -> None:
        """Establish QUIC between client and proxy."""
        if tls_start.settings is not None:
            return  # a user addon has already provided the settings.
        tls_start.settings = quic.QuicTlsSettings()

        # keep the following part in sync with `tls_start_client`
        assert isinstance(tls_start.conn, connection.Client)

        client: connection.Client = tls_start.conn
        server: connection.Server = tls_start.context.server

        # Certificate entry (cert, key, chain) for the requested host.
        entry = self.get_cert(tls_start.context)

        if not client.cipher_list and ctx.options.ciphers_client:
            client.cipher_list = ctx.options.ciphers_client.split(":")

        if ctx.options.add_upstream_certs_to_client_chain:  # pragma: no cover
            extra_chain_certs = server.certificate_list
        else:
            extra_chain_certs = []

        # set context parameters
        if client.cipher_list:
            tls_start.settings.cipher_suites = [
                CipherSuite[cipher] for cipher in client.cipher_list
            ]
        # if we don't have upstream ALPN, we allow all offered by the client
        tls_start.settings.alpn_protocols = [
            alpn.decode("ascii")
            for alpn in [alpn for alpn in (client.alpn, server.alpn) if alpn]
            or client.alpn_offers
        ]

        # set the certificates
        tls_start.settings.certificate = entry.cert._cert
        tls_start.settings.certificate_private_key = entry.privatekey
        tls_start.settings.certificate_chain = [
            cert._cert for cert in (*entry.chain_certs, *extra_chain_certs)
        ]
|
||||
|
||||
    def quic_start_server(self, tls_start: quic.QuicTlsData) -> None:
        """Establish QUIC between proxy and server."""
        if tls_start.settings is not None:
            return  # a user addon has already provided the settings.
        tls_start.settings = quic.QuicTlsSettings()

        # keep the following part in sync with `tls_start_server`
        assert isinstance(tls_start.conn, connection.Server)

        client: connection.Client = tls_start.context.client
        server: connection.Server = tls_start.conn
        assert server.address

        if ctx.options.ssl_insecure:
            tls_start.settings.verify_mode = ssl.CERT_NONE
        else:
            tls_start.settings.verify_mode = ssl.CERT_REQUIRED

        # Fall back to the client's SNI or the literal destination address.
        if server.sni is None:
            server.sni = client.sni or server.address[0]

        if not server.alpn_offers:
            if client.alpn_offers:
                server.alpn_offers = tuple(client.alpn_offers)
            else:
                # aioquic fails if no ALPN is offered, so use H3
                server.alpn_offers = tuple(alpn.encode("ascii") for alpn in H3_ALPN)

        if not server.cipher_list and ctx.options.ciphers_server:
            server.cipher_list = ctx.options.ciphers_server.split(":")

        # set context parameters
        if server.cipher_list:
            tls_start.settings.cipher_suites = [
                CipherSuite[cipher] for cipher in server.cipher_list
            ]
        if server.alpn_offers:
            tls_start.settings.alpn_protocols = [
                alpn.decode("ascii") for alpn in server.alpn_offers
            ]

        # set the certificates
        # NOTE client certificates are not supported
        tls_start.settings.ca_path = ctx.options.ssl_verify_upstream_trusted_confdir
        tls_start.settings.ca_file = ctx.options.ssl_verify_upstream_trusted_ca
|
||||
|
||||
    def running(self):
        """Initialize the certificate store once the proxy is running."""
        # FIXME: We have a weird bug where the contract for configure is not followed and it is never called with
        # confdir or command_history as updated.
        self.configure("confdir")  # pragma: no cover
|
||||
|
||||
def configure(self, updated):
    """(Re)load the certificate store and validate TLS-related options.

    Raises:
        exceptions.OptionsError: for missing/invalid certificate files or
            an unknown ECDH curve name.
    """
    # Any of these options invalidates the current certstore.
    if (
        "certs" in updated
        or "confdir" in updated
        or "key_size" in updated
        or "cert_passphrase" in updated
    ):
        certstore_path = os.path.expanduser(ctx.options.confdir)
        self.certstore = certs.CertStore.from_store(
            path=certstore_path,
            basename=CONF_BASENAME,
            key_size=ctx.options.key_size,
            passphrase=ctx.options.cert_passphrase.encode("utf8")
            if ctx.options.cert_passphrase
            else None,
        )
        if self.certstore.default_ca.has_expired():
            logger.warning(
                "The mitmproxy certificate authority has expired!\n"
                "Please delete all CA-related files in your ~/.mitmproxy folder.\n"
                "The CA will be regenerated automatically after restarting mitmproxy.\n"
                "See https://docs.mitmproxy.org/stable/concepts-certificates/ for additional help.",
            )

        # Each entry is "[spec=]path"; without a spec the cert applies to "*".
        for certspec in ctx.options.certs:
            parts = certspec.split("=", 1)
            if len(parts) == 1:
                parts = ["*", parts[0]]

            cert = Path(parts[1]).expanduser()
            if not cert.exists():
                raise exceptions.OptionsError(
                    f"Certificate file does not exist: {cert}"
                )
            try:
                self.certstore.add_cert_file(
                    parts[0],
                    cert,
                    passphrase=ctx.options.cert_passphrase.encode("utf8")
                    if ctx.options.cert_passphrase
                    else None,
                )
            except ValueError as e:
                raise exceptions.OptionsError(
                    f"Invalid certificate format for {cert}: {e}"
                ) from e

    if "tls_ecdh_curve_client" in updated or "tls_ecdh_curve_server" in updated:
        for ecdh_curve in [
            ctx.options.tls_ecdh_curve_client,
            ctx.options.tls_ecdh_curve_server,
        ]:
            if ecdh_curve is not None and ecdh_curve not in net_tls.EC_CURVES:
                raise exceptions.OptionsError(
                    f"Invalid ECDH curve: {ecdh_curve!r}. Valid curves are: {', '.join(net_tls.EC_CURVES)}"
                )

    # Warn (don't fail) when a configured TLS version is unavailable in
    # this OpenSSL build, or when insecure minimums need @SECLEVEL=0.
    if "tls_version_client_min" in updated:
        self._warn_unsupported_version("tls_version_client_min", True)
    if "tls_version_client_max" in updated:
        self._warn_unsupported_version("tls_version_client_max", False)
    if "tls_version_server_min" in updated:
        self._warn_unsupported_version("tls_version_server_min", True)
    if "tls_version_server_max" in updated:
        self._warn_unsupported_version("tls_version_server_max", False)
    if "tls_version_client_min" in updated or "ciphers_client" in updated:
        self._warn_seclevel_missing("client")
    if "tls_version_server_min" in updated or "ciphers_server" in updated:
        self._warn_seclevel_missing("server")
||||
def _warn_unsupported_version(self, attribute: str, warn_unbound: bool):
    """Log if the TLS version option named *attribute* is unavailable in this OpenSSL build.

    With ``warn_unbound=True``, an UNBOUNDED setting also produces an
    informational message listing the versions the build supports.
    """
    val = net_tls.Version[getattr(ctx.options, attribute)]
    supported_versions = [
        v for v in net_tls.Version if net_tls.is_supported_version(v)
    ]
    supported_versions_str = ", ".join(v.name for v in supported_versions)

    is_unbounded = val is net_tls.Version.UNBOUNDED
    if is_unbounded and warn_unbound:
        logger.info(
            f"{attribute} has been set to {val.name}. Note that your "
            f"OpenSSL build only supports the following TLS versions: {supported_versions_str}"
        )
    elif not is_unbounded and val not in supported_versions:
        logger.warning(
            f"{attribute} has been set to {val.name}, which is not supported by the current OpenSSL build. "
            f"The current build only supports the following versions: {supported_versions_str}"
        )
||||
def _warn_seclevel_missing(self, side: Literal["client", "server"]) -> None:
    """
    OpenSSL cipher spec need to specify @SECLEVEL for old TLS versions to work,
    see https://github.com/pyca/cryptography/issues/9523.
    """
    # Pick the cipher spec and minimum TLS version for the requested side.
    custom_ciphers = (
        ctx.options.ciphers_client if side == "client" else ctx.options.ciphers_server
    )
    min_tls_version = (
        ctx.options.tls_version_client_min
        if side == "client"
        else ctx.options.tls_version_server_min
    )

    needs_seclevel_hint = (
        custom_ciphers
        and net_tls.Version[min_tls_version] in net_tls.INSECURE_TLS_MIN_VERSIONS
        and "@SECLEVEL=0" not in custom_ciphers
    )
    if needs_seclevel_hint:
        logger.warning(
            f'With tls_version_{side}_min set to {min_tls_version}, ciphers_{side} must include "@SECLEVEL=0" '
            f"for insecure TLS versions to work."
        )
||||
def crl_path(self) -> str:
    """Return the magic URL path under which this proxy serves its CA's CRL."""
    # The CA serial number makes the path unique per generated CA; `request`
    # below matches on this suffix.
    return f"/mitmproxy-{self.certstore.default_ca.serial}.crl"
||||
def get_cert(self, conn_context: context.Context) -> certs.CertStoreEntry:
    """
    This function determines the Common Name (CN), Subject Alternative Names (SANs) and Organization Name
    our certificate should have and then fetches a matching cert from the certstore.
    """
    altnames: list[x509.GeneralName] = []
    organization: str | None = None
    crl_distribution_point: str | None = None

    # Use upstream certificate if available.
    if ctx.options.upstream_cert and conn_context.server.certificate_list:
        upstream_cert: certs.Cert = conn_context.server.certificate_list[0]
        if upstream_cert.cn:
            altnames.append(_ip_or_dns_name(upstream_cert.cn))
        altnames.extend(upstream_cert.altnames)
        if upstream_cert.organization:
            organization = upstream_cert.organization

        # Replace original URL path with the CA cert serial number, which acts as a magic token
        if crls := upstream_cert.crl_distribution_points:
            try:
                scheme, netloc, *_ = urllib.parse.urlsplit(crls[0])
            except ValueError:
                # Keep the upstream host but swallow an unparseable CRL URL.
                logger.info(f"Failed to parse CRL URL: {crls[0]!r}")
            else:
                # noinspection PyTypeChecker
                crl_distribution_point = urllib.parse.urlunsplit(
                    (scheme, netloc, self.crl_path(), None, None)
                )

    # Add SNI or our local IP address.
    if conn_context.client.sni:
        altnames.append(_ip_or_dns_name(conn_context.client.sni))
    else:
        altnames.append(_ip_or_dns_name(conn_context.client.sockname[0]))

    # If we already know of a server address, include that in the SANs as well.
    if conn_context.server.address:
        altnames.append(_ip_or_dns_name(conn_context.server.address[0]))

    # only keep first occurrence of each hostname
    altnames = list(dict.fromkeys(altnames))

    # RFC 2818: If a subjectAltName extension of type dNSName is present, that MUST be used as the identity.
    # In other words, the Common Name is irrelevant then.
    cn = next((str(x.value) for x in altnames), None)
    return self.certstore.get_cert(
        cn, altnames, organization, crl_distribution_point
    )
||||
def request(self, flow: http.HTTPFlow):
    """Answer requests for our magic CRL path with the default CA's CRL."""
    # Only act on live flows that have not been answered or errored yet.
    if not flow.live or flow.error or flow.response:
        return
    # Check if a request has a magic CRL token at the end
    if not flow.request.path.endswith(self.crl_path()):
        return
    flow.response = http.Response.make(
        200,
        self.certstore.default_crl,
        {"Content-Type": "application/pkix-crl"},
    )
|
||||
def _ip_or_dns_name(val: str) -> x509.GeneralName:
    """Convert a string into either an x509.IPAddress or x509.DNSName object."""
    try:
        parsed_ip = ipaddress.ip_address(val)
    except ValueError:
        # Not an IP literal: treat as a DNS name, IDNA-encoding it first.
        return x509.DNSName(val.encode("idna").decode())
    return x509.IPAddress(parsed_ip)
|
||||
33
venv/Lib/site-packages/mitmproxy/addons/update_alt_svc.py
Normal file
@@ -0,0 +1,33 @@
|
||||
import re
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy.http import HTTPFlow
|
||||
from mitmproxy.proxy import mode_specs
|
||||
|
||||
ALT_SVC = "alt-svc"
|
||||
HOST_PATTERN = r"([a-zA-Z0-9.-]*:\d{1,5})"
|
||||
|
||||
|
||||
def update_alt_svc_header(header: str, port: int) -> str:
    """Rewrite every host:port authority in an Alt-Svc header value to ``:port``.

    An empty host in Alt-Svc means "same host", so dropping the original
    host keeps clients pointed at the proxy.
    """
    # Same pattern as the module-level HOST_PATTERN constant.
    authority = re.compile(r"([a-zA-Z0-9.-]*:\d{1,5})")
    return authority.sub(f":{port}", header)
|
||||
|
||||
class UpdateAltSvc:
    """
    In reverse proxy mode, rewrite Alt-Svc response headers so that clients
    keep connecting through mitmproxy instead of switching to the announced
    alternative endpoint.
    """

    def load(self, loader):
        loader.add_option(
            "keep_alt_svc_header",
            bool,
            False,
            "Reverse Proxy: Keep Alt-Svc headers as-is, even if they do not point to mitmproxy. Enabling this option may cause clients to bypass the proxy.",
        )

    def responseheaders(self, flow: HTTPFlow):
        # Called before the response body is read; headers are available.
        assert flow.response
        if (
            not ctx.options.keep_alt_svc_header
            and isinstance(flow.client_conn.proxy_mode, mode_specs.ReverseMode)
            and ALT_SVC in flow.response.headers
        ):
            # Rewrite advertised authorities to the port we are listening on.
            _, listen_port, *_ = flow.client_conn.sockname
            headers = flow.response.headers
            headers[ALT_SVC] = update_alt_svc_header(headers[ALT_SVC], listen_port)
|
||||
62
venv/Lib/site-packages/mitmproxy/addons/upstream_auth.py
Normal file
@@ -0,0 +1,62 @@
|
||||
import base64
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import http
|
||||
from mitmproxy.proxy import mode_specs
|
||||
from mitmproxy.utils import strutils
|
||||
|
||||
|
||||
def parse_upstream_auth(auth: str) -> bytes:
    """Turn a ``username:password`` spec into a Basic auth header value.

    Raises:
        exceptions.OptionsError: if the spec contains no ``:`` separator
            after at least one character.
    """
    if re.search(".+:", auth) is None:
        raise exceptions.OptionsError("Invalid upstream auth specification: %s" % auth)
    encoded = base64.b64encode(strutils.always_bytes(auth))
    return b"Basic " + encoded
||||
|
||||
|
||||
class UpstreamAuth:
    """
    This addon handles authentication to systems upstream from us for the
    upstream proxy and reverse proxy mode. There are 3 cases:

    - Upstream proxy CONNECT requests should have authentication added, and
      subsequent already connected requests should not.
    - Upstream proxy regular requests
    - Reverse proxy regular requests (CONNECT is invalid in this mode)
    """

    # Pre-encoded "Basic ..." header value, or None if upstream_auth is unset.
    auth: bytes | None = None

    def load(self, loader):
        loader.add_option(
            "upstream_auth",
            Optional[str],
            None,
            """
            Add HTTP Basic authentication to upstream proxy and reverse proxy
            requests. Format: username:password.
            """,
        )

    def configure(self, updated):
        # Re-parse credentials whenever the option changes.
        if "upstream_auth" in updated:
            if ctx.options.upstream_auth is None:
                self.auth = None
            else:
                self.auth = parse_upstream_auth(ctx.options.upstream_auth)

    def http_connect_upstream(self, f: http.HTTPFlow):
        # CONNECT to the upstream proxy: authenticate the tunnel itself.
        if self.auth:
            f.request.headers["Proxy-Authorization"] = self.auth

    def requestheaders(self, f: http.HTTPFlow):
        if self.auth:
            if (
                isinstance(f.client_conn.proxy_mode, mode_specs.UpstreamMode)
                and f.request.scheme == "http"
            ):
                # Plain-HTTP requests through an upstream proxy carry proxy
                # credentials on every request (no CONNECT tunnel exists).
                f.request.headers["Proxy-Authorization"] = self.auth
            elif isinstance(f.client_conn.proxy_mode, mode_specs.ReverseMode):
                # Reverse proxy: authenticate against the target server itself.
                f.request.headers["Authorization"] = self.auth
|
||||
749
venv/Lib/site-packages/mitmproxy/addons/view.py
Normal file
@@ -0,0 +1,749 @@
|
||||
"""
|
||||
The View:
|
||||
|
||||
- Keeps track of a store of flows
|
||||
- Maintains a filtered, ordered view onto that list of flows
|
||||
- Exposes a number of signals so the view can be monitored
|
||||
- Tracks focus within the view
|
||||
- Exposes a settings store for flows that automatically expires if the flow is
|
||||
removed from the store.
|
||||
"""
|
||||
|
||||
import collections
|
||||
import logging
|
||||
import re
|
||||
from collections.abc import Iterator
|
||||
from collections.abc import MutableMapping
|
||||
from collections.abc import Sequence
|
||||
from typing import Any
|
||||
from typing import Optional
|
||||
|
||||
import sortedcontainers
|
||||
|
||||
import mitmproxy.flow
|
||||
from mitmproxy import command
|
||||
from mitmproxy import connection
|
||||
from mitmproxy import ctx
|
||||
from mitmproxy import dns
|
||||
from mitmproxy import exceptions
|
||||
from mitmproxy import flowfilter
|
||||
from mitmproxy import hooks
|
||||
from mitmproxy import http
|
||||
from mitmproxy import io
|
||||
from mitmproxy import tcp
|
||||
from mitmproxy import udp
|
||||
from mitmproxy.log import ALERT
|
||||
from mitmproxy.utils import human
|
||||
from mitmproxy.utils import signals
|
||||
|
||||
# The underlying sorted list implementation expects the sort key to be stable
|
||||
# for the lifetime of the object. However, if we sort by size, for instance,
|
||||
# the sort order changes as the flow progresses through its lifecycle. We
|
||||
# address this through two means:
|
||||
#
|
||||
# - Let order keys cache the sort value by flow ID.
|
||||
#
|
||||
# - Add a facility to refresh items in the list by removing and re-adding them
|
||||
# when they are updated.
|
||||
|
||||
|
||||
class _OrderKey:
    """
    Base class for view sort keys.

    Sort values are cached in the view's per-flow settings (keyed by this
    instance's id) so the sorted container sees a stable key even when the
    underlying value changes over a flow's lifecycle (see module comment).
    """

    def __init__(self, view):
        self.view = view

    def generate(self, f: mitmproxy.flow.Flow) -> Any:  # pragma: no cover
        # Subclasses compute the raw sort value for a flow.
        pass

    def refresh(self, f):
        # Re-insert the flow if its cached sort value went stale, keeping the
        # sorted container consistent; fires sig_view_refresh on change.
        k = self._key()
        old = self.view.settings[f][k]
        new = self.generate(f)
        if old != new:
            self.view._view.remove(f)
            self.view.settings[f][k] = new
            self.view._view.add(f)
            self.view.sig_view_refresh.send()

    def _key(self):
        # Per-instance settings key under which the sort value is cached.
        return "_order_%s" % id(self)

    def __call__(self, f):
        if f.id in self.view._store:
            # Cache the generated value in the flow's settings.
            k = self._key()
            s = self.view.settings[f]
            if k in s:
                return s[k]
            val = self.generate(f)
            s[k] = val
            return val
        else:
            # Flow not in the store: settings would expire, so don't cache.
            return self.generate(f)
|
||||
|
||||
class OrderRequestStart(_OrderKey):
    """Sort flows by creation time."""

    def generate(self, f: mitmproxy.flow.Flow) -> float:
        return f.timestamp_created


class OrderRequestMethod(_OrderKey):
    """Sort flows by HTTP method, flow type (TCP/UDP), or DNS op code."""

    def generate(self, f: mitmproxy.flow.Flow) -> str:
        if isinstance(f, http.HTTPFlow):
            return f.request.method
        elif isinstance(f, (tcp.TCPFlow, udp.UDPFlow)):
            return f.type.upper()
        elif isinstance(f, dns.DNSFlow):
            return dns.op_codes.to_str(f.request.op_code)
        else:
            raise NotImplementedError()


class OrderRequestURL(_OrderKey):
    """Sort flows by URL, server address, or first DNS question name."""

    def generate(self, f: mitmproxy.flow.Flow) -> str:
        if isinstance(f, http.HTTPFlow):
            return f.request.url
        elif isinstance(f, (tcp.TCPFlow, udp.UDPFlow)):
            return human.format_address(f.server_conn.address)
        elif isinstance(f, dns.DNSFlow):
            return f.request.questions[0].name if f.request.questions else ""
        else:
            raise NotImplementedError()


class OrderKeySize(_OrderKey):
    """Sort flows by total payload size (request plus response)."""

    def generate(self, f: mitmproxy.flow.Flow) -> int:
        if isinstance(f, http.HTTPFlow):
            size = 0
            if f.request.raw_content:
                size += len(f.request.raw_content)
            if f.response and f.response.raw_content:
                size += len(f.response.raw_content)
            return size
        elif isinstance(f, (tcp.TCPFlow, udp.UDPFlow)):
            # Sum of all message payloads in either direction.
            size = 0
            for message in f.messages:
                size += len(message.content)
            return size
        elif isinstance(f, dns.DNSFlow):
            return f.response.size if f.response else 0
        else:
            raise NotImplementedError()
||||
|
||||
|
||||
# (shortcut key, order name) pairs; the names double as view_order choices.
orders = [
    ("t", "time"),
    ("m", "method"),
    ("u", "url"),
    ("z", "size"),
]


# Signature stubs documenting the call shapes of the SyncSignal instances
# created in View.__init__.
def _signal_with_flow(flow: mitmproxy.flow.Flow) -> None: ...


def _sig_view_remove(flow: mitmproxy.flow.Flow, index: int) -> None: ...
||||
|
||||
|
||||
class View(collections.abc.Sequence):
    """
    A store of flows plus a filtered, ordered view onto it, with focus
    tracking and per-flow settings (see module docstring).
    """

    def __init__(self) -> None:
        super().__init__()
        # All known flows by id, in insertion order.
        self._store: collections.OrderedDict[str, mitmproxy.flow.Flow] = (
            collections.OrderedDict()
        )
        self.filter = flowfilter.match_all
        # Should we show only marked flows?
        self.show_marked = False

        self.default_order = OrderRequestStart(self)
        self.orders = dict(
            time=OrderRequestStart(self),
            method=OrderRequestMethod(self),
            url=OrderRequestURL(self),
            size=OrderKeySize(self),
        )
        self.order_key: _OrderKey = self.default_order
        self.order_reversed = False
        self.focus_follow = False

        # The filtered, sorted subset of the store that is actually shown.
        self._view = sortedcontainers.SortedListWithKey(key=self.order_key)

        # The sig_view* signals broadcast events that affect the view. That is,
        # an update to a flow in the store but not in the view does not trigger
        # a signal. All signals are called after the view has been updated.
        self.sig_view_update = signals.SyncSignal(_signal_with_flow)
        self.sig_view_add = signals.SyncSignal(_signal_with_flow)
        self.sig_view_remove = signals.SyncSignal(_sig_view_remove)
        # Signals that the view should be refreshed completely
        self.sig_view_refresh = signals.SyncSignal(lambda: None)

        # The sig_store* signals broadcast events that affect the underlying
        # store. If a flow is removed from just the view, sig_view_remove is
        # triggered. If it is removed from the store while it is also in the
        # view, both sig_store_remove and sig_view_remove are triggered.
        self.sig_store_remove = signals.SyncSignal(_signal_with_flow)
        # Signals that the store should be refreshed completely
        self.sig_store_refresh = signals.SyncSignal(lambda: None)

        self.focus = Focus(self)
        self.settings = Settings(self)
|
||||
def load(self, loader):
    """Register view-related options."""
    loader.add_option(
        "view_filter", Optional[str], None, "Limit the view to matching flows."
    )
    loader.add_option(
        "view_order",
        str,
        "time",
        "Flow sort order.",
        choices=list(map(lambda c: c[1], orders)),
    )
    loader.add_option(
        "view_order_reversed", bool, False, "Reverse the sorting order."
    )
    loader.add_option(
        "console_focus_follow", bool, False, "Focus follows new flows."
    )

def store_count(self):
    """Number of flows in the underlying store (not just the view)."""
    return len(self._store)

def _rev(self, idx: int) -> int:
    """
    Reverses an index, if needed
    """
    if self.order_reversed:
        if idx < 0:
            idx = -idx - 1
        else:
            idx = len(self._view) - idx - 1
            if idx < 0:
                raise IndexError
    return idx

def __len__(self):
    return len(self._view)

def __getitem__(self, offset) -> Any:
    return self._view[self._rev(offset)]

# Reflect some methods to the efficient underlying implementation

def _bisect(self, f: mitmproxy.flow.Flow) -> int:
    # Insertion point for f in the (possibly reversed) view.
    v = self._view.bisect_right(f)
    return self._rev(v - 1) + 1

def index(
    self, f: mitmproxy.flow.Flow, start: int = 0, stop: int | None = None
) -> int:
    return self._rev(self._view.index(f, start, stop))

def __contains__(self, f: Any) -> bool:
    return self._view.__contains__(f)

def _order_key_name(self):
    # Must stay in sync with _OrderKey._key() for the active order key.
    return "_order_%s" % id(self.order_key)

def _base_add(self, f):
    # Cache the sort value before inserting so the key stays stable even if
    # the flow changes afterwards (see module comment).
    self.settings[f][self._order_key_name()] = self.order_key(f)
    self._view.add(f)

def _refilter(self):
    """Rebuild the view from the store, re-applying filter and marked mode."""
    self._view.clear()
    for i in self._store.values():
        if self.show_marked and not i.marked:
            continue
        if self.filter(i):
            self._base_add(i)
    self.sig_view_refresh.send()
|
||||
""" View API """
|
||||
|
||||
# Focus
|
||||
@command.command("view.focus.go")
|
||||
def go(self, offset: int) -> None:
|
||||
"""
|
||||
Go to a specified offset. Positive offests are from the beginning of
|
||||
the view, negative from the end of the view, so that 0 is the first
|
||||
flow, -1 is the last flow.
|
||||
"""
|
||||
if len(self) == 0:
|
||||
return
|
||||
if offset < 0:
|
||||
offset = len(self) + offset
|
||||
if offset < 0:
|
||||
offset = 0
|
||||
if offset > len(self) - 1:
|
||||
offset = len(self) - 1
|
||||
self.focus.flow = self[offset]
|
||||
|
||||
@command.command("view.focus.next")
|
||||
def focus_next(self) -> None:
|
||||
"""
|
||||
Set focus to the next flow.
|
||||
"""
|
||||
if self.focus.index is not None:
|
||||
idx = self.focus.index + 1
|
||||
if self.inbounds(idx):
|
||||
self.focus.flow = self[idx]
|
||||
else:
|
||||
pass
|
||||
|
||||
@command.command("view.focus.prev")
|
||||
def focus_prev(self) -> None:
|
||||
"""
|
||||
Set focus to the previous flow.
|
||||
"""
|
||||
if self.focus.index is not None:
|
||||
idx = self.focus.index - 1
|
||||
if self.inbounds(idx):
|
||||
self.focus.flow = self[idx]
|
||||
else:
|
||||
pass
|
||||
|
||||
# Order
@command.command("view.order.options")
def order_options(self) -> Sequence[str]:
    """
    Choices supported by the view_order option.
    """
    return list(sorted(self.orders.keys()))

@command.command("view.order.reverse")
def set_reversed(self, boolean: bool) -> None:
    """Enable/disable reversed ordering and notify listeners."""
    self.order_reversed = boolean
    self.sig_view_refresh.send()

@command.command("view.order.set")
def set_order(self, order_key: str) -> None:
    """
    Sets the current view order.
    """
    if order_key not in self.orders:
        raise exceptions.CommandError("Unknown flow order: %s" % order_key)
    key = self.orders[order_key]
    self.order_key = key
    # Re-sort by rebuilding the sorted container under the new key.
    newview = sortedcontainers.SortedListWithKey(key=key)
    newview.update(self._view)
    self._view = newview

@command.command("view.order")
def get_order(self) -> str:
    """
    Returns the current view order.
    """
    order = ""
    for k in self.orders.keys():
        if self.order_key == self.orders[k]:
            order = k
    return order
|
||||
|
||||
# Filter
@command.command("view.filter.set")
def set_filter_cmd(self, filter_expr: str) -> None:
    """
    Sets the current view filter.
    """
    filt = None
    if filter_expr:
        try:
            filt = flowfilter.parse(filter_expr)
        except ValueError as e:
            raise exceptions.CommandError(str(e)) from e
    self.set_filter(filt)

def set_filter(self, flt: flowfilter.TFilter | None):
    """Install *flt* (None resets to match-all) and rebuild the view."""
    self.filter = flt or flowfilter.match_all
    self._refilter()
|
||||
|
||||
# View Updates
@command.command("view.clear")
def clear(self) -> None:
    """
    Clears both the store and view.
    """
    self._store.clear()
    self._view.clear()
    self.sig_view_refresh.send()
    self.sig_store_refresh.send()

@command.command("view.clear_unmarked")
def clear_not_marked(self) -> None:
    """
    Clears only the unmarked flows.
    """
    # Iterate a copy: the store is mutated inside the loop.
    for flow in self._store.copy().values():
        if not flow.marked:
            self._store.pop(flow.id)

    self._refilter()
    self.sig_store_refresh.send()
|
||||
|
||||
# View Settings
@command.command("view.settings.getval")
def getvalue(self, flow: mitmproxy.flow.Flow, key: str, default: str) -> str:
    """
    Get a value from the settings store for the specified flow.
    Returns *default* if the key is not set.
    """
    return self.settings[flow].get(key, default)
|
||||
|
||||
@command.command("view.settings.setval.toggle")
|
||||
def setvalue_toggle(self, flows: Sequence[mitmproxy.flow.Flow], key: str) -> None:
|
||||
"""
|
||||
Toggle a boolean value in the settings store, setting the value to
|
||||
the string "true" or "false".
|
||||
"""
|
||||
updated = []
|
||||
for f in flows:
|
||||
current = self.settings[f].get("key", "false")
|
||||
self.settings[f][key] = "false" if current == "true" else "true"
|
||||
updated.append(f)
|
||||
ctx.master.addons.trigger(hooks.UpdateHook(updated))
|
||||
|
||||
@command.command("view.settings.setval")
|
||||
def setvalue(
|
||||
self, flows: Sequence[mitmproxy.flow.Flow], key: str, value: str
|
||||
) -> None:
|
||||
"""
|
||||
Set a value in the settings store for the specified flows.
|
||||
"""
|
||||
updated = []
|
||||
for f in flows:
|
||||
self.settings[f][key] = value
|
||||
updated.append(f)
|
||||
ctx.master.addons.trigger(hooks.UpdateHook(updated))
|
||||
|
||||
# Flows
@command.command("view.flows.duplicate")
def duplicate(self, flows: Sequence[mitmproxy.flow.Flow]) -> None:
    """
    Duplicates the specified flows, and sets the focus to the first
    duplicate.
    """
    dups = [f.copy() for f in flows]
    if dups:
        self.add(dups)
        self.focus.flow = dups[0]
        logging.log(ALERT, "Duplicated %s flows" % len(dups))

@command.command("view.flows.remove")
def remove(self, flows: Sequence[mitmproxy.flow.Flow]) -> None:
    """
    Removes the flow from the underlying store and the view.
    Killable flows are killed before removal.
    """
    for f in flows:
        if f.id in self._store:
            if f.killable:
                f.kill()
            if f in self._view:
                # We manually pass the index here because multiple flows may have the same
                # sorting key, and we cannot reconstruct the index from that.
                idx = self._view.index(f)
                self._view.remove(f)
                self.sig_view_remove.send(flow=f, index=idx)
            del self._store[f.id]
            self.sig_store_remove.send(flow=f)
    if len(flows) > 1:
        logging.log(ALERT, "Removed %s flows" % len(flows))

@command.command("view.flows.resolve")
def resolve(self, flow_spec: str) -> Sequence[mitmproxy.flow.Flow]:
    """
    Resolve a flow list specification to an actual list of flows.

    Accepts the @all/@focus/@shown/@hidden/@marked/@unmarked shortcuts,
    a comma-separated list of flow ids, or a flow filter expression.
    """
    if flow_spec == "@all":
        return [i for i in self._store.values()]
    if flow_spec == "@focus":
        return [self.focus.flow] if self.focus.flow else []
    elif flow_spec == "@shown":
        return [i for i in self]
    elif flow_spec == "@hidden":
        return [i for i in self._store.values() if i not in self._view]
    elif flow_spec == "@marked":
        return [i for i in self._store.values() if i.marked]
    elif flow_spec == "@unmarked":
        return [i for i in self._store.values() if not i.marked]
    elif re.match(r"@[0-9a-f\-,]{36,}", flow_spec):
        # "@<id>,<id>,...": look up flows by id.
        ids = flow_spec[1:].split(",")
        return [i for i in self._store.values() if i.id in ids]
    else:
        # Fall back to interpreting the spec as a filter expression.
        try:
            filt = flowfilter.parse(flow_spec)
        except ValueError as e:
            raise exceptions.CommandError(str(e)) from e
        return [i for i in self._store.values() if filt(i)]
|
||||
|
||||
@command.command("view.flows.create")
|
||||
def create(self, method: str, url: str) -> None:
|
||||
try:
|
||||
req = http.Request.make(method.upper(), url)
|
||||
except ValueError as e:
|
||||
raise exceptions.CommandError("Invalid URL: %s" % e)
|
||||
|
||||
c = connection.Client(
|
||||
peername=("", 0),
|
||||
sockname=("", 0),
|
||||
timestamp_start=req.timestamp_start - 0.0001,
|
||||
)
|
||||
s = connection.Server(address=(req.host, req.port))
|
||||
|
||||
f = http.HTTPFlow(c, s)
|
||||
f.request = req
|
||||
f.request.headers["Host"] = req.host
|
||||
self.add([f])
|
||||
|
||||
@command.command("view.flows.load")
|
||||
def load_file(self, path: mitmproxy.types.Path) -> None:
|
||||
"""
|
||||
Load flows into the view, without processing them with addons.
|
||||
"""
|
||||
try:
|
||||
with open(path, "rb") as f:
|
||||
for i in io.FlowReader(f).stream():
|
||||
# Do this to get a new ID, so we can load the same file N times and
|
||||
# get new flows each time. It would be more efficient to just have a
|
||||
# .newid() method or something.
|
||||
self.add([i.copy()])
|
||||
except OSError as e:
|
||||
logging.error(e.strerror)
|
||||
except exceptions.FlowReadException as e:
|
||||
logging.error(str(e))
|
||||
|
||||
def add(self, flows: Sequence[mitmproxy.flow.Flow]) -> None:
|
||||
"""
|
||||
Adds a flow to the state. If the flow already exists, it is
|
||||
ignored.
|
||||
"""
|
||||
for f in flows:
|
||||
if f.id not in self._store:
|
||||
self._store[f.id] = f
|
||||
if self.filter(f):
|
||||
self._base_add(f)
|
||||
if self.focus_follow:
|
||||
self.focus.flow = f
|
||||
self.sig_view_add.send(flow=f)
|
||||
|
||||
def get_by_id(self, flow_id: str) -> mitmproxy.flow.Flow | None:
|
||||
"""
|
||||
Get flow with the given id from the store.
|
||||
Returns None if the flow is not found.
|
||||
"""
|
||||
return self._store.get(flow_id)
|
||||
|
||||
# View Properties
@command.command("view.properties.length")
def get_length(self) -> int:
    """
    Returns view length.
    """
    return len(self)

@command.command("view.properties.marked")
def get_marked(self) -> bool:
    """
    Returns true if view is in marked mode.
    """
    return self.show_marked

@command.command("view.properties.marked.toggle")
def toggle_marked(self) -> None:
    """
    Toggle whether to show marked views only.
    """
    self.show_marked = not self.show_marked
    self._refilter()

@command.command("view.properties.inbounds")
def inbounds(self, index: int) -> bool:
    """
    Is this 0 <= index < len(self)?
    """
    return 0 <= index < len(self)
|
||||
|
||||
# Event handlers
def configure(self, updated):
    """Apply changed options: filter, order, reverse ordering, focus-follow."""
    if "view_filter" in updated:
        filt = None
        if ctx.options.view_filter:
            try:
                filt = flowfilter.parse(ctx.options.view_filter)
            except ValueError as e:
                raise exceptions.OptionsError(str(e)) from e
        self.set_filter(filt)
    if "view_order" in updated:
        if ctx.options.view_order not in self.orders:
            raise exceptions.OptionsError(
                "Unknown flow order: %s" % ctx.options.view_order
            )
        self.set_order(ctx.options.view_order)
    if "view_order_reversed" in updated:
        self.set_reversed(ctx.options.view_order_reversed)
    if "console_focus_follow" in updated:
        self.focus_follow = ctx.options.console_focus_follow

# Flow lifecycle hooks: a flow is added on its first event and updated in
# place on every subsequent one.
def requestheaders(self, f):
    self.add([f])

def error(self, f):
    self.update([f])

def response(self, f):
    self.update([f])

def intercept(self, f):
    self.update([f])

def resume(self, f):
    self.update([f])

def kill(self, f):
    self.update([f])

def tcp_start(self, f):
    self.add([f])

def tcp_message(self, f):
    self.update([f])

def tcp_error(self, f):
    self.update([f])

def tcp_end(self, f):
    self.update([f])

def udp_start(self, f):
    self.add([f])

def udp_message(self, f):
    self.update([f])

def udp_error(self, f):
    self.update([f])

def udp_end(self, f):
    self.update([f])

def dns_request(self, f):
    self.add([f])

def dns_response(self, f):
    self.update([f])

def dns_error(self, f):
    self.update([f])
|
||||
|
||||
def update(self, flows: Sequence[mitmproxy.flow.Flow]) -> None:
    """
    Updates a list of flows. If flow is not in the state, it's ignored.

    A flow that now matches the filter is added to the view; one that no
    longer matches is removed; one already in the view has its sort key
    refreshed.
    """
    for f in flows:
        if f.id in self._store:
            if self.filter(f):
                if f not in self._view:
                    self._base_add(f)
                    if self.focus_follow:
                        self.focus.flow = f
                    self.sig_view_add.send(flow=f)
                else:
                    # This is a tad complicated. The sortedcontainers
                    # implementation assumes that the order key is stable. If
                    # it changes mid-way Very Bad Things happen. We detect when
                    # this happens, and re-fresh the item.
                    self.order_key.refresh(f)
                    self.sig_view_update.send(flow=f)
            else:
                try:
                    idx = self._view.index(f)
                except ValueError:
                    pass  # The value was not in the view
                else:
                    self._view.remove(f)
                    self.sig_view_remove.send(flow=f, index=idx)
|
||||
|
||||
|
||||
class Focus:
    """
    Tracks a focus element within a View.
    """

    def __init__(self, v: View) -> None:
        self.view = v
        self._flow: mitmproxy.flow.Flow | None = None
        self.sig_change = signals.SyncSignal(lambda: None)
        # Focus the first flow (if any) before wiring up the signals, so
        # the initial assignment does not race our own handlers.
        if len(v) > 0:
            self.flow = v[0]
        v.sig_view_add.connect(self._sig_view_add)
        v.sig_view_remove.connect(self._sig_view_remove)
        v.sig_view_refresh.connect(self._sig_view_refresh)

    @property
    def flow(self) -> mitmproxy.flow.Flow | None:
        """The currently focused flow, or None if nothing is focused."""
        return self._flow

    @flow.setter
    def flow(self, f: mitmproxy.flow.Flow | None):
        # Only a flow that is actually present in the view (or None) may
        # receive focus; every change is announced via sig_change.
        acceptable = f is None or f in self.view
        if not acceptable:
            raise ValueError("Attempt to set focus to flow not in view")
        self._flow = f
        self.sig_change.send()

    @property
    def index(self) -> int | None:
        """Position of the focused flow within the view, or None."""
        if not self.flow:
            return None
        return self.view.index(self.flow)

    @index.setter
    def index(self, idx):
        last = len(self.view) - 1
        if idx < 0 or idx > last:
            raise ValueError("Index out of view bounds")
        self.flow = self.view[idx]

    def _nearest(self, f, v):
        # Where the flow would sort into v, clamped to the last index.
        pos = v._bisect(f)
        return min(pos, len(v) - 1)

    def _sig_view_remove(self, flow, index):
        if not len(self.view):
            self.flow = None
            return
        if flow is self.flow:
            # The focused flow vanished: move focus to its old position
            # (or the new last element).
            self.index = min(index, len(self.view) - 1)

    def _sig_view_refresh(self):
        if not len(self.view):
            self.flow = None
            return
        if self.flow is None:
            self.flow = self.view[0]
            return
        if self.flow not in self.view:
            # Focused flow dropped out of the view: focus the nearest one.
            self.flow = self.view[self._nearest(self.flow, self.view)]

    def _sig_view_add(self, flow):
        # We only have to act if we don't have a focus element
        if self.flow:
            return
        self.flow = flow
|
||||
|
||||
|
||||
class Settings(collections.abc.Mapping):
    """
    A per-flow settings mapping, keyed by flow.

    Entries are created lazily on first access and pruned automatically
    when the corresponding flow leaves the view's store.
    """

    def __init__(self, view: View) -> None:
        self.view = view
        # Settings dicts, keyed by flow id.
        self._values: MutableMapping[str, dict] = {}
        view.sig_store_remove.connect(self._sig_store_remove)
        view.sig_store_refresh.connect(self._sig_store_refresh)

    def __iter__(self) -> Iterator:
        yield from self._values

    def __len__(self) -> int:
        return len(self._values)

    def __getitem__(self, f: mitmproxy.flow.Flow) -> dict:
        # Only flows currently in the store may carry settings; create
        # an empty settings dict on first access.
        if f.id in self.view._store:
            return self._values.setdefault(f.id, {})
        raise KeyError

    def _sig_store_remove(self, flow):
        # Discard settings for the removed flow, if any.
        self._values.pop(flow.id, None)

    def _sig_store_refresh(self):
        # Prune settings of flows that are no longer in the store.
        stale = [fid for fid in self._values if fid not in self.view._store]
        for fid in stale:
            del self._values[fid]
|
||||