templates/pyodide-console.html 0000644 00000021062 15025346044 0012531 0 ustar 00 <!-- taken from https://github.com/pyodide/pyodide/blob/main/src/templates/console.html --> <!-- Copyright (C) 2019-2022, Pyodide contributors and Mozilla --> <!-- SPDX-FileCopyrightText: 2019-2022, Pyodide contributors and Mozilla --> <!-- SPDX-License-Identifier: MPL-2.0 --> <!doctype html> <html> <head> <meta charset="UTF-8" /> <script src="https://cdn.jsdelivr.net/npm/jquery"></script> <script src="https://cdn.jsdelivr.net/npm/jquery.terminal@2.35.2/js/jquery.terminal.min.js"></script> <script src="https://cdn.jsdelivr.net/npm/jquery.terminal@2.35.2/js/unix_formatting.min.js"></script> <link href="https://cdn.jsdelivr.net/npm/jquery.terminal@2.35.2/css/jquery.terminal.min.css" rel="stylesheet" /> <style> .terminal { --size: 1.5; --color: rgba(255, 255, 255, 0.8); } .noblink { --animation: terminal-none; } body { background-color: black; } #jquery-terminal-logo { color: white; border-color: white; position: absolute; top: 7px; right: 18px; z-index: 2; } #jquery-terminal-logo a { color: gray; text-decoration: none; font-size: 0.7em; } #loading { display: inline-block; width: 50px; height: 50px; position: fixed; top: 50%; left: 50%; border: 3px solid rgba(172, 237, 255, 0.5); border-radius: 50%; border-top-color: #fff; animation: spin 1s ease-in-out infinite; -webkit-animation: spin 1s ease-in-out infinite; } @keyframes spin { to { -webkit-transform: rotate(360deg); } } @-webkit-keyframes spin { to { -webkit-transform: rotate(360deg); } } </style> </head> <body> <div id="jquery-terminal-logo"> <a href="https://terminal.jcubic.pl/">jQuery Terminal</a> </div> <div id="loading"></div> <script> "use strict"; function sleep(s) { return new Promise((resolve) => setTimeout(resolve, s)); } async function main() { let indexURL = "https://cdn.jsdelivr.net/pyodide/v0.25.0/full/"; const urlParams = new URLSearchParams(window.location.search); const buildParam = urlParams.get("build"); if (buildParam) { if (["full", "debug", "pyc"].includes(buildParam)) { indexURL = indexURL.replace( "/full/", "/" + urlParams.get("build") + "/", ); } else { console.warn( 'Invalid URL parameter: build="' + buildParam + '". Using default "full".', ); } } const { loadPyodide } = await import(indexURL + "pyodide.mjs"); // to facilitate debugging globalThis.loadPyodide = loadPyodide; let term; globalThis.pyodide = await loadPyodide({ stdin: () => { let result = prompt(); echo(result); return result; }, }); let { repr_shorten, BANNER, PyodideConsole } = pyodide.pyimport("pyodide.console"); BANNER = `Welcome to the Pyodide ${pyodide.version} terminal emulator 🐍\n` + BANNER; const pyconsole = PyodideConsole(pyodide.globals); const namespace = pyodide.globals.get("dict")(); const await_fut = pyodide.runPython( ` import builtins from pyodide.ffi import to_js async def await_fut(fut): res = await fut if res is not None: builtins._ = res return to_js([res], depth=1) await_fut `, { globals: namespace }, ); namespace.destroy(); const echo = (msg, ...opts) => term.echo( msg .replaceAll("]]", "]]") .replaceAll("[[", "[["), ...opts, ); const ps1 = ">>> "; const ps2 = "... 
"; async function lock() { let resolve; const ready = term.ready; term.ready = new Promise((res) => (resolve = res)); await ready; return resolve; } async function interpreter(command) { const unlock = await lock(); term.pause(); // multiline should be split (useful when pasting) for (const c of command.split("\n")) { const escaped = c.replaceAll(/\u00a0/g, " "); const fut = pyconsole.push(escaped); term.set_prompt(fut.syntax_check === "incomplete" ? ps2 : ps1); switch (fut.syntax_check) { case "syntax-error": term.error(fut.formatted_error.trimEnd()); continue; case "incomplete": continue; case "complete": break; default: throw new Error(`Unexpected type ${ty}`); } // In JavaScript, await automatically also awaits any results of // awaits, so if an async function returns a future, it will await // the inner future too. This is not what we want so we // temporarily put it into a list to protect it. const wrapped = await_fut(fut); // complete case, get result / error and print it. try { const [value] = await wrapped; if (value !== undefined) { echo( repr_shorten.callKwargs(value, { separator: "\n<long output truncated>\n", }), ); } if (value instanceof pyodide.ffi.PyProxy) { value.destroy(); } } catch (e) { if (e.constructor.name === "PythonError") { const message = fut.formatted_error || e.message; term.error(message.trimEnd()); } else { throw e; } } finally { fut.destroy(); wrapped.destroy(); } } term.resume(); await sleep(10); unlock(); } term = $("body").terminal(interpreter, { greetings: BANNER, prompt: ps1, completionEscape: false, completion: function (command, callback) { callback(pyconsole.complete(command).toJs()[0]); }, keymap: { "CTRL+C": async function (event, original) { pyconsole.buffer.clear(); term.enter(); echo("KeyboardInterrupt"); term.set_command(""); term.set_prompt(ps1); }, TAB: (event, original) => { const command = term.before_cursor(); // Disable completion for whitespaces. if (command.trim() === "") { term.insert("\t"); return false; } return original(event); }, }, }); window.term = term; pyconsole.stdout_callback = (s) => echo(s, { newline: false }); pyconsole.stderr_callback = (s) => { term.error(s.trimEnd()); }; term.ready = Promise.resolve(); pyodide._api.on_fatal = async (e) => { if (e.name === "Exit") { term.error(e); term.error("Pyodide exited and can no longer be used."); } else { term.error( "Pyodide has suffered a fatal error. 
Please report this to the Pyodide maintainers.", ); term.error("The cause of the fatal error was:"); term.error(e); term.error("Look in the browser console for more details."); } await term.ready; term.pause(); await sleep(15); term.pause(); }; const searchParams = new URLSearchParams(window.location.search); if (searchParams.has("noblink")) { $(".cmd-cursor").addClass("noblink"); } await term.ready; await term.exec("import micropip\n"); await term.exec("micropip.list()\n"); await term.exec('await micropip.install("http://localhost:8000/urllib3-2.2.0-py3-none-any.whl")') await term.exec("micropip.list()"); await term.exec("import urllib3"); await term.exec("urllib3.__version__"); } window.console_ready = main(); </script> </body> </html> test_emscripten.py 0000644 00000107322 15025346044 0010336 0 ustar 00 from __future__ import annotations import sys import typing import pytest from urllib3.fields import _TYPE_FIELD_VALUE_TUPLE from ...port_helpers import find_unused_port if sys.version_info < (3, 11): # pyodide only works on 3.11+ pytest.skip(allow_module_level=True) # only run these tests if pytest_pyodide is installed # so we don't break non-emscripten pytest running pytest_pyodide = pytest.importorskip("pytest_pyodide") from pytest_pyodide import run_in_pyodide # type: ignore[import-not-found] # noqa: E402 from pytest_pyodide.decorator import ( # type: ignore[import-not-found] # noqa: E402 copy_files_to_pyodide, ) from .conftest import PyodideServerInfo, ServerRunnerInfo # noqa: E402 # make our ssl certificates work in chrome pytest_pyodide.runner.CHROME_FLAGS.append("ignore-certificate-errors") # copy our wheel file to pyodide and install it def install_urllib3_wheel() -> ( typing.Callable[ [typing.Callable[..., typing.Any]], typing.Callable[..., typing.Any] ] ): return copy_files_to_pyodide( # type: ignore[no-any-return] file_list=[("dist/*.whl", "/tmp")], install_wheels=True ) @install_urllib3_wheel() def test_index( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] from urllib3.connection import HTTPConnection from urllib3.response import BaseHTTPResponse conn = HTTPConnection(host, port) url = f"http://{host}:{port}/" conn.request("GET", url) response = conn.getresponse() # check methods of response assert isinstance(response, BaseHTTPResponse) assert response.url == url response.url = "http://woo" assert response.url == "http://woo" assert response.connection == conn assert response.retries is None data1 = response.data decoded1 = data1.decode("utf-8") data2 = response.data # check that getting data twice works decoded2 = data2.decode("utf-8") assert decoded1 == decoded2 == "Dummy server!" pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.http_port ) @install_urllib3_wheel() def test_pool_requests( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int, https_port: int) -> None: # type: ignore[no-untyped-def] # first with PoolManager import urllib3 http = urllib3.PoolManager() resp = http.request("GET", f"http://{host}:{port}/") assert resp.data.decode("utf-8") == "Dummy server!" resp2 = http.request("GET", f"http://{host}:{port}/index") assert resp2.data.decode("utf-8") == "Dummy server!" 
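        # (urllib3's PoolManager keys connection pools by scheme, host and
        # port, so the two plain-HTTP requests above are expected to share a
        # single pool, which the next assertion checks.)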
# should all have come from one pool assert len(http.pools) == 1 resp3 = http.request("GET", f"https://{host}:{https_port}/") assert resp2.data.decode("utf-8") == "Dummy server!" # one http pool + one https pool assert len(http.pools) == 2 # now with ConnectionPool # because block == True, this will fail if the connection isn't # returned to the pool correctly after the first request pool = urllib3.HTTPConnectionPool(host, port, maxsize=1, block=True) resp3 = pool.urlopen("GET", "/index") assert resp3.data.decode("utf-8") == "Dummy server!" resp4 = pool.urlopen("GET", "/") assert resp4.data.decode("utf-8") == "Dummy server!" # now with manual release of connection # first - connection should be released once all # data is read pool2 = urllib3.HTTPConnectionPool(host, port, maxsize=1, block=True) resp5 = pool2.urlopen("GET", "/index", preload_content=False) assert pool2.pool is not None # at this point, the connection should not be in the pool assert pool2.pool.qsize() == 0 assert resp5.data.decode("utf-8") == "Dummy server!" # now we've read all the data, connection should be back to the pool assert pool2.pool.qsize() == 1 resp6 = pool2.urlopen("GET", "/index", preload_content=False) assert pool2.pool.qsize() == 0 # force it back to the pool resp6.release_conn() assert pool2.pool.qsize() == 1 read_str = resp6.read() # for consistency with urllib3, this still returns the correct data even though # we are in theory not using the connection any more assert read_str.decode("utf-8") == "Dummy server!" pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.http_port, testserver_http.https_port, ) # wrong protocol / protocol error etc. should raise an exception of http.client.HTTPException @install_urllib3_wheel() def test_wrong_protocol( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] import http.client import pytest from urllib3.connection import HTTPConnection conn = HTTPConnection(host, port) with pytest.raises(http.client.HTTPException): conn.request("GET", f"http://{host}:{port}/") pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.https_port ) # wrong protocol / protocol error etc. 
should raise an exception of http.client.HTTPException @install_urllib3_wheel() def test_bad_method( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide(packages=("pytest",)) # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] import http.client import pytest from urllib3.connection import HTTPConnection conn = HTTPConnection(host, port) with pytest.raises(http.client.HTTPException): conn.request("TRACE", f"http://{host}:{port}/") pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.https_port ) # no connection - should raise @install_urllib3_wheel() def test_no_response( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide(packages=("pytest",)) # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] import http.client import pytest from urllib3.connection import HTTPConnection conn = HTTPConnection(host, port) with pytest.raises(http.client.HTTPException): conn.request("GET", f"http://{host}:{port}/") _ = conn.getresponse() pyodide_test(selenium_coverage, testserver_http.http_host, find_unused_port()) @install_urllib3_wheel() def test_404(selenium_coverage: typing.Any, testserver_http: PyodideServerInfo) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] from urllib3.connection import HTTPConnection from urllib3.response import BaseHTTPResponse conn = HTTPConnection(host, port) conn.request("GET", f"http://{host}:{port}/status?status=404 NOT FOUND") response = conn.getresponse() assert isinstance(response, BaseHTTPResponse) assert response.status == 404 pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.http_port ) # setting timeout should show a warning to js console # if we're on the ui thread, because XMLHttpRequest doesn't # support timeout in async mode if globalThis == Window @install_urllib3_wheel() def test_timeout_warning( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide() # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] import js # type: ignore[import-not-found] import urllib3.contrib.emscripten.fetch from urllib3.connection import HTTPConnection old_log = js.console.warn log_msgs = [] def capture_log(*args): # type: ignore[no-untyped-def] log_msgs.append(str(args)) old_log(*args) js.console.warn = capture_log conn = HTTPConnection(host, port, timeout=1.0) conn.request("GET", f"http://{host}:{port}/") conn.getresponse() js.console.warn = old_log # should have shown timeout warning exactly once by now assert len([x for x in log_msgs if x.find("Warning: Timeout") != -1]) == 1 assert urllib3.contrib.emscripten.fetch._SHOWN_TIMEOUT_WARNING pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.http_port ) @install_urllib3_wheel() def test_timeout_in_worker_non_streaming( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo, run_from_server: ServerRunnerInfo, ) -> None: worker_code = f""" import pyodide_js as pjs await pjs.loadPackage('http://{testserver_http.http_host}:{testserver_http.http_port}/wheel/dist.whl',deps=False) from urllib3.exceptions import TimeoutError from urllib3.connection import HTTPConnection conn = HTTPConnection("{testserver_http.http_host}", 
{testserver_http.http_port},timeout=1.0) result=-1 try: conn.request("GET","/slow") _response = conn.getresponse() result=-3 except TimeoutError as e: result=1 # we've got the correct exception except BaseException as e: result=-2 assert result == 1 """ run_from_server.run_webworker(worker_code) @install_urllib3_wheel() def test_timeout_in_worker_streaming( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo, run_from_server: ServerRunnerInfo, ) -> None: worker_code = f""" import pyodide_js as pjs await pjs.loadPackage('http://{testserver_http.http_host}:{testserver_http.http_port}/wheel/dist.whl',deps=False) import urllib3.contrib.emscripten.fetch await urllib3.contrib.emscripten.fetch.wait_for_streaming_ready() from urllib3.exceptions import TimeoutError from urllib3.connection import HTTPConnection conn = HTTPConnection("{testserver_http.http_host}", {testserver_http.http_port},timeout=1.0) result=-1 try: conn.request("GET","/slow",preload_content=False) _response = conn.getresponse() result=-3 except TimeoutError as e: result=1 # we've got the correct exception except BaseException as e: result=-2 assert result == 1 """ run_from_server.run_webworker(worker_code) @install_urllib3_wheel() def test_index_https( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] from urllib3.connection import HTTPSConnection from urllib3.response import BaseHTTPResponse conn = HTTPSConnection(host, port) conn.request("GET", f"https://{host}:{port}/") response = conn.getresponse() assert isinstance(response, BaseHTTPResponse) data = response.data assert data.decode("utf-8") == "Dummy server!" pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.https_port ) @install_urllib3_wheel() def test_non_streaming_no_fallback_warning( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] import js import urllib3.contrib.emscripten.fetch from urllib3.connection import HTTPSConnection from urllib3.response import BaseHTTPResponse log_msgs = [] old_log = js.console.warn def capture_log(*args): # type: ignore[no-untyped-def] log_msgs.append(str(args)) old_log(*args) js.console.warn = capture_log conn = HTTPSConnection(host, port) conn.request("GET", f"https://{host}:{port}/", preload_content=True) response = conn.getresponse() js.console.warn = old_log assert isinstance(response, BaseHTTPResponse) data = response.data assert data.decode("utf-8") == "Dummy server!" 
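        # (with preload_content=True the request goes through the blocking
        # XMLHttpRequest code path directly rather than attempting a
        # streaming fetch first, so no fallback is involved at all)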
# no console warnings because we didn't ask it to stream the response # check no log messages assert ( len([x for x in log_msgs if x.find("Can't stream HTTP requests") != -1]) == 0 ) assert not urllib3.contrib.emscripten.fetch._SHOWN_STREAMING_WARNING pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.https_port ) @install_urllib3_wheel() def test_streaming_fallback_warning( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] import js import urllib3.contrib.emscripten.fetch from urllib3.connection import HTTPSConnection from urllib3.response import BaseHTTPResponse # monkeypatch is_cross_origin_isolated so that it warns about that # even if we're serving it so it is fine urllib3.contrib.emscripten.fetch.is_cross_origin_isolated = lambda: False log_msgs = [] old_log = js.console.warn def capture_log(*args): # type: ignore[no-untyped-def] log_msgs.append(str(args)) old_log(*args) js.console.warn = capture_log conn = HTTPSConnection(host, port) conn.request("GET", f"https://{host}:{port}/", preload_content=False) response = conn.getresponse() js.console.warn = old_log assert isinstance(response, BaseHTTPResponse) data = response.data assert data.decode("utf-8") == "Dummy server!" # check that it has warned about falling back to non-streaming fetch exactly once assert ( len([x for x in log_msgs if x.find("Can't stream HTTP requests") != -1]) == 1 ) assert urllib3.contrib.emscripten.fetch._SHOWN_STREAMING_WARNING pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.https_port ) @install_urllib3_wheel() def test_specific_method( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo, run_from_server: ServerRunnerInfo, ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] from urllib3 import HTTPSConnectionPool with HTTPSConnectionPool(host, port) as pool: path = "/specific_method?method=POST" response = pool.request("POST", path) assert response.status == 200 response = pool.request("PUT", path) assert response.status == 400 pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.https_port ) @install_urllib3_wheel() def test_streaming_download( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo, run_from_server: ServerRunnerInfo, ) -> None: # test streaming download, which must be in a webworker # as you can't do it on main thread # this should return the 17mb big file, and # should not log any warning about falling back bigfile_url = ( f"http://{testserver_http.http_host}:{testserver_http.http_port}/bigfile" ) worker_code = f""" import pyodide_js as pjs await pjs.loadPackage('http://{testserver_http.http_host}:{testserver_http.http_port}/wheel/dist.whl',deps=False) import urllib3.contrib.emscripten.fetch await urllib3.contrib.emscripten.fetch.wait_for_streaming_ready() from urllib3.response import BaseHTTPResponse from urllib3.connection import HTTPConnection conn = HTTPConnection("{testserver_http.http_host}", {testserver_http.http_port}) conn.request("GET", "{bigfile_url}",preload_content=False) response = conn.getresponse() assert isinstance(response, BaseHTTPResponse) assert urllib3.contrib.emscripten.fetch._SHOWN_STREAMING_WARNING==False data=response.data.decode('utf-8') assert len(data) == 17825792 """ run_from_server.run_webworker(worker_code) 
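# The worker snippets in these streaming tests all repeat the same preamble:
# install the wheel under test, then block until the streaming fetch worker
# has started. A minimal sketch of that shared scaffolding follows; the
# helper name is illustrative and not part of the suite, but its body mirrors
# the inline strings used in the tests above and below.
def _streaming_worker_preamble(host: str, port: int) -> str:
    # returns Python source to run at the top of a pyodide web worker
    return f"""
import pyodide_js as pjs
await pjs.loadPackage('http://{host}:{port}/wheel/dist.whl', deps=False)
import urllib3.contrib.emscripten.fetch
# streaming only works in a worker, and only once the fetch worker has
# started, which requires control to return to JavaScript at least once
await urllib3.contrib.emscripten.fetch.wait_for_streaming_ready()
"""
# hypothetical usage:
#   run_from_server.run_webworker(_streaming_worker_preamble(host, port) + body)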
@install_urllib3_wheel() def test_streaming_close( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo, run_from_server: ServerRunnerInfo, ) -> None: # test streaming download, which must be in a webworker # as you can't do it on main thread # this should return the 17mb big file, and # should not log any warning about falling back url = f"http://{testserver_http.http_host}:{testserver_http.http_port}/" worker_code = f""" import pyodide_js as pjs await pjs.loadPackage('http://{testserver_http.http_host}:{testserver_http.http_port}/wheel/dist.whl',deps=False) import urllib3.contrib.emscripten.fetch await urllib3.contrib.emscripten.fetch.wait_for_streaming_ready() from urllib3.response import BaseHTTPResponse from urllib3.connection import HTTPConnection from io import RawIOBase conn = HTTPConnection("{testserver_http.http_host}", {testserver_http.http_port}) conn.request("GET", "{url}",preload_content=False) response = conn.getresponse() # check body is a RawIOBase stream and isn't seekable, writeable body_internal = response._response.body assert(isinstance(body_internal,RawIOBase)) assert(body_internal.writable() is False) assert(body_internal.seekable() is False) assert(body_internal.readable() is True) response.drain_conn() x=response.read() assert(not x) response.close() conn.close() # try and make destructor be covered # by killing everything del response del body_internal del conn """ run_from_server.run_webworker(worker_code) @install_urllib3_wheel() def test_streaming_bad_url( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo, run_from_server: ServerRunnerInfo, ) -> None: # this should cause an error # because the protocol is bad bad_url = f"hsffsdfttp://{testserver_http.http_host}:{testserver_http.http_port}/" # this must be in a webworker # as you can't do it on main thread worker_code = f""" import pytest import pyodide_js as pjs await pjs.loadPackage('http://{testserver_http.http_host}:{testserver_http.http_port}/wheel/dist.whl',deps=False) import http.client import urllib3.contrib.emscripten.fetch await urllib3.contrib.emscripten.fetch.wait_for_streaming_ready() from urllib3.response import BaseHTTPResponse from urllib3.connection import HTTPConnection conn = HTTPConnection("{testserver_http.http_host}", {testserver_http.http_port}) with pytest.raises(http.client.HTTPException): conn.request("GET", "{bad_url}",preload_content=False) """ run_from_server.run_webworker(worker_code) @install_urllib3_wheel() def test_streaming_bad_method( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo, run_from_server: ServerRunnerInfo, ) -> None: # this should cause an error # because the protocol is bad bad_url = f"http://{testserver_http.http_host}:{testserver_http.http_port}/" # this must be in a webworker # as you can't do it on main thread worker_code = f""" import pytest import http.client import pyodide_js as pjs await pjs.loadPackage('http://{testserver_http.http_host}:{testserver_http.http_port}/wheel/dist.whl',deps=False) import urllib3.contrib.emscripten.fetch await urllib3.contrib.emscripten.fetch.wait_for_streaming_ready() from urllib3.response import BaseHTTPResponse from urllib3.connection import HTTPConnection conn = HTTPConnection("{testserver_http.http_host}", {testserver_http.http_port}) with pytest.raises(http.client.HTTPException): # TRACE method should throw SecurityError in Javascript conn.request("TRACE", "{bad_url}",preload_content=False) """ run_from_server.run_webworker(worker_code) @install_urllib3_wheel() def 
test_streaming_notready_warning( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo, run_from_server: ServerRunnerInfo, ) -> None: # test streaming download but don't wait for # worker to be ready - should fallback to non-streaming # and log a warning file_url = f"http://{testserver_http.http_host}:{testserver_http.http_port}/" worker_code = f""" import pyodide_js as pjs await pjs.loadPackage('http://{testserver_http.http_host}:{testserver_http.http_port}/wheel/dist.whl',deps=False) import js import urllib3 from urllib3.response import BaseHTTPResponse from urllib3.connection import HTTPConnection log_msgs=[] old_log=js.console.warn def capture_log(*args): log_msgs.append(str(args)) old_log(*args) js.console.warn=capture_log conn = HTTPConnection("{testserver_http.http_host}", {testserver_http.http_port}) conn.request("GET", "{file_url}",preload_content=False) js.console.warn=old_log response = conn.getresponse() assert isinstance(response, BaseHTTPResponse) data=response.data.decode('utf-8') assert len([x for x in log_msgs if x.find("Can't stream HTTP requests")!=-1])==1 assert urllib3.contrib.emscripten.fetch._SHOWN_STREAMING_WARNING==True """ run_from_server.run_webworker(worker_code) @install_urllib3_wheel() def test_post_receive_json( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] import json from urllib3.connection import HTTPConnection from urllib3.response import BaseHTTPResponse json_data = { "Bears": "like", "to": {"eat": "buns", "with": ["marmalade", "and custard"]}, } conn = HTTPConnection(host, port) conn.request( "POST", f"http://{host}:{port}/echo_json", body=json.dumps(json_data).encode("utf-8"), headers={"Content-type": "application/json"}, ) response = conn.getresponse() assert isinstance(response, BaseHTTPResponse) data = response.json() assert data == json_data pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.http_port ) @install_urllib3_wheel() def test_upload( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] from urllib3 import HTTPConnectionPool data = "I'm in ur multipart form-data, hazing a cheezburgr" fields: dict[str, _TYPE_FIELD_VALUE_TUPLE] = { "upload_param": "filefield", "upload_filename": "lolcat.txt", "filefield": ("lolcat.txt", data), } fields["upload_size"] = str(len(data)) with HTTPConnectionPool(host, port) as pool: r = pool.request("POST", "/upload", fields=fields) assert r.status == 200 pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.http_port ) @install_urllib3_wheel() def test_streaming_not_ready_in_browser( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: # streaming ready should always be false # if we're in the main browser thread selenium_coverage.run_async( """ import urllib3.contrib.emscripten.fetch result=await urllib3.contrib.emscripten.fetch.wait_for_streaming_ready() assert(result is False) assert(urllib3.contrib.emscripten.fetch.streaming_ready() is None ) """ ) @install_urllib3_wheel() def test_requests_with_micropip( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: # this can't be @run_in_pyodide because of the async code selenium_coverage.run_async( f""" import micropip await 
micropip.install("requests") import requests import json r = requests.get("http://{testserver_http.http_host}:{testserver_http.http_port}/") assert(r.status_code == 200) assert(r.text == "Dummy server!") json_data={{"woo":"yay"}} # try posting some json with requests r = requests.post("http://{testserver_http.http_host}:{testserver_http.http_port}/echo_json",json=json_data) import js assert(r.json() == json_data) """ ) @install_urllib3_wheel() def test_open_close( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] from http.client import ResponseNotReady import pytest from urllib3.connection import HTTPConnection conn = HTTPConnection(host, port) # initially connection should be closed assert conn.is_closed is True # connection should have no response with pytest.raises(ResponseNotReady): response = conn.getresponse() # now make the response conn.request("GET", f"http://{host}:{port}/") # we never connect to proxy (or if we do, browser handles it) assert conn.has_connected_to_proxy is False # now connection should be open assert conn.is_closed is False # and should have a response response = conn.getresponse() assert response is not None conn.close() # now it is closed assert conn.is_closed is True # closed connection shouldn't have any response with pytest.raises(ResponseNotReady): conn.getresponse() pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.http_port ) # check that various ways that the worker may be broken # throw exceptions nicely, by deliberately breaking things # this is for coverage @install_urllib3_wheel() def test_break_worker_streaming( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo, run_from_server: ServerRunnerInfo, ) -> None: worker_code = f""" import pyodide_js as pjs await pjs.loadPackage('http://{testserver_http.http_host}:{testserver_http.http_port}/wheel/dist.whl',deps=False) import pytest import urllib3.contrib.emscripten.fetch import js import http.client await urllib3.contrib.emscripten.fetch.wait_for_streaming_ready() from urllib3.exceptions import TimeoutError from urllib3.connection import HTTPConnection conn = HTTPConnection("{testserver_http.http_host}", {testserver_http.http_port},timeout=1.0) # make the fetch worker return a bad response by: # 1) Clearing the int buffer # in the receive stream with pytest.raises(http.client.HTTPException): conn.request("GET","/",preload_content=False) response = conn.getresponse() body_internal = response._response.body assert(body_internal.int_buffer!=None) body_internal.int_buffer=None data=response.read() # 2) Monkeypatch postMessage so that it just sets an # exception status old_pm= body_internal.worker.postMessage with pytest.raises(http.client.HTTPException): conn.request("GET","/",preload_content=False) response = conn.getresponse() # make posted messages set an exception body_internal = response._response.body def set_exception(*args): body_internal.worker.postMessage = old_pm body_internal.int_buffer[1]=4 body_internal.byte_buffer[0]=ord("W") body_internal.byte_buffer[1]=ord("O") body_internal.byte_buffer[2]=ord("O") body_internal.byte_buffer[3]=ord("!") body_internal.byte_buffer[4]=0 js.Atomics.store(body_internal.int_buffer, 0, -4) js.Atomics.notify(body_internal.int_buffer,0) body_internal.worker.postMessage = set_exception data=response.read() # monkeypatch so it returns an unknown value for the magic 
number on initial fetch call with pytest.raises(http.client.HTTPException): # make posted messages set an exception worker=urllib3.contrib.emscripten.fetch._fetcher.js_worker def set_exception(self,*args): array=js.Int32Array.new(args[0].buffer) array[0]=-1234 worker.postMessage=set_exception.__get__(worker,worker.__class__) conn.request("GET","/",preload_content=False) response = conn.getresponse() data=response.read() urllib3.contrib.emscripten.fetch._fetcher.js_worker.postMessage=old_pm # 3) Stopping the worker receiving any messages which should cause a timeout error # in the receive stream with pytest.raises(TimeoutError): conn.request("GET","/",preload_content=False) response = conn.getresponse() # make posted messages not be send body_internal = response._response.body def ignore_message(*args): pass old_pm= body_internal.worker.postMessage body_internal.worker.postMessage = ignore_message data=response.read() body_internal.worker.postMessage = old_pm """ run_from_server.run_webworker(worker_code) @install_urllib3_wheel() def test_response_init_length( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] import pytest import urllib3.exceptions from urllib3.connection import HTTPConnection from urllib3.response import BaseHTTPResponse conn = HTTPConnection(host, port) conn.request("GET", f"http://{host}:{port}/") response = conn.getresponse() assert isinstance(response, BaseHTTPResponse) # head shouldn't have length length = response._init_length("HEAD") assert length == 0 # multiple inconsistent lengths - should raise invalid header with pytest.raises(urllib3.exceptions.InvalidHeader): response.headers["Content-Length"] = "4,5,6" length = response._init_length("GET") # non-numeric length - should return None response.headers["Content-Length"] = "anna" length = response._init_length("GET") assert length is None # numeric length - should return it response.headers["Content-Length"] = "54" length = response._init_length("GET") assert length == 54 # negative length - should return None response.headers["Content-Length"] = "-12" length = response._init_length("GET") assert length is None # none -> None del response.headers["Content-Length"] length = response._init_length("GET") assert length is None pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.http_port ) @install_urllib3_wheel() def test_response_close_connection( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] from urllib3.connection import HTTPConnection from urllib3.response import BaseHTTPResponse conn = HTTPConnection(host, port) conn.request("GET", f"http://{host}:{port}/") response = conn.getresponse() assert isinstance(response, BaseHTTPResponse) response.close() assert conn.is_closed pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.http_port ) @install_urllib3_wheel() def test_read_chunked( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] from urllib3.connection import HTTPConnection conn = HTTPConnection(host, port) conn.request("GET", f"http://{host}:{port}/mediumfile", preload_content=False) 
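        # read_chunked() is a thin wrapper in the emscripten backend: the
        # browser has already decoded the chunked transfer encoding, so it
        # simply yields successive read(512) slices of the body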
response = conn.getresponse() count = 0 for x in response.read_chunked(512): count += 1 if count < 10: assert len(x) == 512 pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.http_port ) @install_urllib3_wheel() def test_retries( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int) -> None: # type: ignore[no-untyped-def] import pytest import urllib3 pool = urllib3.HTTPConnectionPool( host, port, maxsize=1, block=True, retries=urllib3.util.Retry(connect=5, read=5, redirect=5), ) # monkeypatch connection class to count calls old_request = urllib3.connection.HTTPConnection.request count = 0 def count_calls(self, *args, **argv): # type: ignore[no-untyped-def] nonlocal count count += 1 return old_request(self, *args, **argv) urllib3.connection.HTTPConnection.request = count_calls # type: ignore[method-assign] with pytest.raises(urllib3.exceptions.MaxRetryError): pool.urlopen("GET", "/") # this should fail, but should have tried 6 times total assert count == 6 pyodide_test(selenium_coverage, testserver_http.http_host, find_unused_port()) @install_urllib3_wheel() def test_insecure_requests_warning( selenium_coverage: typing.Any, testserver_http: PyodideServerInfo ) -> None: @run_in_pyodide # type: ignore[misc] def pyodide_test(selenium_coverage, host: str, port: int, https_port: int) -> None: # type: ignore[no-untyped-def] import warnings import urllib3 import urllib3.exceptions http = urllib3.PoolManager() with warnings.catch_warnings(record=True) as w: http.request("GET", f"https://{host}:{https_port}") assert len(w) == 0 pyodide_test( selenium_coverage, testserver_http.http_host, testserver_http.http_port, testserver_http.https_port, ) conftest.py 0000644 00000014677 15025346044 0006765 0 ustar 00 from __future__ import annotations import contextlib import os import random import textwrap import typing from dataclasses import dataclass from pathlib import Path from typing import Any, Generator import hypercorn import pytest from dummyserver.app import pyodide_testing_app from dummyserver.hypercornserver import run_hypercorn_in_thread from dummyserver.socketserver import DEFAULT_CERTS from urllib3.util.url import parse_url _coverage_count = 0 def _get_coverage_filename(prefix: str) -> str: global _coverage_count _coverage_count += 1 rand_part = "".join([random.choice("1234567890") for x in range(20)]) return prefix + rand_part + f".{_coverage_count}" @pytest.fixture(scope="module") def testserver_http( request: pytest.FixtureRequest, ) -> Generator[PyodideServerInfo, None, None]: pyodide_dist_dir = Path(os.getcwd(), request.config.getoption("--dist-dir")) pyodide_testing_app.config["pyodide_dist_dir"] = str(pyodide_dist_dir) http_host = "localhost" with contextlib.ExitStack() as stack: http_server_config = hypercorn.Config() http_server_config.bind = [f"{http_host}:0"] stack.enter_context( run_hypercorn_in_thread(http_server_config, pyodide_testing_app) ) http_port = typing.cast(int, parse_url(http_server_config.bind[0]).port) https_server_config = hypercorn.Config() https_server_config.certfile = DEFAULT_CERTS["certfile"] https_server_config.keyfile = DEFAULT_CERTS["keyfile"] https_server_config.verify_mode = DEFAULT_CERTS["cert_reqs"] https_server_config.ca_certs = DEFAULT_CERTS["ca_certs"] https_server_config.alpn_protocols = DEFAULT_CERTS["alpn_protocols"] https_server_config.bind = [f"{http_host}:0"] stack.enter_context( 
run_hypercorn_in_thread(https_server_config, pyodide_testing_app) ) https_port = typing.cast(int, parse_url(https_server_config.bind[0]).port) yield PyodideServerInfo( http_host=http_host, http_port=http_port, https_port=https_port, ) print("Server teardown") @pytest.fixture() def selenium_coverage(selenium: Any) -> Generator[Any, None, None]: def _install_coverage(self: Any) -> None: self.run_js( """ await pyodide.loadPackage("coverage") await pyodide.runPythonAsync(`import coverage _coverage= coverage.Coverage(source_pkgs=['urllib3']) _coverage.start() ` )""" ) setattr( selenium, "_install_coverage", _install_coverage.__get__(selenium, selenium.__class__), ) selenium._install_coverage() yield selenium # on teardown, save _coverage output coverage_out_binary = bytes( selenium.run_js( """ return await pyodide.runPythonAsync(` _coverage.stop() _coverage.save() _coverage_datafile = open(".coverage","rb") _coverage_outdata = _coverage_datafile.read() # avoid polluting main namespace too much import js as _coverage_js # convert to js Array (as default conversion is TypedArray which does # bad things in firefox) _coverage_js.Array.from_(_coverage_outdata) `) """ ) ) with open(f"{_get_coverage_filename('.coverage.emscripten.')}", "wb") as outfile: outfile.write(coverage_out_binary) class ServerRunnerInfo: def __init__(self, host: str, port: int, selenium: Any) -> None: self.host = host self.port = port self.selenium = selenium def run_webworker(self, code: str) -> Any: if isinstance(code, str) and code.startswith("\n"): # we have a multiline string, fix indentation code = textwrap.dedent(code) # add coverage collection to this code code = ( textwrap.dedent( """ import coverage _coverage= coverage.Coverage(source_pkgs=['urllib3']) _coverage.start() """ ) + code ) code += textwrap.dedent( """ _coverage.stop() _coverage.save() _coverage_datafile = open(".coverage","rb") _coverage_outdata = _coverage_datafile.read() # avoid polluting main namespace too much import js as _coverage_js # convert to js Array (as default conversion is TypedArray which does # bad things in firefox) _coverage_js.Array.from_(_coverage_outdata) """ ) coverage_out_binary = bytes( self.selenium.run_js( f""" let worker = new Worker('https://{self.host}:{self.port}/pyodide/webworker_dev.js'); let p = new Promise((res, rej) => {{ worker.onmessageerror = e => rej(e); worker.onerror = e => rej(e); worker.onmessage = e => {{ if (e.data.results) {{ res(e.data.results); }} else {{ rej(e.data.error); }} }}; worker.postMessage({{ python: {repr(code)} }}); }}); return await p; """, pyodide_checks=False, ) ) with open( f"{_get_coverage_filename('.coverage.emscripten.worker.')}", "wb" ) as outfile: outfile.write(coverage_out_binary) # run pyodide on our test server instead of on the default # pytest-pyodide one - this makes it so that # we are at the same origin as web requests to server_host @pytest.fixture() def run_from_server( selenium_coverage: Any, testserver_http: PyodideServerInfo ) -> Generator[ServerRunnerInfo, None, None]: addr = f"https://{testserver_http.http_host}:{testserver_http.https_port}/pyodide/test.html" selenium_coverage.goto(addr) selenium_coverage.javascript_setup() selenium_coverage.load_pyodide() selenium_coverage.initialize_pyodide() selenium_coverage.save_state() selenium_coverage.restore_state() # install the wheel, which is served at /wheel/* selenium_coverage.run_js( """ await pyodide.loadPackage('/wheel/dist.whl') """ ) selenium_coverage._install_coverage() yield ServerRunnerInfo( testserver_http.http_host, 
        testserver_http.https_port, selenium_coverage
    )


@dataclass
class PyodideServerInfo:
    http_port: int
    https_port: int
    http_host: str


__init__.py

from __future__ import annotations

import urllib3.connection

from ...connectionpool import HTTPConnectionPool, HTTPSConnectionPool
from .connection import EmscriptenHTTPConnection, EmscriptenHTTPSConnection


def inject_into_urllib3() -> None:
    # override connection classes to use emscripten specific classes
    # n.b. mypy complains about the overriding of classes below
    # if it isn't ignored
    HTTPConnectionPool.ConnectionCls = EmscriptenHTTPConnection
    HTTPSConnectionPool.ConnectionCls = EmscriptenHTTPSConnection
    urllib3.connection.HTTPConnection = EmscriptenHTTPConnection  # type: ignore[misc,assignment]
    urllib3.connection.HTTPSConnection = EmscriptenHTTPSConnection  # type: ignore[misc,assignment]


response.py

from __future__ import annotations

import json as _json
import logging
import typing
from contextlib import contextmanager
from dataclasses import dataclass
from http.client import HTTPException as HTTPException
from io import BytesIO, IOBase

from ...exceptions import InvalidHeader, TimeoutError
from ...response import BaseHTTPResponse
from ...util.retry import Retry
from .request import EmscriptenRequest

if typing.TYPE_CHECKING:
    from ..._base_connection import BaseHTTPConnection, BaseHTTPSConnection

log = logging.getLogger(__name__)


@dataclass
class EmscriptenResponse:
    status_code: int
    headers: dict[str, str]
    body: IOBase | bytes
    request: EmscriptenRequest


class EmscriptenHttpResponseWrapper(BaseHTTPResponse):
    def __init__(
        self,
        internal_response: EmscriptenResponse,
        url: str | None = None,
        connection: BaseHTTPConnection | BaseHTTPSConnection | None = None,
    ):
        self._pool = None  # set by pool class
        self._body = None
        self._response = internal_response
        self._url = url
        self._connection = connection
        self._closed = False
        super().__init__(
            headers=internal_response.headers,
            status=internal_response.status_code,
            request_url=url,
            version=0,
            version_string="HTTP/?",
            reason="",
            decode_content=True,
        )
        self.length_remaining = self._init_length(self._response.request.method)
        self.length_is_certain = False

    @property
    def url(self) -> str | None:
        return self._url

    @url.setter
    def url(self, url: str | None) -> None:
        self._url = url

    @property
    def connection(self) -> BaseHTTPConnection | BaseHTTPSConnection | None:
        return self._connection

    @property
    def retries(self) -> Retry | None:
        return self._retries

    @retries.setter
    def retries(self, retries: Retry | None) -> None:
        # Override the request_url if retries has a redirect location.
        self._retries = retries

    def stream(
        self, amt: int | None = 2**16, decode_content: bool | None = None
    ) -> typing.Generator[bytes, None, None]:
        """
        A generator wrapper for the read() method. A call will block until
        ``amt`` bytes have been read from the connection or until the
        connection is closed.

        :param amt:
            How much of the content to read. The generator will return up to
            this much data per iteration, but may return less. This is
            particularly likely when using compressed data. However, the
            empty string will never be returned.

        :param decode_content:
            If True, will attempt to decode the body based on the
            'content-encoding' header.
""" while True: data = self.read(amt=amt, decode_content=decode_content) if data: yield data else: break def _init_length(self, request_method: str | None) -> int | None: length: int | None content_length: str | None = self.headers.get("content-length") if content_length is not None: try: # RFC 7230 section 3.3.2 specifies multiple content lengths can # be sent in a single Content-Length header # (e.g. Content-Length: 42, 42). This line ensures the values # are all valid ints and that as long as the `set` length is 1, # all values are the same. Otherwise, the header is invalid. lengths = {int(val) for val in content_length.split(",")} if len(lengths) > 1: raise InvalidHeader( "Content-Length contained multiple " "unmatching values (%s)" % content_length ) length = lengths.pop() except ValueError: length = None else: if length < 0: length = None else: # if content_length is None length = None # Check for responses that shouldn't include a body if ( self.status in (204, 304) or 100 <= self.status < 200 or request_method == "HEAD" ): length = 0 return length def read( self, amt: int | None = None, decode_content: bool | None = None, # ignored because browser decodes always cache_content: bool = False, ) -> bytes: if ( self._closed or self._response is None or (isinstance(self._response.body, IOBase) and self._response.body.closed) ): return b"" with self._error_catcher(): # body has been preloaded as a string by XmlHttpRequest if not isinstance(self._response.body, IOBase): self.length_remaining = len(self._response.body) self.length_is_certain = True # wrap body in IOStream self._response.body = BytesIO(self._response.body) if amt is not None and amt >= 0: # don't cache partial content cache_content = False data = self._response.body.read(amt) if self.length_remaining is not None: self.length_remaining = max(self.length_remaining - len(data), 0) if (self.length_is_certain and self.length_remaining == 0) or len( data ) < amt: # definitely finished reading, close response stream self._response.body.close() return typing.cast(bytes, data) else: # read all we can (and cache it) data = self._response.body.read() if cache_content: self._body = data if self.length_remaining is not None: self.length_remaining = max(self.length_remaining - len(data), 0) if len(data) == 0 or ( self.length_is_certain and self.length_remaining == 0 ): # definitely finished reading, close response stream self._response.body.close() return typing.cast(bytes, data) def read_chunked( self, amt: int | None = None, decode_content: bool | None = None, ) -> typing.Generator[bytes, None, None]: # chunked is handled by browser while True: bytes = self.read(amt, decode_content) if not bytes: break yield bytes def release_conn(self) -> None: if not self._pool or not self._connection: return None self._pool._put_conn(self._connection) self._connection = None def drain_conn(self) -> None: self.close() @property def data(self) -> bytes: if self._body: return self._body else: return self.read(cache_content=True) def json(self) -> typing.Any: """ Deserializes the body of the HTTP response as a Python object. The body of the HTTP response must be encoded using UTF-8, as per `RFC 8529 Section 8.1 <https://www.rfc-editor.org/rfc/rfc8259#section-8.1>`_. To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to your custom decoder instead. If the body of the HTTP response is not decodable to UTF-8, a `UnicodeDecodeError` will be raised. 
If the body of the HTTP response is not a valid JSON document, a `json.JSONDecodeError` will be raised. Read more :ref:`here <json_content>`. :returns: The body of the HTTP response as a Python object. """ data = self.data.decode("utf-8") return _json.loads(data) def close(self) -> None: if not self._closed: if isinstance(self._response.body, IOBase): self._response.body.close() if self._connection: self._connection.close() self._connection = None self._closed = True @contextmanager def _error_catcher(self) -> typing.Generator[None, None, None]: """ Catch Emscripten specific exceptions thrown by fetch.py, instead re-raising urllib3 variants, so that low-level exceptions are not leaked in the high-level api. On exit, release the connection back to the pool. """ from .fetch import _RequestError, _TimeoutError # avoid circular import clean_exit = False try: yield # If no exception is thrown, we should avoid cleaning up # unnecessarily. clean_exit = True except _TimeoutError as e: raise TimeoutError(str(e)) except _RequestError as e: raise HTTPException(str(e)) finally: # If we didn't terminate cleanly, we need to throw away our # connection. if not clean_exit: # The response may not be closed but we're not going to use it # anymore so close it now if ( isinstance(self._response.body, IOBase) and not self._response.body.closed ): self._response.body.close() # release the connection back to the pool self.release_conn() else: # If we have read everything from the response stream, # return the connection back to the pool. if ( isinstance(self._response.body, IOBase) and self._response.body.closed ): self.release_conn() connection.py 0000644 00000021063 15025362624 0007264 0 ustar 00 from __future__ import annotations import os import typing # use http.client.HTTPException for consistency with non-emscripten from http.client import HTTPException as HTTPException # noqa: F401 from http.client import ResponseNotReady from ..._base_connection import _TYPE_BODY from ...connection import HTTPConnection, ProxyConfig, port_by_scheme from ...exceptions import TimeoutError from ...response import BaseHTTPResponse from ...util.connection import _TYPE_SOCKET_OPTIONS from ...util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT from ...util.url import Url from .fetch import _RequestError, _TimeoutError, send_request, send_streaming_request from .request import EmscriptenRequest from .response import EmscriptenHttpResponseWrapper, EmscriptenResponse if typing.TYPE_CHECKING: from ..._base_connection import BaseHTTPConnection, BaseHTTPSConnection class EmscriptenHTTPConnection: default_port: typing.ClassVar[int] = port_by_scheme["http"] default_socket_options: typing.ClassVar[_TYPE_SOCKET_OPTIONS] timeout: None | (float) host: str port: int blocksize: int source_address: tuple[str, int] | None socket_options: _TYPE_SOCKET_OPTIONS | None proxy: Url | None proxy_config: ProxyConfig | None is_verified: bool = False proxy_is_verified: bool | None = None _response: EmscriptenResponse | None def __init__( self, host: str, port: int = 0, *, timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, source_address: tuple[str, int] | None = None, blocksize: int = 8192, socket_options: _TYPE_SOCKET_OPTIONS | None = None, proxy: Url | None = None, proxy_config: ProxyConfig | None = None, ) -> None: self.host = host self.port = port self.timeout = timeout if isinstance(timeout, float) else 0.0 self.scheme = "http" self._closed = True self._response = None # ignore these things because we don't # have control over that stuff self.proxy = None 
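        # (the browser's fetch/XHR stack owns sockets, proxying and TLS, so
        # these attributes exist only to satisfy the BaseHTTPConnection
        # interface; they have no effect in this backend)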
self.proxy_config = None self.blocksize = blocksize self.source_address = None self.socket_options = None self.is_verified = False def set_tunnel( self, host: str, port: int | None = 0, headers: typing.Mapping[str, str] | None = None, scheme: str = "http", ) -> None: pass def connect(self) -> None: pass def request( self, method: str, url: str, body: _TYPE_BODY | None = None, headers: typing.Mapping[str, str] | None = None, # We know *at least* botocore is depending on the order of the # first 3 parameters so to be safe we only mark the later ones # as keyword-only to ensure we have space to extend. *, chunked: bool = False, preload_content: bool = True, decode_content: bool = True, enforce_content_length: bool = True, ) -> None: self._closed = False if url.startswith("/"): # no scheme / host / port included, make a full url url = f"{self.scheme}://{self.host}:{self.port}" + url request = EmscriptenRequest( url=url, method=method, timeout=self.timeout if self.timeout else 0, decode_content=decode_content, ) request.set_body(body) if headers: for k, v in headers.items(): request.set_header(k, v) self._response = None try: if not preload_content: self._response = send_streaming_request(request) if self._response is None: self._response = send_request(request) except _TimeoutError as e: raise TimeoutError(e.message) from e except _RequestError as e: raise HTTPException(e.message) from e def getresponse(self) -> BaseHTTPResponse: if self._response is not None: return EmscriptenHttpResponseWrapper( internal_response=self._response, url=self._response.request.url, connection=self, ) else: raise ResponseNotReady() def close(self) -> None: self._closed = True self._response = None @property def is_closed(self) -> bool: """Whether the connection either is brand new or has been previously closed. If this property is True then both ``is_connected`` and ``has_connected_to_proxy`` properties must be False. """ return self._closed @property def is_connected(self) -> bool: """Whether the connection is actively connected to any origin (proxy or target)""" return True @property def has_connected_to_proxy(self) -> bool: """Whether the connection has successfully connected to its proxy. This returns False if no proxy is in use. Used to determine whether errors are coming from the proxy layer or from tunnelling to the target origin. 
""" return False class EmscriptenHTTPSConnection(EmscriptenHTTPConnection): default_port = port_by_scheme["https"] # all this is basically ignored, as browser handles https cert_reqs: int | str | None = None ca_certs: str | None = None ca_cert_dir: str | None = None ca_cert_data: None | str | bytes = None cert_file: str | None key_file: str | None key_password: str | None ssl_context: typing.Any | None ssl_version: int | str | None = None ssl_minimum_version: int | None = None ssl_maximum_version: int | None = None assert_hostname: None | str | typing.Literal[False] assert_fingerprint: str | None = None def __init__( self, host: str, port: int = 0, *, timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, source_address: tuple[str, int] | None = None, blocksize: int = 16384, socket_options: None | _TYPE_SOCKET_OPTIONS = HTTPConnection.default_socket_options, proxy: Url | None = None, proxy_config: ProxyConfig | None = None, cert_reqs: int | str | None = None, assert_hostname: None | str | typing.Literal[False] = None, assert_fingerprint: str | None = None, server_hostname: str | None = None, ssl_context: typing.Any | None = None, ca_certs: str | None = None, ca_cert_dir: str | None = None, ca_cert_data: None | str | bytes = None, ssl_minimum_version: int | None = None, ssl_maximum_version: int | None = None, ssl_version: int | str | None = None, # Deprecated cert_file: str | None = None, key_file: str | None = None, key_password: str | None = None, ) -> None: super().__init__( host, port=port, timeout=timeout, source_address=source_address, blocksize=blocksize, socket_options=socket_options, proxy=proxy, proxy_config=proxy_config, ) self.scheme = "https" self.key_file = key_file self.cert_file = cert_file self.key_password = key_password self.ssl_context = ssl_context self.server_hostname = server_hostname self.assert_hostname = assert_hostname self.assert_fingerprint = assert_fingerprint self.ssl_version = ssl_version self.ssl_minimum_version = ssl_minimum_version self.ssl_maximum_version = ssl_maximum_version self.ca_certs = ca_certs and os.path.expanduser(ca_certs) self.ca_cert_dir = ca_cert_dir and os.path.expanduser(ca_cert_dir) self.ca_cert_data = ca_cert_data self.cert_reqs = None # The browser will automatically verify all requests. # We have no control over that setting. self.is_verified = True def set_cert( self, key_file: str | None = None, cert_file: str | None = None, cert_reqs: int | str | None = None, key_password: str | None = None, ca_certs: str | None = None, assert_hostname: None | str | typing.Literal[False] = None, assert_fingerprint: str | None = None, ca_cert_dir: str | None = None, ca_cert_data: None | str | bytes = None, ) -> None: pass # verify that this class implements BaseHTTP(s) connection correctly if typing.TYPE_CHECKING: _supports_http_protocol: BaseHTTPConnection = EmscriptenHTTPConnection("", 0) _supports_https_protocol: BaseHTTPSConnection = EmscriptenHTTPSConnection("", 0) fetch.py 0000644 00000033463 15025362624 0006225 0 ustar 00 """ Support for streaming http requests in emscripten. A few caveats - Firstly, you can't do streaming http in the main UI thread, because atomics.wait isn't allowed. Streaming only works if you're running pyodide in a web worker. 
from __future__ import annotations

import io
import json
from email.parser import Parser
from importlib.resources import files
from typing import TYPE_CHECKING, Any

import js  # type: ignore[import-not-found]
from pyodide.ffi import (  # type: ignore[import-not-found]
    JsArray,
    JsException,
    JsProxy,
    to_js,
)

if TYPE_CHECKING:
    from typing_extensions import Buffer

from .request import EmscriptenRequest
from .response import EmscriptenResponse

"""
There are some headers that trigger unintended CORS preflight requests.
See also https://github.com/koenvo/pyodide-http/issues/22
"""
HEADERS_TO_IGNORE = ("user-agent",)

SUCCESS_HEADER = -1
SUCCESS_EOF = -2
ERROR_TIMEOUT = -3
ERROR_EXCEPTION = -4

_STREAMING_WORKER_CODE = (
    files(__package__)
    .joinpath("emscripten_fetch_worker.js")
    .read_text(encoding="utf-8")
)


class _RequestError(Exception):
    def __init__(
        self,
        message: str | None = None,
        *,
        request: EmscriptenRequest | None = None,
        response: EmscriptenResponse | None = None,
    ):
        self.request = request
        self.response = response
        self.message = message
        super().__init__(self.message)


class _StreamingError(_RequestError):
    pass


class _TimeoutError(_RequestError):
    pass


def _obj_from_dict(dict_val: dict[str, Any]) -> JsProxy:
    return to_js(dict_val, dict_converter=js.Object.fromEntries)


class _ReadStream(io.RawIOBase):
    def __init__(
        self,
        int_buffer: JsArray,
        byte_buffer: JsArray,
        timeout: float,
        worker: JsProxy,
        connection_id: int,
        request: EmscriptenRequest,
    ):
        self.int_buffer = int_buffer
        self.byte_buffer = byte_buffer
        self.read_pos = 0
        self.read_len = 0
        self.connection_id = connection_id
        self.worker = worker
        self.timeout = int(1000 * timeout) if timeout > 0 else None
        self.is_live = True
        self._is_closed = False
        self.request: EmscriptenRequest | None = request

    def __del__(self) -> None:
        self.close()

    # this is compatible with _base_connection
    def is_closed(self) -> bool:
        return self._is_closed

    # for compatibility with RawIOBase
    @property
    def closed(self) -> bool:
        return self.is_closed()

    def close(self) -> None:
        if not self.is_closed():
            self.read_len = 0
            self.read_pos = 0
            self.int_buffer = None
            self.byte_buffer = None
            self._is_closed = True
            self.request = None
            if self.is_live:
                self.worker.postMessage(_obj_from_dict({"close": self.connection_id}))
                self.is_live = False
            super().close()

    def readable(self) -> bool:
        return True

    def writable(self) -> bool:
        return False

    def seekable(self) -> bool:
        return False
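
    # Added explanatory note (not in the original source): both views share one
    # SharedArrayBuffer. int_buffer[0] carries a status code (or, after a read,
    # the number of payload bytes); int_buffer[1] carries the length of a JSON
    # header/error string; byte_buffer views the same buffer from byte offset 8
    # and holds the payload written by the fetch worker.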
    def readinto(self, byte_obj: Buffer) -> int:
        if not self.int_buffer:
            raise _StreamingError(
                "No buffer for stream in _ReadStream.readinto",
                request=self.request,
                response=None,
            )
        if self.read_len == 0:
            # wait for the worker to send something
            js.Atomics.store(self.int_buffer, 0, ERROR_TIMEOUT)
            self.worker.postMessage(_obj_from_dict({"getMore": self.connection_id}))
            if (
                js.Atomics.wait(self.int_buffer, 0, ERROR_TIMEOUT, self.timeout)
                == "timed-out"
            ):
                raise _TimeoutError
            data_len = self.int_buffer[0]
            if data_len > 0:
                self.read_len = data_len
                self.read_pos = 0
            elif data_len == ERROR_EXCEPTION:
                string_len = self.int_buffer[1]
                # decode the error string
                js_decoder = js.TextDecoder.new()
                json_str = js_decoder.decode(self.byte_buffer.slice(0, string_len))
                raise _StreamingError(
                    f"Exception thrown in fetch: {json_str}",
                    request=self.request,
                    response=None,
                )
            else:
                # EOF, free the buffers and return zero
                # and free the request
                self.is_live = False
                self.close()
                return 0
        # copy from int32array to python bytes
        ret_length = min(self.read_len, len(memoryview(byte_obj)))
        subarray = self.byte_buffer.subarray(
            self.read_pos, self.read_pos + ret_length
        ).to_py()
        memoryview(byte_obj)[0:ret_length] = subarray
        self.read_len -= ret_length
        self.read_pos += ret_length
        return ret_length
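
# Illustrative only (not part of the original source): _ReadStream is a raw IO
# object, so a consumer would typically wrap it for buffered reads, e.g.
#
#     reader = io.BufferedReader(stream)  # `stream` is a hypothetical _ReadStream
#     chunk = reader.read(4096)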
response_obj["connectionID"], request, ), ) elif js_int_buffer[0] == ERROR_EXCEPTION: string_len = js_int_buffer[1] # decode the error string js_decoder = js.TextDecoder.new() json_str = js_decoder.decode(js_byte_buffer.slice(0, string_len)) raise _StreamingError( f"Exception thrown in fetch: {json_str}", request=request, response=None ) else: raise _StreamingError( f"Unknown status from worker in fetch: {js_int_buffer[0]}", request=request, response=None, ) # check if we are in a worker or not def is_in_browser_main_thread() -> bool: return hasattr(js, "window") and hasattr(js, "self") and js.self == js.window def is_cross_origin_isolated() -> bool: return hasattr(js, "crossOriginIsolated") and js.crossOriginIsolated def is_in_node() -> bool: return ( hasattr(js, "process") and hasattr(js.process, "release") and hasattr(js.process.release, "name") and js.process.release.name == "node" ) def is_worker_available() -> bool: return hasattr(js, "Worker") and hasattr(js, "Blob") _fetcher: _StreamingFetcher | None = None if is_worker_available() and ( (is_cross_origin_isolated() and not is_in_browser_main_thread()) and (not is_in_node()) ): _fetcher = _StreamingFetcher() else: _fetcher = None def send_streaming_request(request: EmscriptenRequest) -> EmscriptenResponse | None: if _fetcher and streaming_ready(): return _fetcher.send(request) else: _show_streaming_warning() return None _SHOWN_TIMEOUT_WARNING = False def _show_timeout_warning() -> None: global _SHOWN_TIMEOUT_WARNING if not _SHOWN_TIMEOUT_WARNING: _SHOWN_TIMEOUT_WARNING = True message = "Warning: Timeout is not available on main browser thread" js.console.warn(message) _SHOWN_STREAMING_WARNING = False def _show_streaming_warning() -> None: global _SHOWN_STREAMING_WARNING if not _SHOWN_STREAMING_WARNING: _SHOWN_STREAMING_WARNING = True message = "Can't stream HTTP requests because: \n" if not is_cross_origin_isolated(): message += " Page is not cross-origin isolated\n" if is_in_browser_main_thread(): message += " Python is running in main browser thread\n" if not is_worker_available(): message += " Worker or Blob classes are not available in this environment." # Defensive: this is always False in browsers that we test in if streaming_ready() is False: message += """ Streaming fetch worker isn't ready. 
def send_request(request: EmscriptenRequest) -> EmscriptenResponse:
    try:
        js_xhr = js.XMLHttpRequest.new()

        if not is_in_browser_main_thread():
            js_xhr.responseType = "arraybuffer"
            if request.timeout:
                js_xhr.timeout = int(request.timeout * 1000)
        else:
            js_xhr.overrideMimeType("text/plain; charset=ISO-8859-15")
            if request.timeout:
                # timeout isn't available on the main thread - show a warning in console
                # if it is set
                _show_timeout_warning()

        js_xhr.open(request.method, request.url, False)
        for name, value in request.headers.items():
            if name.lower() not in HEADERS_TO_IGNORE:
                js_xhr.setRequestHeader(name, value)

        js_xhr.send(to_js(request.body))

        headers = dict(Parser().parsestr(js_xhr.getAllResponseHeaders()))

        if not is_in_browser_main_thread():
            body = js_xhr.response.to_py().tobytes()
        else:
            body = js_xhr.response.encode("ISO-8859-15")
        return EmscriptenResponse(
            status_code=js_xhr.status, headers=headers, body=body, request=request
        )
    except JsException as err:
        if err.name == "TimeoutError":
            raise _TimeoutError(err.message, request=request)
        elif err.name == "NetworkError":
            raise _RequestError(err.message, request=request)
        else:
            # general http error
            raise _RequestError(err.message, request=request)


def streaming_ready() -> bool | None:
    if _fetcher:
        return _fetcher.streaming_ready
    else:
        return None  # no fetcher, return None to signify that


async def wait_for_streaming_ready() -> bool:
    if _fetcher:
        await _fetcher.js_worker_ready_promise
        return True
    else:
        return False

request.py 0000644 00000001066 15025362624 0006616 0 ustar 00
from __future__ import annotations

from dataclasses import dataclass, field

from ..._base_connection import _TYPE_BODY


@dataclass
class EmscriptenRequest:
    method: str
    url: str
    params: dict[str, str] | None = None
    body: _TYPE_BODY | None = None
    headers: dict[str, str] = field(default_factory=dict)
    timeout: float = 0
    decode_content: bool = True

    def set_header(self, name: str, value: str) -> None:
        self.headers[name.capitalize()] = value

    def set_body(self, body: _TYPE_BODY | None) -> None:
        self.body = body
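
# Illustrative only (not part of the original source): building a request by
# hand, e.g. to pass to send_request() in fetch.py above:
#
#     req = EmscriptenRequest(method="GET", url="https://example.com/data.json", timeout=5.0)
#     req.set_header("Accept", "application/json")  # stored capitalized as "Accept"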
emscripten_fetch_worker.js 0000644 00000007107 15025362624 0012027 0 ustar 00
let Status = {
  SUCCESS_HEADER: -1,
  SUCCESS_EOF: -2,
  ERROR_TIMEOUT: -3,
  ERROR_EXCEPTION: -4,
};

let connections = {};
let nextConnectionID = 1;
const encoder = new TextEncoder();

self.addEventListener("message", async function (event) {
  if (event.data.close) {
    let connectionID = event.data.close;
    delete connections[connectionID];
    return;
  } else if (event.data.getMore) {
    let connectionID = event.data.getMore;
    let { curOffset, value, reader, intBuffer, byteBuffer } =
      connections[connectionID];
    // if we still have some in buffer, then just send it back straight away
    if (!value || curOffset >= value.length) {
      // read another buffer if required
      try {
        let readResponse = await reader.read();

        if (readResponse.done) {
          // read everything - clear connection and return
          delete connections[connectionID];
          Atomics.store(intBuffer, 0, Status.SUCCESS_EOF);
          Atomics.notify(intBuffer, 0);
          // finished reading successfully
          // return from event handler
          return;
        }
        curOffset = 0;
        connections[connectionID].value = readResponse.value;
        value = readResponse.value;
      } catch (error) {
        console.log("Request exception:", error);
        let errorBytes = encoder.encode(error.message);
        let written = errorBytes.length;
        byteBuffer.set(errorBytes);
        intBuffer[1] = written;
        Atomics.store(intBuffer, 0, Status.ERROR_EXCEPTION);
        Atomics.notify(intBuffer, 0);
      }
    }

    // send as much buffer as we can
    let curLen = value.length - curOffset;
    if (curLen > byteBuffer.length) {
      curLen = byteBuffer.length;
    }
    byteBuffer.set(value.subarray(curOffset, curOffset + curLen), 0);

    Atomics.store(intBuffer, 0, curLen); // store current length in bytes
    Atomics.notify(intBuffer, 0);
    curOffset += curLen;
    connections[connectionID].curOffset = curOffset;

    return;
  } else {
    // start fetch
    let connectionID = nextConnectionID;
    nextConnectionID += 1;
    const intBuffer = new Int32Array(event.data.buffer);
    const byteBuffer = new Uint8Array(event.data.buffer, 8);
    try {
      const response = await fetch(event.data.url, event.data.fetchParams);
      // return the headers first via textencoder
      var headers = [];
      for (const pair of response.headers.entries()) {
        headers.push([pair[0], pair[1]]);
      }
      let headerObj = {
        headers: headers,
        status: response.status,
        connectionID,
      };
      const headerText = JSON.stringify(headerObj);
      let headerBytes = encoder.encode(headerText);
      let written = headerBytes.length;
      byteBuffer.set(headerBytes);
      intBuffer[1] = written;
      // make a connection
      connections[connectionID] = {
        reader: response.body.getReader(),
        intBuffer: intBuffer,
        byteBuffer: byteBuffer,
        value: undefined,
        curOffset: 0,
      };
      // set header ready
      Atomics.store(intBuffer, 0, Status.SUCCESS_HEADER);
      Atomics.notify(intBuffer, 0);
      // all fetching after this goes through a new postmessage call with getMore
      // this allows for parallel requests
    } catch (error) {
      console.log("Request exception:", error);
      let errorBytes = encoder.encode(error.message);
      let written = errorBytes.length;
      byteBuffer.set(errorBytes);
      intBuffer[1] = written;
      Atomics.store(intBuffer, 0, Status.ERROR_EXCEPTION);
      Atomics.notify(intBuffer, 0);
    }
  }
});
self.postMessage({ inited: true });
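
// Added explanatory note (not in the original source): the worker's message
// protocol, as implemented above, is:
//   { buffer, url, fetchParams } -> start a fetch; status + JSON headers land in buffer
//   { getMore: connectionID }    -> copy the next response chunk into the shared buffer
//   { close: connectionID }      -> drop the connection's reader state
// Each reply is signalled via Atomics.store/notify on int index 0 of the
// SharedArrayBuffer, which the Python side blocks on with Atomics.wait.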