Code import
60
venv/lib/python2.7/site-packages/eventlet/__init__.py
Normal file
@@ -0,0 +1,60 @@
import os


version_info = (0, 21, 0)
__version__ = '.'.join(map(str, version_info))

# This is to make Debian packaging easier, it ignores import
# errors of greenlet so that the packager can still at least
# access the version. Also this makes easy_install a little quieter
if os.environ.get('EVENTLET_IMPORT_VERSION_ONLY') != '1':
    from eventlet import convenience
    from eventlet import event
    from eventlet import greenpool
    from eventlet import greenthread
    from eventlet import patcher
    from eventlet import queue
    from eventlet import semaphore
    from eventlet import support
    from eventlet import timeout
    import greenlet

    connect = convenience.connect
    listen = convenience.listen
    serve = convenience.serve
    StopServe = convenience.StopServe
    wrap_ssl = convenience.wrap_ssl

    Event = event.Event

    GreenPool = greenpool.GreenPool
    GreenPile = greenpool.GreenPile

    sleep = greenthread.sleep
    spawn = greenthread.spawn
    spawn_n = greenthread.spawn_n
    spawn_after = greenthread.spawn_after
    kill = greenthread.kill

    import_patched = patcher.import_patched
    monkey_patch = patcher.monkey_patch

    Queue = queue.Queue

    Semaphore = semaphore.Semaphore
    CappedSemaphore = semaphore.CappedSemaphore
    BoundedSemaphore = semaphore.BoundedSemaphore

    Timeout = timeout.Timeout
    with_timeout = timeout.with_timeout
    wrap_is_timeout = timeout.wrap_is_timeout
    is_timeout = timeout.is_timeout

    getcurrent = greenlet.greenlet.getcurrent

    # deprecated
    TimeoutError, exc_after, call_after_global = (
        support.wrap_deprecated(old, new)(fun) for old, new, fun in (
            ('TimeoutError', 'Timeout', Timeout),
            ('exc_after', 'greenthread.exc_after', greenthread.exc_after),
            ('call_after_global', 'greenthread.call_after_global', greenthread.call_after_global),
        ))
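Editor's note: a minimal usage sketch of the top-level API re-exported above (illustration only, not part of the imported file). GreenPool runs its workers as cooperatively scheduled greenthreads, and imap yields results in input order.

import eventlet

def square(n):
    eventlet.sleep(0.01)  # yields to the hub so other greenthreads can run
    return n * n

pool = eventlet.GreenPool(10)    # at most 10 greenthreads at once
for result in pool.imap(square, range(5)):
    print(result)                # 0, 1, 4, 9, 16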
BIN
venv/lib/python2.7/site-packages/eventlet/__init__.pyc
Normal file
Binary file not shown.
136
venv/lib/python2.7/site-packages/eventlet/backdoor.py
Normal file
@@ -0,0 +1,136 @@
from __future__ import print_function

from code import InteractiveConsole
import errno
import socket
import sys
import traceback

import eventlet
from eventlet import hubs
from eventlet.support import greenlets, get_errno

try:
    sys.ps1
except AttributeError:
    sys.ps1 = '>>> '
try:
    sys.ps2
except AttributeError:
    sys.ps2 = '... '


class FileProxy(object):
    def __init__(self, f):
        self.f = f

    def isatty(self):
        return True

    def flush(self):
        pass

    def write(self, data, *a, **kw):
        self.f.write(data, *a, **kw)
        self.f.flush()

    def readline(self, *a):
        return self.f.readline(*a).replace('\r\n', '\n')

    def __getattr__(self, attr):
        return getattr(self.f, attr)


# @@tavis: the `locals` args below mask the built-in function. Should
# be renamed.
class SocketConsole(greenlets.greenlet):
    def __init__(self, desc, hostport, locals):
        self.hostport = hostport
        self.locals = locals
        # mangle the socket
        self.desc = FileProxy(desc)
        greenlets.greenlet.__init__(self)

    def run(self):
        try:
            console = InteractiveConsole(self.locals)
            console.interact()
        finally:
            self.switch_out()
            self.finalize()

    def switch(self, *args, **kw):
        self.saved = sys.stdin, sys.stderr, sys.stdout
        sys.stdin = sys.stdout = sys.stderr = self.desc
        greenlets.greenlet.switch(self, *args, **kw)

    def switch_out(self):
        sys.stdin, sys.stderr, sys.stdout = self.saved

    def finalize(self):
        # restore the state of the socket
        self.desc = None
        if len(self.hostport) >= 2:
            host = self.hostport[0]
            port = self.hostport[1]
            print("backdoor closed to %s:%s" % (host, port,))
        else:
            print('backdoor closed')


def backdoor_server(sock, locals=None):
    """Blocking function that runs a backdoor server on the socket *sock*,
    accepting connections and running backdoor consoles for each client that
    connects.

    The *locals* argument is a dictionary that will be included in the locals()
    of the interpreters. It can be convenient to stick important application
    variables in here.
    """
    listening_on = sock.getsockname()
    if sock.family == socket.AF_INET:
        # Expand result to IP + port
        listening_on = '%s:%s' % listening_on
    elif sock.family == socket.AF_INET6:
        ip, port, _, _ = listening_on
        listening_on = '%s:%s' % (ip, port,)
    # No action needed if sock.family == socket.AF_UNIX

    print("backdoor server listening on %s" % (listening_on,))
    try:
        try:
            while True:
                socketpair = sock.accept()
                backdoor(socketpair, locals)
        except socket.error as e:
            # Broken pipe means it was shutdown
            if get_errno(e) != errno.EPIPE:
                raise
    finally:
        sock.close()


def backdoor(conn_info, locals=None):
    """Sets up an interactive console on a socket with a single connected
    client. This does not block the caller, as it spawns a new greenlet to
    handle the console. This is meant to be called from within an accept loop
    (such as backdoor_server).
    """
    conn, addr = conn_info
    if conn.family == socket.AF_INET:
        host, port = addr
        print("backdoor to %s:%s" % (host, port))
    elif conn.family == socket.AF_INET6:
        host, port, _, _ = addr
        print("backdoor to %s:%s" % (host, port))
    else:
        print('backdoor opened')
    fl = conn.makefile("rw")
    console = SocketConsole(fl, addr, locals)
    hub = hubs.get_hub()
    hub.schedule_call_global(0, console.switch)


if __name__ == '__main__':
    backdoor_server(eventlet.listen(('127.0.0.1', 9000)), {})
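Editor's note: a hedged sketch of embedding this backdoor in a long-running application (the `state` dict is a hypothetical stand-in for real application objects). The server is spawned so it does not block, and binding to 127.0.0.1 keeps the unauthenticated console off external interfaces.

import eventlet
from eventlet import backdoor

state = {'requests': 0}  # hypothetical application state to inspect
eventlet.spawn(backdoor.backdoor_server,
               eventlet.listen(('127.0.0.1', 3000)),
               locals={'state': state})
# connect with e.g. `nc 127.0.0.1 3000` and inspect `state` at the >>> prompt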
BIN
venv/lib/python2.7/site-packages/eventlet/backdoor.pyc
Normal file
Binary file not shown.
157
venv/lib/python2.7/site-packages/eventlet/convenience.py
Normal file
@@ -0,0 +1,157 @@
import sys

from eventlet import greenio
from eventlet import greenpool
from eventlet import greenthread
from eventlet.green import socket
from eventlet.support import greenlets as greenlet


def connect(addr, family=socket.AF_INET, bind=None):
    """Convenience function for opening client sockets.

    :param addr: Address of the server to connect to. For TCP sockets, this is a (host, port) tuple.
    :param family: Socket family, optional. See :mod:`socket` documentation for available families.
    :param bind: Local address to bind to, optional.
    :return: The connected green socket object.
    """
    sock = socket.socket(family, socket.SOCK_STREAM)
    if bind is not None:
        sock.bind(bind)
    sock.connect(addr)
    return sock


def listen(addr, family=socket.AF_INET, backlog=50):
    """Convenience function for opening server sockets. This
    socket can be used in :func:`~eventlet.serve` or a custom ``accept()`` loop.

    Sets SO_REUSEADDR on the socket to save on annoyance.

    :param addr: Address to listen on. For TCP sockets, this is a (host, port) tuple.
    :param family: Socket family, optional. See :mod:`socket` documentation for available families.
    :param backlog:

        The maximum number of queued connections. Should be at least 1; the maximum
        value is system-dependent.

    :return: The listening green socket object.
    """
    sock = socket.socket(family, socket.SOCK_STREAM)
    if sys.platform[:3] != "win":
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        if hasattr(socket, 'SO_REUSEPORT'):
            # NOTE(zhengwei): linux kernel >= 3.9
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    sock.bind(addr)
    sock.listen(backlog)
    return sock


class StopServe(Exception):
    """Exception class used for quitting :func:`~eventlet.serve` gracefully."""
    pass


def _stop_checker(t, server_gt, conn):
    try:
        try:
            t.wait()
        finally:
            conn.close()
    except greenlet.GreenletExit:
        pass
    except Exception:
        greenthread.kill(server_gt, *sys.exc_info())


def serve(sock, handle, concurrency=1000):
    """Runs a server on the supplied socket. Calls the function *handle* in a
    separate greenthread for every incoming client connection. *handle* takes
    two arguments: the client socket object, and the client address::

        def myhandle(client_sock, client_addr):
            print("client connected", client_addr)

        eventlet.serve(eventlet.listen(('127.0.0.1', 9999)), myhandle)

    Returning from *handle* closes the client socket.

    :func:`serve` blocks the calling greenthread; it won't return until
    the server completes. If you desire an immediate return,
    spawn a new greenthread for :func:`serve`.

    Any uncaught exceptions raised in *handle* are raised as exceptions
    from :func:`serve`, terminating the server, so be sure to be aware of the
    exceptions your application can raise. The return value of *handle* is
    ignored.

    Raise a :class:`~eventlet.StopServe` exception to gracefully terminate the
    server -- that's the only way to get :func:`serve` to return rather
    than raise.

    The value in *concurrency* controls the maximum number of
    greenthreads that will be open at any time handling requests. When
    the server hits the concurrency limit, it stops accepting new
    connections until the existing ones complete.
    """
    pool = greenpool.GreenPool(concurrency)
    server_gt = greenthread.getcurrent()

    while True:
        try:
            conn, addr = sock.accept()
            gt = pool.spawn(handle, conn, addr)
            gt.link(_stop_checker, server_gt, conn)
            conn, addr, gt = None, None, None
        except StopServe:
            return


def wrap_ssl(sock, *a, **kw):
    """Convenience function for converting a regular socket into an
    SSL socket. Has the same interface as :func:`ssl.wrap_socket`,
    but can also use PyOpenSSL. Though, note that it ignores the
    `cert_reqs`, `ssl_version`, `ca_certs`, `do_handshake_on_connect`,
    and `suppress_ragged_eofs` arguments when using PyOpenSSL.

    The preferred idiom is to call wrap_ssl directly on the creation
    method, e.g., ``wrap_ssl(connect(addr))`` or
    ``wrap_ssl(listen(addr), server_side=True)``. This way there is
    no "naked" socket sitting around to accidentally corrupt the SSL
    session.

    :return: Green SSL object.
    """
    return wrap_ssl_impl(sock, *a, **kw)


try:
    from eventlet.green import ssl
    wrap_ssl_impl = ssl.wrap_socket
except ImportError:
    # trying PyOpenSSL
    try:
        from eventlet.green.OpenSSL import SSL
    except ImportError:
        def wrap_ssl_impl(*a, **kw):
            raise ImportError(
                "To use SSL with Eventlet, you must install PyOpenSSL or use Python 2.6 or later.")
    else:
        def wrap_ssl_impl(sock, keyfile=None, certfile=None, server_side=False,
                          cert_reqs=None, ssl_version=None, ca_certs=None,
                          do_handshake_on_connect=True,
                          suppress_ragged_eofs=True, ciphers=None):
            # theoretically the ssl_version could be respected in this line
            context = SSL.Context(SSL.SSLv23_METHOD)
            if certfile is not None:
                context.use_certificate_file(certfile)
            if keyfile is not None:
                context.use_privatekey_file(keyfile)
            context.set_verify(SSL.VERIFY_NONE, lambda *x: True)

            connection = SSL.Connection(context, sock)
            if server_side:
                connection.set_accept_state()
            else:
                connection.set_connect_state()
            return connection
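Editor's note: to make the listen/serve pair above concrete, here is a small echo server sketch (the port number is arbitrary). Each connection gets its own greenthread; returning from the handler closes the client socket.

import eventlet

def echo(client_sock, client_addr):
    # runs in its own greenthread per connection
    while True:
        data = client_sock.recv(1024)
        if not data:
            break
        client_sock.sendall(data)

eventlet.serve(eventlet.listen(('127.0.0.1', 6000)), echo)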
BIN
venv/lib/python2.7/site-packages/eventlet/convenience.pyc
Normal file
Binary file not shown.
53
venv/lib/python2.7/site-packages/eventlet/corolocal.py
Normal file
@@ -0,0 +1,53 @@
import weakref

from eventlet import greenthread

__all__ = ['get_ident', 'local']


def get_ident():
    """Returns ``id()`` of current greenlet. Useful for debugging."""
    return id(greenthread.getcurrent())


# the entire purpose of this class is to store off the constructor
# arguments in a local variable without calling __init__ directly
class _localbase(object):
    __slots__ = '_local__args', '_local__greens'

    def __new__(cls, *args, **kw):
        self = object.__new__(cls)
        object.__setattr__(self, '_local__args', (args, kw))
        object.__setattr__(self, '_local__greens', weakref.WeakKeyDictionary())
        if (args or kw) and (cls.__init__ is object.__init__):
            raise TypeError("Initialization arguments are not supported")
        return self


def _patch(thrl):
    greens = object.__getattribute__(thrl, '_local__greens')
    # until we can store the localdict on greenlets themselves,
    # we store it in _local__greens on the local object
    cur = greenthread.getcurrent()
    if cur not in greens:
        # must be the first time we've seen this greenlet, call __init__
        greens[cur] = {}
        cls = type(thrl)
        if cls.__init__ is not object.__init__:
            args, kw = object.__getattribute__(thrl, '_local__args')
            thrl.__init__(*args, **kw)
    object.__setattr__(thrl, '__dict__', greens[cur])


class local(_localbase):
    def __getattribute__(self, attr):
        _patch(self)
        return object.__getattribute__(self, attr)

    def __setattr__(self, attr, value):
        _patch(self)
        return object.__setattr__(self, attr, value)

    def __delattr__(self, attr):
        _patch(self)
        return object.__delattr__(self, attr)
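Editor's note: a brief illustration of the greenthread-local storage implemented above. Each greenthread gets an independent ``__dict__`` swapped in by ``_patch``, so concurrent writers do not clobber each other.

import eventlet
from eventlet import corolocal

data = corolocal.local()

def worker(name):
    data.name = name      # stored in this greenthread's private dict
    eventlet.sleep(0)     # switch away and back; the value survives
    print(data.name)      # always prints this worker's own name

for n in ('alpha', 'beta'):
    eventlet.spawn(worker, n)
eventlet.sleep(0.1)       # let the workers finish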
BIN
venv/lib/python2.7/site-packages/eventlet/corolocal.pyc
Normal file
Binary file not shown.
61
venv/lib/python2.7/site-packages/eventlet/coros.py
Normal file
@@ -0,0 +1,61 @@
from __future__ import print_function

from eventlet import event as _event


class metaphore(object):
    """This is sort of an inverse semaphore: a counter that starts at 0 and
    waits only if nonzero. It's used to implement a "wait for all" scenario.

    >>> from eventlet import coros, spawn_n
    >>> count = coros.metaphore()
    >>> count.wait()
    >>> def decrementer(count, id):
    ...     print("{0} decrementing".format(id))
    ...     count.dec()
    ...
    >>> _ = spawn_n(decrementer, count, 'A')
    >>> _ = spawn_n(decrementer, count, 'B')
    >>> count.inc(2)
    >>> count.wait()
    A decrementing
    B decrementing
    """

    def __init__(self):
        self.counter = 0
        self.event = _event.Event()
        # send() right away, else we'd wait on the default 0 count!
        self.event.send()

    def inc(self, by=1):
        """Increment our counter. If this transitions the counter from zero to
        nonzero, make any subsequent :meth:`wait` call wait.
        """
        assert by > 0
        self.counter += by
        if self.counter == by:
            # If we just incremented self.counter by 'by', and the new count
            # equals 'by', then the old value of self.counter was 0.
            # Transitioning from 0 to a nonzero value means wait() must
            # actually wait.
            self.event.reset()

    def dec(self, by=1):
        """Decrement our counter. If this transitions the counter from nonzero
        to zero, a current or subsequent wait() call need no longer wait.
        """
        assert by > 0
        self.counter -= by
        if self.counter <= 0:
            # Don't leave self.counter < 0, that will screw things up in
            # future calls.
            self.counter = 0
            # Transitioning from nonzero to 0 means wait() need no longer wait.
            self.event.send()

    def wait(self):
        """Suspend the caller only if our count is nonzero. In that case,
        resume the caller once the count decrements to zero again.
        """
        self.event.wait()
BIN
venv/lib/python2.7/site-packages/eventlet/coros.pyc
Normal file
Binary file not shown.
602
venv/lib/python2.7/site-packages/eventlet/dagpool.py
Normal file
@@ -0,0 +1,602 @@
# @file   dagpool.py
# @author Nat Goodspeed
# @date   2016-08-08
# @brief  Provide DAGPool class

from eventlet.event import Event
from eventlet import greenthread
from eventlet.support import six
import collections


# value distinguished from any other Python value including None
_MISSING = object()


class Collision(Exception):
    """
    DAGPool raises Collision when you try to launch two greenthreads with the
    same key, or post() a result for a key corresponding to a greenthread, or
    post() twice for the same key. As with KeyError, str(collision) names the
    key in question.
    """
    pass


class PropagateError(Exception):
    """
    When a DAGPool greenthread terminates with an exception instead of
    returning a result, attempting to retrieve its value raises
    PropagateError.

    Attributes:

    key
        the key of the greenthread which raised the exception

    exc
        the exception object raised by the greenthread
    """
    def __init__(self, key, exc):
        # initialize base class with a reasonable string message
        msg = "PropagateError({0}): {1}: {2}" \
            .format(key, exc.__class__.__name__, exc)
        super(PropagateError, self).__init__(msg)
        self.msg = msg
        # Unless we set args, this is unpickleable:
        # https://bugs.python.org/issue1692335
        self.args = (key, exc)
        self.key = key
        self.exc = exc

    def __str__(self):
        return self.msg


class DAGPool(object):
    """
    A DAGPool is a pool that constrains greenthreads, not by max concurrency,
    but by data dependencies.

    This is a way to implement general DAG dependencies. A simple dependency
    tree (flowing in either direction) can straightforwardly be implemented
    using recursion and (e.g.)
    :meth:`GreenThread.imap() <eventlet.greenthread.GreenThread.imap>`.
    What gets complicated is when a given node depends on several other nodes
    as well as contributing to several other nodes.

    With DAGPool, you concurrently launch all applicable greenthreads; each
    will proceed as soon as it has all required inputs. The DAG is implicit in
    which items are required by each greenthread.

    Each greenthread is launched in a DAGPool with a key: any value that can
    serve as a Python dict key. The caller also specifies an iterable of other
    keys on which this greenthread depends. This iterable may be empty.

    The greenthread callable must accept (key, results), where:

    key
        is its own key

    results
        is an iterable of (key, value) pairs.

    A newly-launched DAGPool greenthread is entered immediately, and can
    perform any necessary setup work. At some point it will iterate over the
    (key, value) pairs from the passed 'results' iterable. Doing so blocks the
    greenthread until a value is available for each of the keys specified in
    its initial dependencies iterable. These (key, value) pairs are delivered
    in chronological order, *not* the order in which they are initially
    specified: each value will be delivered as soon as it becomes available.

    The value returned by a DAGPool greenthread becomes the value for its
    key, which unblocks any other greenthreads waiting on that key.

    If a DAGPool greenthread terminates with an exception instead of returning
    a value, attempting to retrieve the value raises :class:`PropagateError`,
    which binds the key of the original greenthread and the original
    exception. Unless the greenthread attempting to retrieve the value handles
    PropagateError, that exception will in turn be wrapped in a PropagateError
    of its own, and so forth. The code that ultimately handles PropagateError
    can follow the chain of PropagateError.exc attributes to discover the flow
    of that exception through the DAG of greenthreads.

    External greenthreads may also interact with a DAGPool. See :meth:`wait_each`,
    :meth:`waitall`, :meth:`post`.

    It is not recommended to constrain external DAGPool producer greenthreads
    in a :class:`GreenPool <eventlet.greenpool.GreenPool>`: it may be hard to
    provably avoid deadlock.

    .. automethod:: __init__
    .. automethod:: __getitem__
    """

    _Coro = collections.namedtuple("_Coro", ("greenthread", "pending"))

    def __init__(self, preload={}):
        """
        DAGPool can be prepopulated with an initial dict or iterable of (key,
        value) pairs. These (key, value) pairs are of course immediately
        available for any greenthread that depends on any of those keys.
        """
        try:
            # If a dict is passed, copy it. Don't risk a subsequent
            # modification to passed dict affecting our internal state.
            iteritems = six.iteritems(preload)
        except AttributeError:
            # Not a dict, just an iterable of (key, value) pairs
            iteritems = preload

        # Load the initial dict
        self.values = dict(iteritems)

        # track greenthreads
        self.coros = {}

        # The key to blocking greenthreads is the Event.
        self.event = Event()

    def waitall(self):
        """
        waitall() blocks the calling greenthread until there is a value for
        every DAGPool greenthread launched by :meth:`spawn`. It returns a dict
        containing all :class:`preload data <DAGPool>`, all data from
        :meth:`post` and all values returned by spawned greenthreads.

        See also :meth:`wait`.
        """
        # waitall() is an alias for compatibility with GreenPool
        return self.wait()

    def wait(self, keys=_MISSING):
        """
        *keys* is an optional iterable of keys. If you omit the argument, it
        waits for all the keys from :class:`preload data <DAGPool>`, from
        :meth:`post` calls and from :meth:`spawn` calls: in other words, all
        the keys of which this DAGPool is aware.

        wait() blocks the calling greenthread until all of the relevant keys
        have values. wait() returns a dict whose keys are the relevant keys,
        and whose values come from the *preload* data, from values returned by
        DAGPool greenthreads or from :meth:`post` calls.

        If a DAGPool greenthread terminates with an exception, wait() will
        raise :class:`PropagateError` wrapping that exception. If more than
        one greenthread terminates with an exception, it is indeterminate
        which one wait() will raise.

        If an external greenthread posts a :class:`PropagateError` instance,
        wait() will raise that PropagateError. If more than one greenthread
        posts PropagateError, it is indeterminate which one wait() will raise.

        See also :meth:`wait_each_success`, :meth:`wait_each_exception`.
        """
        # This is mostly redundant with wait_each() functionality.
        return dict(self.wait_each(keys))

    def wait_each(self, keys=_MISSING):
        """
        *keys* is an optional iterable of keys. If you omit the argument, it
        waits for all the keys from :class:`preload data <DAGPool>`, from
        :meth:`post` calls and from :meth:`spawn` calls: in other words, all
        the keys of which this DAGPool is aware.

        wait_each() is a generator producing (key, value) pairs as a value
        becomes available for each requested key. wait_each() blocks the
        calling greenthread until the next value becomes available. If the
        DAGPool was prepopulated with values for any of the relevant keys, of
        course those can be delivered immediately without waiting.

        Delivery order is intentionally decoupled from the initial sequence of
        keys: each value is delivered as soon as it becomes available. If
        multiple keys are available at the same time, wait_each() delivers
        each of the ready ones in arbitrary order before blocking again.

        The DAGPool does not distinguish between a value returned by one of
        its own greenthreads and one provided by a :meth:`post` call or *preload* data.

        The wait_each() generator terminates (raises StopIteration) when all
        specified keys have been delivered. Thus, typical usage might be:

        ::

            for key, value in dagpool.wait_each(keys):
                # process this ready key and value
            # continue processing now that we've gotten values for all keys

        By implication, if you pass wait_each() an empty iterable of keys, it
        returns immediately without yielding anything.

        If the value to be delivered is a :class:`PropagateError` exception object, the
        generator raises that PropagateError instead of yielding it.

        See also :meth:`wait_each_success`, :meth:`wait_each_exception`.
        """
        # Build a local set() and then call _wait_each().
        return self._wait_each(self._get_keyset_for_wait_each(keys))

    def wait_each_success(self, keys=_MISSING):
        """
        wait_each_success() filters results so that only success values are
        yielded. In other words, unlike :meth:`wait_each`, wait_each_success()
        will not raise :class:`PropagateError`. Not every provided (or
        defaulted) key will necessarily be represented, though naturally the
        generator will not finish until all have completed.

        In all other respects, wait_each_success() behaves like :meth:`wait_each`.
        """
        for key, value in self._wait_each_raw(self._get_keyset_for_wait_each(keys)):
            if not isinstance(value, PropagateError):
                yield key, value

    def wait_each_exception(self, keys=_MISSING):
        """
        wait_each_exception() filters results so that only exceptions are
        yielded. Not every provided (or defaulted) key will necessarily be
        represented, though naturally the generator will not finish until
        all have completed.

        Unlike other DAGPool methods, wait_each_exception() simply yields
        :class:`PropagateError` instances as values rather than raising them.

        In all other respects, wait_each_exception() behaves like :meth:`wait_each`.
        """
        for key, value in self._wait_each_raw(self._get_keyset_for_wait_each(keys)):
            if isinstance(value, PropagateError):
                yield key, value

    def _get_keyset_for_wait_each(self, keys):
        """
        wait_each(), wait_each_success() and wait_each_exception() promise
        that if you pass an iterable of keys, the method will wait for results
        from those keys -- but if you omit the keys argument, the method will
        wait for results from all known keys. This helper implements that
        distinction, returning a set() of the relevant keys.
        """
        if keys is not _MISSING:
            return set(keys)
        else:
            # keys arg omitted -- use all the keys we know about
            return set(six.iterkeys(self.coros)) | set(six.iterkeys(self.values))

    def _wait_each(self, pending):
        """
        When _wait_each() encounters a value of PropagateError, it raises it.

        In all other respects, _wait_each() behaves like _wait_each_raw().
        """
        for key, value in self._wait_each_raw(pending):
            yield key, self._value_or_raise(value)

    @staticmethod
    def _value_or_raise(value):
        # Most methods attempting to deliver PropagateError should raise that
        # instead of simply returning it.
        if isinstance(value, PropagateError):
            raise value
        return value

    def _wait_each_raw(self, pending):
        """
        pending is a set() of keys for which we intend to wait. THIS SET WILL
        BE DESTRUCTIVELY MODIFIED: as each key acquires a value, that key will
        be removed from the passed 'pending' set.

        _wait_each_raw() does not treat a PropagateError instance specially:
        it will be yielded to the caller like any other value.

        In all other respects, _wait_each_raw() behaves like wait_each().
        """
        while True:
            # Before even waiting, show caller any (key, value) pairs that
            # are already available. Copy 'pending' because we want to be able
            # to remove items from the original set while iterating.
            for key in pending.copy():
                value = self.values.get(key, _MISSING)
                if value is not _MISSING:
                    # found one, it's no longer pending
                    pending.remove(key)
                    yield (key, value)

            if not pending:
                # Once we've yielded all the caller's keys, done.
                break

            # There are still more keys pending, so wait.
            self.event.wait()

    def spawn(self, key, depends, function, *args, **kwds):
        """
        Launch the passed *function(key, results, ...)* as a greenthread,
        passing it:

        - the specified *key*
        - an iterable of (key, value) pairs
        - whatever other positional args or keywords you specify.

        Iterating over the *results* iterable behaves like calling
        :meth:`wait_each(depends) <DAGPool.wait_each>`.

        Returning from *function()* behaves like
        :meth:`post(key, return_value) <DAGPool.post>`.

        If *function()* terminates with an exception, that exception is wrapped
        in :class:`PropagateError` with the greenthread's *key* and (effectively) posted
        as the value for that key. Attempting to retrieve that value will
        raise that PropagateError.

        Thus, if the greenthread with key 'a' terminates with an exception,
        and greenthread 'b' depends on 'a', when greenthread 'b' attempts to
        iterate through its *results* argument, it will encounter
        PropagateError. So by default, an uncaught exception will propagate
        through all the downstream dependencies.

        If you pass :meth:`spawn` a key already passed to spawn() or :meth:`post`, spawn()
        raises :class:`Collision`.
        """
        if key in self.coros or key in self.values:
            raise Collision(key)

        # The order is a bit tricky. First construct the set() of keys.
        pending = set(depends)
        # It's important that we pass to _wait_each() the same 'pending' set()
        # that we store in self.coros for this key. The generator-iterator
        # returned by _wait_each() becomes the function's 'results' iterable.
        newcoro = greenthread.spawn(self._wrapper, function, key,
                                    self._wait_each(pending),
                                    *args, **kwds)
        # Also capture the same (!) set in the new _Coro object for this key.
        # We must be able to observe ready keys being removed from the set.
        self.coros[key] = self._Coro(newcoro, pending)

    def _wrapper(self, function, key, results, *args, **kwds):
        """
        This wrapper runs the top-level function in a DAGPool greenthread,
        posting its return value (or PropagateError) to the DAGPool.
        """
        try:
            # call our passed function
            result = function(key, results, *args, **kwds)
        except Exception as err:
            # Wrap any exception it may raise in a PropagateError.
            result = PropagateError(key, err)
        finally:
            # function() has returned (or terminated with an exception). We no
            # longer need to track this greenthread in self.coros. Remove it
            # first so post() won't complain about a running greenthread.
            del self.coros[key]

        try:
            # as advertised, try to post() our return value
            self.post(key, result)
        except Collision:
            # if we've already post()ed a result, oh well
            pass

        # also, in case anyone cares...
        return result

    def spawn_many(self, depends, function, *args, **kwds):
        """
        spawn_many() accepts a single *function* whose parameters are the same
        as for :meth:`spawn`.

        The difference is that spawn_many() accepts a dependency dict
        *depends*. A new greenthread is spawned for each key in the dict. That
        dict key's value should be an iterable of other keys on which this
        greenthread depends.

        If the *depends* dict contains any key already passed to :meth:`spawn`
        or :meth:`post`, spawn_many() raises :class:`Collision`. It is
        indeterminate how many of the other keys in *depends* will have
        successfully spawned greenthreads.
        """
        # Iterate over 'depends' items, relying on self.spawn() not to
        # context-switch so no one can modify 'depends' along the way.
        for key, deps in six.iteritems(depends):
            self.spawn(key, deps, function, *args, **kwds)

    def kill(self, key):
        """
        Kill the greenthread that was spawned with the specified *key*.

        If no such greenthread was spawned, raise KeyError.
        """
        # let KeyError, if any, propagate
        self.coros[key].greenthread.kill()
        # once killed, remove it
        del self.coros[key]

    def post(self, key, value, replace=False):
        """
        post(key, value) stores the passed *value* for the passed *key*. It
        then causes each greenthread blocked on its results iterable, or on
        :meth:`wait_each(keys) <DAGPool.wait_each>`, to check for new values.
        A waiting greenthread might not literally resume on every single
        post() of a relevant key, but the first post() of a relevant key
        ensures that it will resume eventually, and when it does it will catch
        up with all relevant post() calls.

        Calling post(key, value) when there is a running greenthread with that
        same *key* raises :class:`Collision`. If you must post(key, value) instead of
        letting the greenthread run to completion, you must first call
        :meth:`kill(key) <DAGPool.kill>`.

        The DAGPool implicitly post()s the return value from each of its
        greenthreads. But a greenthread may explicitly post() a value for its
        own key, which will cause its return value to be discarded.

        Calling post(key, value, replace=False) (the default *replace*) when a
        value for that key has already been posted, by any means, raises
        :class:`Collision`.

        Calling post(key, value, replace=True) when a value for that key has
        already been posted, by any means, replaces the previously-stored
        value. However, that may make it complicated to reason about the
        behavior of greenthreads waiting on that key.

        After a post(key, value1) followed by post(key, value2, replace=True),
        it is unspecified which pending :meth:`wait_each([key...]) <DAGPool.wait_each>`
        calls (or greenthreads iterating over *results* involving that key)
        will observe *value1* versus *value2*. It is guaranteed that
        subsequent wait_each([key...]) calls (or greenthreads spawned after
        that point) will observe *value2*.

        A successful call to
        post(key, :class:`PropagateError(key, ExceptionSubclass) <PropagateError>`)
        ensures that any subsequent attempt to retrieve that key's value will
        raise that PropagateError instance.
        """
        # First, check if we're trying to post() to a key with a running
        # greenthread.
        # A DAGPool greenthread is explicitly permitted to post() to its
        # OWN key.
        coro = self.coros.get(key, _MISSING)
        if coro is not _MISSING and coro.greenthread is not greenthread.getcurrent():
            # oh oh, trying to post a value for running greenthread from
            # some other greenthread
            raise Collision(key)

        # Here, either we're posting a value for a key with no greenthread or
        # we're posting from that greenthread itself.

        # Has somebody already post()ed a value for this key?
        # Unless replace == True, this is a problem.
        if key in self.values and not replace:
            raise Collision(key)

        # Either we've never before posted a value for this key, or we're
        # posting with replace == True.

        # update our database
        self.values[key] = value
        # and wake up pending waiters
        self.event.send()
        # The comment in Event.reset() says: "it's better to create a new
        # event rather than reset an old one". Okay, fine. We do want to be
        # able to support new waiters, so create a new Event.
        self.event = Event()

    def __getitem__(self, key):
        """
        __getitem__(key) (aka dagpool[key]) blocks until *key* has a value,
        then delivers that value.
        """
        # This is a degenerate case of wait_each(). Construct a tuple
        # containing only this 'key'. wait_each() will yield exactly one (key,
        # value) pair. Return just its value.
        for _, value in self.wait_each((key,)):
            return value

    def get(self, key, default=None):
        """
        get() returns the value for *key*. If *key* does not yet have a value,
        get() returns *default*.
        """
        return self._value_or_raise(self.values.get(key, default))

    def keys(self):
        """
        Return a snapshot tuple of keys for which we currently have values.
        """
        # Explicitly return a copy rather than an iterator: don't assume our
        # caller will finish iterating before new values are posted.
        return tuple(six.iterkeys(self.values))

    def items(self):
        """
        Return a snapshot tuple of currently-available (key, value) pairs.
        """
        # Don't assume our caller will finish iterating before new values are
        # posted.
        return tuple((key, self._value_or_raise(value))
                     for key, value in six.iteritems(self.values))

    def running(self):
        """
        Return number of running DAGPool greenthreads. This includes
        greenthreads blocked while iterating through their *results* iterable,
        that is, greenthreads waiting on values from other keys.
        """
        return len(self.coros)

    def running_keys(self):
        """
        Return keys for running DAGPool greenthreads. This includes
        greenthreads blocked while iterating through their *results* iterable,
        that is, greenthreads waiting on values from other keys.
        """
        # return snapshot; don't assume caller will finish iterating before we
        # next modify self.coros
        return tuple(six.iterkeys(self.coros))

    def waiting(self):
        """
        Return number of waiting DAGPool greenthreads, that is, greenthreads
        still waiting on values from other keys. This explicitly does *not*
        include external greenthreads waiting on :meth:`wait`,
        :meth:`waitall`, :meth:`wait_each`.
        """
        # n.b. if Event would provide a count of its waiters, we could say
        # something about external greenthreads as well.
        # The logic to determine this count is exactly the same as the general
        # waiting_for() call.
        return len(self.waiting_for())

    # Use _MISSING instead of None as the default 'key' param so we can permit
    # None as a supported key.
    def waiting_for(self, key=_MISSING):
        """
        waiting_for(key) returns a set() of the keys for which the DAGPool
        greenthread spawned with that *key* is still waiting. If you pass a
        *key* for which no greenthread was spawned, waiting_for() raises
        KeyError.

        waiting_for() without argument returns a dict. Its keys are the keys
        of DAGPool greenthreads still waiting on one or more values. In the
        returned dict, the value of each such key is the set of other keys for
        which that greenthread is still waiting.

        This method allows diagnosing a "hung" DAGPool. If certain
        greenthreads are making no progress, it's possible that they are
        waiting on keys for which there is no greenthread and no :meth:`post` data.
        """
        # We may have greenthreads whose 'pending' entry indicates they're
        # waiting on some keys even though values have now been posted for
        # some or all of those keys, because those greenthreads have not yet
        # regained control since values were posted. So make a point of
        # excluding values that are now available.
        available = set(six.iterkeys(self.values))

        if key is not _MISSING:
            # waiting_for(key) is semantically different than waiting_for().
            # It's just that they both seem to want the same method name.
            coro = self.coros.get(key, _MISSING)
            if coro is _MISSING:
                # Hmm, no running greenthread with this key. But was there
                # EVER a greenthread with this key? If not, let KeyError
                # propagate.
                self.values[key]
                # Oh good, there's a value for this key. Either the
                # greenthread finished, or somebody posted a value. Just say
                # the greenthread isn't waiting for anything.
                return set()
            else:
                # coro is the _Coro for the running greenthread with the
                # specified key.
                return coro.pending - available

        # This is a waiting_for() call, i.e. a general query rather than for a
        # specific key.

        # Start by iterating over (key, coro) pairs in self.coros. Generate
        # (key, pending) pairs in which 'pending' is the set of keys on which
        # the greenthread believes it's waiting, minus the set of keys that
        # are now available. Filter out any pair in which 'pending' is empty,
        # that is, that greenthread will be unblocked next time it resumes.
        # Make a dict from those pairs.
        return dict((key, pending)
                    for key, pending in ((key, (coro.pending - available))
                                         for key, coro in six.iteritems(self.coros))
                    if pending)
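Editor's note: a small sketch of the DAGPool API documented above (the keys and the node function are illustrative). Node 'b' cannot finish until 'a' has produced a value, because 'a' appears in its dependency iterable.

from eventlet.dagpool import DAGPool

def node(key, results):
    # iterating 'results' blocks until every dependency has a value
    inputs = dict(results)
    return '%s(%s)' % (key, ','.join(sorted(map(str, inputs.values()))))

pool = DAGPool(preload={'seed': 'x'})
pool.spawn('a', ('seed',), node)
pool.spawn('b', ('seed', 'a'), node)
print(pool.waitall())  # dict with values for 'seed', 'a' and 'b'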
BIN
venv/lib/python2.7/site-packages/eventlet/dagpool.pyc
Normal file
Binary file not shown.
461
venv/lib/python2.7/site-packages/eventlet/db_pool.py
Normal file
@@ -0,0 +1,461 @@
|
||||
from __future__ import print_function
|
||||
|
||||
from collections import deque
|
||||
from contextlib import contextmanager
|
||||
import sys
|
||||
import time
|
||||
|
||||
from eventlet.pools import Pool
|
||||
from eventlet import timeout
|
||||
from eventlet import hubs
|
||||
from eventlet.hubs.timer import Timer
|
||||
from eventlet.greenthread import GreenThread
|
||||
|
||||
|
||||
_MISSING = object()
|
||||
|
||||
|
||||
class ConnectTimeout(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def cleanup_rollback(conn):
|
||||
conn.rollback()
|
||||
|
||||
|
||||
class BaseConnectionPool(Pool):
|
||||
def __init__(self, db_module,
|
||||
min_size=0, max_size=4,
|
||||
max_idle=10, max_age=30,
|
||||
connect_timeout=5,
|
||||
cleanup=cleanup_rollback,
|
||||
*args, **kwargs):
|
||||
"""
|
||||
Constructs a pool with at least *min_size* connections and at most
|
||||
*max_size* connections. Uses *db_module* to construct new connections.
|
||||
|
||||
The *max_idle* parameter determines how long pooled connections can
|
||||
remain idle, in seconds. After *max_idle* seconds have elapsed
|
||||
without the connection being used, the pool closes the connection.
|
||||
|
||||
*max_age* is how long any particular connection is allowed to live.
|
||||
Connections that have been open for longer than *max_age* seconds are
|
||||
closed, regardless of idle time. If *max_age* is 0, all connections are
|
||||
closed on return to the pool, reducing it to a concurrency limiter.
|
||||
|
||||
*connect_timeout* is the duration in seconds that the pool will wait
|
||||
before timing out on connect() to the database. If triggered, the
|
||||
timeout will raise a ConnectTimeout from get().
|
||||
|
||||
The remainder of the arguments are used as parameters to the
|
||||
*db_module*'s connection constructor.
|
||||
"""
|
||||
assert(db_module)
|
||||
self._db_module = db_module
|
||||
self._args = args
|
||||
self._kwargs = kwargs
|
||||
self.max_idle = max_idle
|
||||
self.max_age = max_age
|
||||
self.connect_timeout = connect_timeout
|
||||
self._expiration_timer = None
|
||||
self.cleanup = cleanup
|
||||
super(BaseConnectionPool, self).__init__(min_size=min_size,
|
||||
max_size=max_size,
|
||||
order_as_stack=True)
|
||||
|
||||
def _schedule_expiration(self):
|
||||
"""Sets up a timer that will call _expire_old_connections when the
|
||||
oldest connection currently in the free pool is ready to expire. This
|
||||
is the earliest possible time that a connection could expire, thus, the
|
||||
timer will be running as infrequently as possible without missing a
|
||||
possible expiration.
|
||||
|
||||
If this function is called when a timer is already scheduled, it does
|
||||
nothing.
|
||||
|
||||
If max_age or max_idle is 0, _schedule_expiration likewise does nothing.
|
||||
"""
|
||||
if self.max_age is 0 or self.max_idle is 0:
|
||||
# expiration is unnecessary because all connections will be expired
|
||||
# on put
|
||||
return
|
||||
|
||||
if (self._expiration_timer is not None
|
||||
and not getattr(self._expiration_timer, 'called', False)):
|
||||
# the next timer is already scheduled
|
||||
return
|
||||
|
||||
try:
|
||||
now = time.time()
|
||||
self._expire_old_connections(now)
|
||||
# the last item in the list, because of the stack ordering,
|
||||
# is going to be the most-idle
|
||||
idle_delay = (self.free_items[-1][0] - now) + self.max_idle
|
||||
oldest = min([t[1] for t in self.free_items])
|
||||
age_delay = (oldest - now) + self.max_age
|
||||
|
||||
next_delay = min(idle_delay, age_delay)
|
||||
except (IndexError, ValueError):
|
||||
# no free items, unschedule ourselves
|
||||
self._expiration_timer = None
|
||||
return
|
||||
|
||||
if next_delay > 0:
|
||||
# set up a continuous self-calling loop
|
||||
self._expiration_timer = Timer(next_delay, GreenThread(hubs.get_hub().greenlet).switch,
|
||||
self._schedule_expiration, [], {})
|
||||
self._expiration_timer.schedule()
|
||||
|
||||
def _expire_old_connections(self, now):
|
||||
"""Iterates through the open connections contained in the pool, closing
|
||||
ones that have remained idle for longer than max_idle seconds, or have
|
||||
been in existence for longer than max_age seconds.
|
||||
|
||||
*now* is the current time, as returned by time.time().
|
||||
"""
|
||||
original_count = len(self.free_items)
|
||||
expired = [
|
||||
conn
|
||||
for last_used, created_at, conn in self.free_items
|
||||
if self._is_expired(now, last_used, created_at)]
|
||||
|
||||
new_free = [
|
||||
(last_used, created_at, conn)
|
||||
for last_used, created_at, conn in self.free_items
|
||||
if not self._is_expired(now, last_used, created_at)]
|
||||
self.free_items.clear()
|
||||
self.free_items.extend(new_free)
|
||||
|
||||
# adjust the current size counter to account for expired
|
||||
# connections
|
||||
self.current_size -= original_count - len(self.free_items)
|
||||
|
||||
for conn in expired:
|
||||
self._safe_close(conn, quiet=True)
|
||||
|
||||
def _is_expired(self, now, last_used, created_at):
|
||||
"""Returns true and closes the connection if it's expired.
|
||||
"""
|
||||
if (self.max_idle <= 0 or self.max_age <= 0
|
||||
or now - last_used > self.max_idle
|
||||
or now - created_at > self.max_age):
|
||||
return True
|
||||
return False
|
||||
|
||||
def _unwrap_connection(self, conn):
|
||||
"""If the connection was wrapped by a subclass of
|
||||
BaseConnectionWrapper and is still functional (as determined
|
||||
by the __nonzero__, or __bool__ in python3, method), returns
|
||||
the unwrapped connection. If anything goes wrong with this
|
||||
process, returns None.
|
||||
"""
|
||||
base = None
|
||||
try:
|
||||
if conn:
|
||||
base = conn._base
|
||||
conn._destroy()
|
||||
else:
|
||||
base = None
|
||||
except AttributeError:
|
||||
pass
|
||||
return base
|
||||
|
||||
def _safe_close(self, conn, quiet=False):
|
||||
"""Closes the (already unwrapped) connection, squelching any
|
||||
exceptions.
|
||||
"""
|
||||
try:
|
||||
conn.close()
|
||||
except AttributeError:
|
||||
pass # conn is None, or junk
|
||||
except Exception:
|
||||
if not quiet:
|
||||
print("Connection.close raised: %s" % (sys.exc_info()[1]))
|
||||
|
||||
def get(self):
|
||||
conn = super(BaseConnectionPool, self).get()
|
||||
|
||||
# None is a flag value that means that put got called with
|
||||
# something it couldn't use
|
||||
if conn is None:
|
||||
try:
|
||||
conn = self.create()
|
||||
except Exception:
|
||||
# unconditionally increase the free pool because
|
||||
# even if there are waiters, doing a full put
|
||||
# would incur a greenlib switch and thus lose the
|
||||
# exception stack
|
||||
self.current_size -= 1
|
||||
raise
|
||||
|
||||
# if the call to get() draws from the free pool, it will come
|
||||
# back as a tuple
|
||||
if isinstance(conn, tuple):
|
||||
_last_used, created_at, conn = conn
|
||||
else:
|
||||
created_at = time.time()
|
||||
|
||||
# wrap the connection so the consumer can call close() safely
|
||||
wrapped = PooledConnectionWrapper(conn, self)
|
||||
# annotating the wrapper so that when it gets put in the pool
|
||||
# again, we'll know how old it is
|
||||
wrapped._db_pool_created_at = created_at
|
||||
return wrapped
|
||||
|
||||
def put(self, conn, cleanup=_MISSING):
|
||||
created_at = getattr(conn, '_db_pool_created_at', 0)
|
||||
now = time.time()
|
||||
conn = self._unwrap_connection(conn)
|
||||
|
||||
if self._is_expired(now, now, created_at):
|
||||
self._safe_close(conn, quiet=False)
|
||||
conn = None
|
||||
elif cleanup is not None:
|
||||
if cleanup is _MISSING:
|
||||
cleanup = self.cleanup
|
||||
# by default, call rollback in case the connection is in the middle
|
||||
# of a transaction. However, rollback has performance implications
|
||||
# so optionally do nothing or call something else like ping
|
||||
try:
|
||||
if conn:
|
||||
cleanup(conn)
|
||||
except Exception as e:
|
||||
# we don't care what the exception was, we just know the
|
||||
# connection is dead
|
||||
print("WARNING: cleanup %s raised: %s" % (cleanup, e))
|
||||
conn = None
|
||||
except:
|
||||
conn = None
|
||||
raise
|
||||
|
||||
if conn is not None:
|
||||
super(BaseConnectionPool, self).put((now, created_at, conn))
|
||||
else:
|
||||
# wake up any waiters with a flag value that indicates
|
||||
# they need to manufacture a connection
|
||||
        if self.waiting() > 0:
            super(BaseConnectionPool, self).put(None)
        else:
            # no waiters -- just change the size
            self.current_size -= 1
        self._schedule_expiration()

    @contextmanager
    def item(self, cleanup=_MISSING):
        conn = self.get()
        try:
            yield conn
        finally:
            self.put(conn, cleanup=cleanup)

    def clear(self):
        """Close all connections that this pool still holds a reference to,
        and remove all references to them.
        """
        if self._expiration_timer:
            self._expiration_timer.cancel()
        free_items, self.free_items = self.free_items, deque()
        for item in free_items:
            # Free items created using min_size>0 are not tuples.
            conn = item[2] if isinstance(item, tuple) else item
            self._safe_close(conn, quiet=True)
            self.current_size -= 1

    def __del__(self):
        self.clear()


class TpooledConnectionPool(BaseConnectionPool):
    """A pool which gives out :class:`~eventlet.tpool.Proxy`-based database
    connections.
    """

    def create(self):
        now = time.time()
        return now, now, self.connect(
            self._db_module, self.connect_timeout, *self._args, **self._kwargs)

    @classmethod
    def connect(cls, db_module, connect_timeout, *args, **kw):
        t = timeout.Timeout(connect_timeout, ConnectTimeout())
        try:
            from eventlet import tpool
            conn = tpool.execute(db_module.connect, *args, **kw)
            return tpool.Proxy(conn, autowrap_names=('cursor',))
        finally:
            t.cancel()


class RawConnectionPool(BaseConnectionPool):
    """A pool which gives out plain database connections.
    """

    def create(self):
        now = time.time()
        return now, now, self.connect(
            self._db_module, self.connect_timeout, *self._args, **self._kwargs)

    @classmethod
    def connect(cls, db_module, connect_timeout, *args, **kw):
        t = timeout.Timeout(connect_timeout, ConnectTimeout())
        try:
            return db_module.connect(*args, **kw)
        finally:
            t.cancel()


# default connection pool is the tpool one
ConnectionPool = TpooledConnectionPool


class GenericConnectionWrapper(object):
    def __init__(self, baseconn):
        self._base = baseconn

    # Proxy all method calls to self._base
    # FIXME: remove repetition; options to consider:
    # * for name in (...):
    #       setattr(class, name, lambda self, *a, **kw: getattr(self._base, name)(*a, **kw))
    # * def __getattr__(self, name): if name in (...): return getattr(self._base, name)
    # * other?
    def __enter__(self):
        return self._base.__enter__()

    def __exit__(self, exc, value, tb):
        return self._base.__exit__(exc, value, tb)

    def __repr__(self):
        return self._base.__repr__()

    _proxy_funcs = (
        'affected_rows',
        'autocommit',
        'begin',
        'change_user',
        'character_set_name',
        'close',
        'commit',
        'cursor',
        'dump_debug_info',
        'errno',
        'error',
        'errorhandler',
        'insert_id',
        'literal',
        'ping',
        'query',
        'rollback',
        'select_db',
        'server_capabilities',
        'set_character_set',
        'set_isolation_level',
        'set_server_option',
        'set_sql_mode',
        'show_warnings',
        'shutdown',
        'sqlstate',
        'stat',
        'store_result',
        'string_literal',
        'thread_id',
        'use_result',
        'warning_count',
    )


for _proxy_fun in GenericConnectionWrapper._proxy_funcs:
    # excess wrapper for early binding (closure by value)
    def _wrapper(_proxy_fun=_proxy_fun):
        def _proxy_method(self, *args, **kwargs):
            return getattr(self._base, _proxy_fun)(*args, **kwargs)
        _proxy_method.func_name = _proxy_fun
        _proxy_method.__name__ = _proxy_fun
        _proxy_method.__qualname__ = 'GenericConnectionWrapper.' + _proxy_fun
        return _proxy_method
    setattr(GenericConnectionWrapper, _proxy_fun, _wrapper(_proxy_fun))
del GenericConnectionWrapper._proxy_funcs
del _proxy_fun
del _wrapper
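
# --- Editor's note (illustrative, not upstream code) ---
# The `_proxy_fun=_proxy_fun` default argument above is the standard
# early-binding idiom: without it, every generated method would close over
# the loop variable and dispatch to the *last* name in the tuple.  A
# minimal standalone demonstration of the same idiom:
#
#     >>> fns = [lambda: i for i in range(3)]
#     >>> [f() for f in fns]              # late binding: all see i == 2
#     [2, 2, 2]
#     >>> fns = [lambda i=i: i for i in range(3)]
#     >>> [f() for f in fns]              # early binding via default arg
#     [0, 1, 2]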


class PooledConnectionWrapper(GenericConnectionWrapper):
    """A connection wrapper where:
    - the close method returns the connection to the pool instead of closing it directly
    - ``bool(conn)`` returns a reasonable value
    - returns itself to the pool if it gets garbage collected
    """

    def __init__(self, baseconn, pool):
        super(PooledConnectionWrapper, self).__init__(baseconn)
        self._pool = pool

    def __nonzero__(self):
        return (hasattr(self, '_base') and bool(self._base))

    __bool__ = __nonzero__

    def _destroy(self):
        self._pool = None
        try:
            del self._base
        except AttributeError:
            pass

    def close(self):
        """Return the connection to the pool, and remove the
        reference to it so that you can't use it again through this
        wrapper object.
        """
        if self and self._pool:
            self._pool.put(self)
        self._destroy()

    def __del__(self):
        return  # this causes some issues if __del__ is called in the
        # main coroutine, so for now this is disabled
        # self.close()


class DatabaseConnector(object):
    """
    This is an object which will maintain a collection of database
    connection pools on a per-host basis.
    """

    def __init__(self, module, credentials,
                 conn_pool=None, *args, **kwargs):
        """constructor
        *module*
            Database module to use.
        *credentials*
            Mapping of hostname to connect arguments (e.g. username and password)
        """
        assert(module)
        self._conn_pool_class = conn_pool
        if self._conn_pool_class is None:
            self._conn_pool_class = ConnectionPool
        self._module = module
        self._args = args
        self._kwargs = kwargs
        # this is a map of hostname to username/password
        self._credentials = credentials
        self._databases = {}

    def credentials_for(self, host):
        if host in self._credentials:
            return self._credentials[host]
        else:
            return self._credentials.get('default', None)

    def get(self, host, dbname):
        """Returns a ConnectionPool to the target host and schema.
        """
        key = (host, dbname)
        if key not in self._databases:
            new_kwargs = self._kwargs.copy()
            new_kwargs['db'] = dbname
            new_kwargs['host'] = host
            new_kwargs.update(self.credentials_for(host))
            dbpool = self._conn_pool_class(
                self._module, *self._args, **new_kwargs)
            self._databases[key] = dbpool

        return self._databases[key]
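
# --- Editor's note (illustrative usage sketch, not upstream code) ---
# Typical use of the pool machinery above.  The DB-API module, host and
# credentials are assumptions for the example, not part of this file:
#
#     import MySQLdb
#     from eventlet import db_pool
#
#     pool = db_pool.ConnectionPool(MySQLdb, max_size=4,
#                                   host='localhost', user='root',
#                                   passwd='', db='test')
#     with pool.item() as conn:       # checked out; returned on exit
#         cursor = conn.cursor()
#         cursor.execute('SELECT 1')
#         print(cursor.fetchall())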
BIN
venv/lib/python2.7/site-packages/eventlet/db_pool.pyc
Normal file
Binary file not shown.
174
venv/lib/python2.7/site-packages/eventlet/debug.py
Normal file
@@ -0,0 +1,174 @@
"""The debug module contains utilities and functions for better
debugging Eventlet-powered applications."""
from __future__ import print_function

import os
import sys
import linecache
import re
import inspect

__all__ = ['spew', 'unspew', 'format_hub_listeners', 'format_hub_timers',
           'hub_listener_stacks', 'hub_exceptions', 'tpool_exceptions',
           'hub_prevent_multiple_readers', 'hub_timer_stacks',
           'hub_blocking_detection']

_token_splitter = re.compile('\W+')


class Spew(object):

    def __init__(self, trace_names=None, show_values=True):
        self.trace_names = trace_names
        self.show_values = show_values

    def __call__(self, frame, event, arg):
        if event == 'line':
            lineno = frame.f_lineno
            if '__file__' in frame.f_globals:
                filename = frame.f_globals['__file__']
                if (filename.endswith('.pyc') or
                        filename.endswith('.pyo')):
                    filename = filename[:-1]
                name = frame.f_globals['__name__']
                line = linecache.getline(filename, lineno)
            else:
                name = '[unknown]'
                try:
                    src = inspect.getsourcelines(frame)
                    line = src[lineno]
                except IOError:
                    line = 'Unknown code named [%s].  VM instruction #%d' % (
                        frame.f_code.co_name, frame.f_lasti)
            if self.trace_names is None or name in self.trace_names:
                print('%s:%s: %s' % (name, lineno, line.rstrip()))
                if not self.show_values:
                    return self
                details = []
                tokens = _token_splitter.split(line)
                for tok in tokens:
                    if tok in frame.f_globals:
                        details.append('%s=%r' % (tok, frame.f_globals[tok]))
                    if tok in frame.f_locals:
                        details.append('%s=%r' % (tok, frame.f_locals[tok]))
                if details:
                    print("\t%s" % ' '.join(details))
        return self


def spew(trace_names=None, show_values=False):
    """Install a trace hook which writes incredibly detailed logs
    about what code is being executed to stdout.
    """
    sys.settrace(Spew(trace_names, show_values))


def unspew():
    """Remove the trace hook installed by spew.
    """
    sys.settrace(None)


def format_hub_listeners():
    """ Returns a formatted string of the current listeners on the current
    hub.  This can be useful in determining what's going on in the event system,
    especially when used in conjunction with :func:`hub_listener_stacks`.
    """
    from eventlet import hubs
    hub = hubs.get_hub()
    result = ['READERS:']
    for l in hub.get_readers():
        result.append(repr(l))
    result.append('WRITERS:')
    for l in hub.get_writers():
        result.append(repr(l))
    return os.linesep.join(result)


def format_hub_timers():
    """ Returns a formatted string of the current timers on the current
    hub.  This can be useful in determining what's going on in the event system,
    especially when used in conjunction with :func:`hub_timer_stacks`.
    """
    from eventlet import hubs
    hub = hubs.get_hub()
    result = ['TIMERS:']
    for l in hub.timers:
        result.append(repr(l))
    return os.linesep.join(result)


def hub_listener_stacks(state=False):
    """Toggles whether or not the hub records the stack when clients register
    listeners on file descriptors.  This can be useful when trying to figure
    out what the hub is up to at any given moment.  To inspect the stacks
    of the current listeners, call :func:`format_hub_listeners` at critical
    junctures in the application logic.
    """
    from eventlet import hubs
    hubs.get_hub().set_debug_listeners(state)


def hub_timer_stacks(state=False):
    """Toggles whether or not the hub records the stack when timers are set.
    To inspect the stacks of the current timers, call :func:`format_hub_timers`
    at critical junctures in the application logic.
    """
    from eventlet.hubs import timer
    timer._g_debug = state


def hub_prevent_multiple_readers(state=True):
    """Toggle prevention of multiple greenlets reading from a socket

    When multiple greenlets read from the same socket it is often hard
    to predict which greenlet will receive what data.  To achieve
    resource sharing consider using ``eventlet.pools.Pool`` instead.

    But if you really know what you are doing you can change the state
    to ``False`` to stop the hub from protecting against this mistake.
    """
    from eventlet.hubs import hub
    hub.g_prevent_multiple_readers = state


def hub_exceptions(state=True):
    """Toggles whether the hub prints exceptions that are raised from its
    timers.  This can be useful to see how greenthreads are terminating.
    """
    from eventlet import hubs
    hubs.get_hub().set_timer_exceptions(state)
    from eventlet import greenpool
    greenpool.DEBUG = state


def tpool_exceptions(state=False):
    """Toggles whether tpool itself prints exceptions that are raised from
    functions that are executed in it, in addition to raising them like
    it normally does."""
    from eventlet import tpool
    tpool.QUIET = not state


def hub_blocking_detection(state=False, resolution=1):
    """Toggles whether Eventlet makes an effort to detect blocking
    behavior in an application.

    It does this by telling the kernel to raise a SIGALRM after a
    short timeout, and clearing the timeout every time the hub
    greenlet is resumed.  Therefore, any code that runs for a long
    time without yielding to the hub will get interrupted by the
    blocking detector (don't use it in production!).

    The *resolution* argument governs how long the SIGALRM timeout
    waits in seconds.  The implementation uses :func:`signal.setitimer`
    and can be specified as a floating-point value.
    The shorter the resolution, the greater the chance of false
    positives.
    """
    from eventlet import hubs
    assert resolution > 0
    hubs.get_hub().debug_blocking = state
    hubs.get_hub().debug_blocking_resolution = resolution
    if not state:
        hubs.get_hub().block_detect_post()
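
# --- Editor's note (illustrative usage sketch, not upstream code) ---
# Enabling the blocking detector during development; any code that holds
# the hub for longer than *resolution* seconds gets a traceback printed:
#
#     import time
#     import eventlet
#     from eventlet import debug
#
#     debug.hub_blocking_detection(True, resolution=0.5)
#     eventlet.spawn(time.sleep, 2).wait()   # unpatched sleep blocks the hub
#     debug.hub_blocking_detection(False)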
BIN
venv/lib/python2.7/site-packages/eventlet/debug.pyc
Normal file
Binary file not shown.
213
venv/lib/python2.7/site-packages/eventlet/event.py
Normal file
@@ -0,0 +1,213 @@
from __future__ import print_function

from eventlet import hubs
from eventlet.support import greenlets as greenlet

__all__ = ['Event']


class NOT_USED:
    def __repr__(self):
        return 'NOT_USED'

NOT_USED = NOT_USED()


class Event(object):
    """An abstraction where an arbitrary number of coroutines
    can wait for one event from another.

    Events are similar to a Queue that can only hold one item, but differ
    in two important ways:

    1. calling :meth:`send` never unschedules the current greenthread
    2. :meth:`send` can only be called once; create a new event to send again.

    They are good for communicating results between coroutines, and
    are the basis for how
    :meth:`GreenThread.wait() <eventlet.greenthread.GreenThread.wait>`
    is implemented.

    >>> from eventlet import event
    >>> import eventlet
    >>> evt = event.Event()
    >>> def baz(b):
    ...     evt.send(b + 1)
    ...
    >>> _ = eventlet.spawn_n(baz, 3)
    >>> evt.wait()
    4
    """
    _result = None
    _exc = None

    def __init__(self):
        self._waiters = set()
        self.reset()

    def __str__(self):
        params = (self.__class__.__name__, hex(id(self)),
                  self._result, self._exc, len(self._waiters))
        return '<%s at %s result=%r _exc=%r _waiters[%d]>' % params

    def reset(self):
        # this is kind of a misfeature and doesn't work perfectly well,
        # it's better to create a new event rather than reset an old one
        # removing documentation so that we don't get new use cases for it
        assert self._result is not NOT_USED, 'Trying to re-reset() a fresh event.'
        self._result = NOT_USED
        self._exc = None

    def ready(self):
        """ Return true if the :meth:`wait` call will return immediately.
        Used to avoid waiting for things that might take a while to time out.
        For example, you can put a bunch of events into a list, and then visit
        them all repeatedly, calling :meth:`ready` until one returns ``True``,
        and then you can :meth:`wait` on that one."""
        return self._result is not NOT_USED

    def has_exception(self):
        return self._exc is not None

    def has_result(self):
        return self._result is not NOT_USED and self._exc is None

    def poll(self, notready=None):
        if self.ready():
            return self.wait()
        return notready

    # QQQ make it return tuple (type, value, tb) instead of raising
    # because
    # 1) "poll" does not imply raising
    # 2) it's better not to screw up caller's sys.exc_info() by default
    #    (e.g. if caller wants to calls the function in except or finally)
    def poll_exception(self, notready=None):
        if self.has_exception():
            return self.wait()
        return notready

    def poll_result(self, notready=None):
        if self.has_result():
            return self.wait()
        return notready

    def wait(self):
        """Wait until another coroutine calls :meth:`send`.
        Returns the value the other coroutine passed to
        :meth:`send`.

        >>> from eventlet import event
        >>> import eventlet
        >>> evt = event.Event()
        >>> def wait_on():
        ...     retval = evt.wait()
        ...     print("waited for {0}".format(retval))
        >>> _ = eventlet.spawn(wait_on)
        >>> evt.send('result')
        >>> eventlet.sleep(0)
        waited for result

        Returns immediately if the event has already
        occurred.

        >>> evt.wait()
        'result'
        """
        current = greenlet.getcurrent()
        if self._result is NOT_USED:
            self._waiters.add(current)
            try:
                return hubs.get_hub().switch()
            finally:
                self._waiters.discard(current)
        if self._exc is not None:
            current.throw(*self._exc)
        return self._result

    def send(self, result=None, exc=None):
        """Makes arrangements for the waiters to be woken with the
        result and then returns immediately to the parent.

        >>> from eventlet import event
        >>> import eventlet
        >>> evt = event.Event()
        >>> def waiter():
        ...     print('about to wait')
        ...     result = evt.wait()
        ...     print('waited for {0}'.format(result))
        >>> _ = eventlet.spawn(waiter)
        >>> eventlet.sleep(0)
        about to wait
        >>> evt.send('a')
        >>> eventlet.sleep(0)
        waited for a

        It is an error to call :meth:`send` multiple times on the same event.

        >>> evt.send('whoops')
        Traceback (most recent call last):
        ...
        AssertionError: Trying to re-send() an already-triggered event.

        Use :meth:`reset` between :meth:`send` s to reuse an event object.
        """
        assert self._result is NOT_USED, 'Trying to re-send() an already-triggered event.'
        self._result = result
        if exc is not None and not isinstance(exc, tuple):
            exc = (exc, )
        self._exc = exc
        hub = hubs.get_hub()
        for waiter in self._waiters:
            hub.schedule_call_global(
                0, self._do_send, self._result, self._exc, waiter)

    def _do_send(self, result, exc, waiter):
        if waiter in self._waiters:
            if exc is None:
                waiter.switch(result)
            else:
                waiter.throw(*exc)

    def send_exception(self, *args):
        """Same as :meth:`send`, but sends an exception to waiters.

        The arguments to send_exception are the same as the arguments
        to ``raise``.  If a single exception object is passed in, it
        will be re-raised when :meth:`wait` is called, generating a
        new stacktrace.

        >>> from eventlet import event
        >>> evt = event.Event()
        >>> evt.send_exception(RuntimeError())
        >>> evt.wait()
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
          File "eventlet/event.py", line 120, in wait
            current.throw(*self._exc)
        RuntimeError

        If it's important to preserve the entire original stack trace,
        you must pass in the entire :func:`sys.exc_info` tuple.

        >>> import sys
        >>> evt = event.Event()
        >>> try:
        ...     raise RuntimeError()
        ... except RuntimeError:
        ...     evt.send_exception(*sys.exc_info())
        ...
        >>> evt.wait()
        Traceback (most recent call last):
          File "<stdin>", line 1, in <module>
          File "eventlet/event.py", line 120, in wait
            current.throw(*self._exc)
          File "<stdin>", line 2, in <module>
        RuntimeError

        Note that doing so stores a traceback object directly on the
        Event object, which may cause reference cycles. See the
        :func:`sys.exc_info` documentation.
        """
        # the arguments are the same as for greenlet.throw
        return self.send(None, args)
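
# --- Editor's note (illustrative sketch, not upstream code) ---
# poll() is the non-blocking counterpart of wait(): it returns the
# *notready* default instead of suspending the calling greenthread.
#
#     >>> from eventlet import event
#     >>> evt = event.Event()
#     >>> evt.poll('not yet')
#     'not yet'
#     >>> evt.send(42)
#     >>> evt.poll('not yet')
#     42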
BIN
venv/lib/python2.7/site-packages/eventlet/event.pyc
Normal file
Binary file not shown.
16
venv/lib/python2.7/site-packages/eventlet/green/BaseHTTPServer.py
Normal file
@@ -0,0 +1,16 @@
from eventlet import patcher
from eventlet.green import socket
from eventlet.green import SocketServer
from eventlet.support import six

patcher.inject(
    'BaseHTTPServer' if six.PY2 else 'http.server',
    globals(),
    ('socket', socket),
    ('SocketServer', SocketServer),
    ('socketserver', SocketServer))

del patcher

if __name__ == '__main__':
    test()
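
# --- Editor's note (illustrative sketch, not upstream code) ---
# patcher.inject() re-executes the named stdlib module's source into
# globals() with the given (name, module) pairs substituted, so the copy
# defined here transparently uses green sockets.  The same mechanism is
# available to applications via import_patched, e.g.:
#
#     from eventlet import patcher
#     from eventlet.green import socket
#     telnetlib = patcher.import_patched('telnetlib', socket=socket)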
Binary file not shown.
19
venv/lib/python2.7/site-packages/eventlet/green/CGIHTTPServer.py
Normal file
@@ -0,0 +1,19 @@
from eventlet import patcher
from eventlet.green import BaseHTTPServer
from eventlet.green import SimpleHTTPServer
from eventlet.green import urllib
from eventlet.green import select

test = None  # bind prior to patcher.inject to silence pyflakes warning below
patcher.inject(
    'CGIHTTPServer',
    globals(),
    ('BaseHTTPServer', BaseHTTPServer),
    ('SimpleHTTPServer', SimpleHTTPServer),
    ('urllib', urllib),
    ('select', select))

del patcher

if __name__ == '__main__':
    test()  # pyflakes false alarm here unless test = None above
Binary file not shown.
37
venv/lib/python2.7/site-packages/eventlet/green/MySQLdb.py
Normal file
@@ -0,0 +1,37 @@
__MySQLdb = __import__('MySQLdb')

__all__ = __MySQLdb.__all__
__patched__ = ["connect", "Connect", 'Connection', 'connections']

from eventlet.patcher import slurp_properties
slurp_properties(
    __MySQLdb, globals(),
    ignore=__patched__, srckeys=dir(__MySQLdb))

from eventlet import tpool

__orig_connections = __import__('MySQLdb.connections').connections


def Connection(*args, **kw):
    conn = tpool.execute(__orig_connections.Connection, *args, **kw)
    return tpool.Proxy(conn, autowrap_names=('cursor',))
connect = Connect = Connection


# replicate the MySQLdb.connections module but with a tpooled Connection factory
class MySQLdbConnectionsModule(object):
    pass

connections = MySQLdbConnectionsModule()
for var in dir(__orig_connections):
    if not var.startswith('__'):
        setattr(connections, var, getattr(__orig_connections, var))
connections.Connection = Connection

cursors = __import__('MySQLdb.cursors').cursors
converters = __import__('MySQLdb.converters').converters

# TODO support instantiating cursors.FooCursor objects directly
# TODO though this is a low priority, it would be nice if we supported
# subclassing eventlet.green.MySQLdb.connections.Connection
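
# --- Editor's note (illustrative usage sketch, not upstream code) ---
# Importing this module in place of MySQLdb makes connect() run in the
# thread pool, so a slow connection or query blocks a pool thread rather
# than the hub.  Host and credentials below are assumptions:
#
#     from eventlet.green import MySQLdb
#
#     conn = MySQLdb.connect(host='localhost', user='root', db='test')
#     cur = conn.cursor()              # cursors are tpool-proxied too
#     cur.execute('SELECT SLEEP(1)')   # other greenthreads keep running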
BIN
venv/lib/python2.7/site-packages/eventlet/green/MySQLdb.pyc
Normal file
Binary file not shown.
124
venv/lib/python2.7/site-packages/eventlet/green/OpenSSL/SSL.py
Normal file
@@ -0,0 +1,124 @@
from OpenSSL import SSL as orig_SSL
from OpenSSL.SSL import *
from eventlet.support import get_errno
from eventlet import greenio
from eventlet.hubs import trampoline
import socket


class GreenConnection(greenio.GreenSocket):
    """ Nonblocking wrapper for SSL.Connection objects.
    """

    def __init__(self, ctx, sock=None):
        if sock is not None:
            fd = orig_SSL.Connection(ctx, sock)
        else:
            # if we're given a Connection object directly, use it;
            # this is used in the inherited accept() method
            fd = ctx
        super(ConnectionType, self).__init__(fd)

    def do_handshake(self):
        """ Perform an SSL handshake (usually called after renegotiate or one of
        set_accept_state or set_connect_state).  This can raise the same exceptions as
        send and recv. """
        if self.act_non_blocking:
            return self.fd.do_handshake()
        while True:
            try:
                return self.fd.do_handshake()
            except WantReadError:
                trampoline(self.fd.fileno(),
                           read=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)
            except WantWriteError:
                trampoline(self.fd.fileno(),
                           write=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)

    def dup(self):
        raise NotImplementedError("Dup not supported on SSL sockets")

    def makefile(self, mode='r', bufsize=-1):
        raise NotImplementedError("Makefile not supported on SSL sockets")

    def read(self, size):
        """Works like a blocking call to SSL_read(), whose behavior is
        described here:  http://www.openssl.org/docs/ssl/SSL_read.html"""
        if self.act_non_blocking:
            return self.fd.read(size)
        while True:
            try:
                return self.fd.read(size)
            except WantReadError:
                trampoline(self.fd.fileno(),
                           read=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)
            except WantWriteError:
                trampoline(self.fd.fileno(),
                           write=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)
            except SysCallError as e:
                if get_errno(e) == -1 or get_errno(e) > 0:
                    return ''

    recv = read

    def write(self, data):
        """Works like a blocking call to SSL_write(), whose behavior is
        described here:  http://www.openssl.org/docs/ssl/SSL_write.html"""
        if not data:
            return 0  # calling SSL_write() with 0 bytes to be sent is undefined
        if self.act_non_blocking:
            return self.fd.write(data)
        while True:
            try:
                return self.fd.write(data)
            except WantReadError:
                trampoline(self.fd.fileno(),
                           read=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)
            except WantWriteError:
                trampoline(self.fd.fileno(),
                           write=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)

    send = write

    def sendall(self, data):
        """Send "all" data on the connection. This calls send() repeatedly until
        all data is sent. If an error occurs, it's impossible to tell how much data
        has been sent.

        No return value."""
        tail = self.send(data)
        while tail < len(data):
            tail += self.send(data[tail:])

    def shutdown(self):
        if self.act_non_blocking:
            return self.fd.shutdown()
        while True:
            try:
                return self.fd.shutdown()
            except WantReadError:
                trampoline(self.fd.fileno(),
                           read=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)
            except WantWriteError:
                trampoline(self.fd.fileno(),
                           write=True,
                           timeout=self.gettimeout(),
                           timeout_exc=socket.timeout)

Connection = ConnectionType = GreenConnection

del greenio
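
# --- Editor's note (illustrative sketch, not upstream code) ---
# Every method above follows the same nonblocking-OpenSSL pattern: retry
# the call, and on WantReadError/WantWriteError park the greenthread on
# the fd with trampoline() until it is readable/writable.  A hypothetical
# client-side use (host, method and handshake flow are assumptions):
#
#     import socket
#     from OpenSSL import SSL
#     from eventlet.green.OpenSSL.SSL import GreenConnection
#
#     conn = GreenConnection(SSL.Context(SSL.TLSv1_2_METHOD), socket.socket())
#     conn.connect(('example.com', 443))
#     conn.fd.set_connect_state()
#     conn.do_handshake()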
BIN
venv/lib/python2.7/site-packages/eventlet/green/OpenSSL/SSL.pyc
Normal file
Binary file not shown.
5
venv/lib/python2.7/site-packages/eventlet/green/OpenSSL/__init__.py
Normal file
@@ -0,0 +1,5 @@
from . import rand
from . import crypto
from . import SSL
from . import tsafe
from .version import __version__
Binary file not shown.
1
venv/lib/python2.7/site-packages/eventlet/green/OpenSSL/crypto.py
Normal file
@@ -0,0 +1 @@
from OpenSSL.crypto import *
Binary file not shown.
1
venv/lib/python2.7/site-packages/eventlet/green/OpenSSL/rand.py
Normal file
@@ -0,0 +1 @@
from OpenSSL.rand import *
BIN
venv/lib/python2.7/site-packages/eventlet/green/OpenSSL/rand.pyc
Normal file
Binary file not shown.
1
venv/lib/python2.7/site-packages/eventlet/green/OpenSSL/tsafe.py
Normal file
@@ -0,0 +1 @@
from OpenSSL.tsafe import *
Binary file not shown.
1
venv/lib/python2.7/site-packages/eventlet/green/OpenSSL/version.py
Normal file
@@ -0,0 +1 @@
from OpenSSL.version import __version__, __doc__
Binary file not shown.
32
venv/lib/python2.7/site-packages/eventlet/green/Queue.py
Normal file
@@ -0,0 +1,32 @@
from eventlet import queue

__all__ = ['Empty', 'Full', 'LifoQueue', 'PriorityQueue', 'Queue']

__patched__ = ['LifoQueue', 'PriorityQueue', 'Queue']

# these classes exist to paper over the major operational difference between
# eventlet.queue.Queue and the stdlib equivalents


class Queue(queue.Queue):
    def __init__(self, maxsize=0):
        if maxsize == 0:
            maxsize = None
        super(Queue, self).__init__(maxsize)


class PriorityQueue(queue.PriorityQueue):
    def __init__(self, maxsize=0):
        if maxsize == 0:
            maxsize = None
        super(PriorityQueue, self).__init__(maxsize)


class LifoQueue(queue.LifoQueue):
    def __init__(self, maxsize=0):
        if maxsize == 0:
            maxsize = None
        super(LifoQueue, self).__init__(maxsize)

Empty = queue.Empty
Full = queue.Full
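
# --- Editor's note (illustrative sketch, not upstream code) ---
# The stdlib treats maxsize <= 0 as "unbounded" while eventlet.queue uses
# None for that, so the shims above translate 0 -> None to keep
# stdlib-style callers working:
#
#     >>> from eventlet.green import Queue
#     >>> q = Queue.Queue(0)    # unbounded, as with the stdlib Queue
#     >>> q.put('x')
#     >>> q.get()
#     'x'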
BIN
venv/lib/python2.7/site-packages/eventlet/green/Queue.pyc
Normal file
Binary file not shown.
14
venv/lib/python2.7/site-packages/eventlet/green/SimpleHTTPServer.py
Normal file
@@ -0,0 +1,14 @@
from eventlet import patcher
from eventlet.green import BaseHTTPServer
from eventlet.green import urllib

patcher.inject(
    'SimpleHTTPServer',
    globals(),
    ('BaseHTTPServer', BaseHTTPServer),
    ('urllib', urllib))

del patcher

if __name__ == '__main__':
    test()
Binary file not shown.
15
venv/lib/python2.7/site-packages/eventlet/green/SocketServer.py
Normal file
@@ -0,0 +1,15 @@
from eventlet import patcher

from eventlet.green import socket
from eventlet.green import select
from eventlet.green import threading
from eventlet.support import six

patcher.inject(
    'SocketServer' if six.PY2 else 'socketserver',
    globals(),
    ('socket', socket),
    ('select', select),
    ('threading', threading))

# QQQ ForkingMixIn should be fixed to use green waitpid?
BIN
venv/lib/python2.7/site-packages/eventlet/green/SocketServer.pyc
Normal file
Binary file not shown.
1
venv/lib/python2.7/site-packages/eventlet/green/__init__.py
Normal file
@@ -0,0 +1 @@
# this package contains modules from the standard library converted to use eventlet
BIN
venv/lib/python2.7/site-packages/eventlet/green/__init__.pyc
Normal file
Binary file not shown.
33
venv/lib/python2.7/site-packages/eventlet/green/_socket_nodns.py
Normal file
@@ -0,0 +1,33 @@
__socket = __import__('socket')

__all__ = __socket.__all__
__patched__ = ['fromfd', 'socketpair', 'ssl', 'socket', 'timeout']

import eventlet.patcher
eventlet.patcher.slurp_properties(__socket, globals(), ignore=__patched__, srckeys=dir(__socket))

os = __import__('os')
import sys
from eventlet import greenio


socket = greenio.GreenSocket
_GLOBAL_DEFAULT_TIMEOUT = greenio._GLOBAL_DEFAULT_TIMEOUT
timeout = greenio.socket_timeout

try:
    __original_fromfd__ = __socket.fromfd

    def fromfd(*args):
        return socket(__original_fromfd__(*args))
except AttributeError:
    pass

try:
    __original_socketpair__ = __socket.socketpair

    def socketpair(*args):
        one, two = __original_socketpair__(*args)
        return socket(one), socket(two)
except AttributeError:
    pass
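
# --- Editor's note (illustrative sketch, not upstream code) ---
# The try/except wrappers above re-wrap descriptors handed out by the OS
# (fromfd, socketpair) in GreenSocket, so they cooperate with the hub
# like any other green socket (Python 2 string semantics shown):
#
#     >>> from eventlet.green import socket
#     >>> a, b = socket.socketpair()
#     >>> a.send('ping')
#     4
#     >>> b.recv(4)
#     'ping'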
Binary file not shown.
11
venv/lib/python2.7/site-packages/eventlet/green/asynchat.py
Normal file
@@ -0,0 +1,11 @@
from eventlet import patcher
from eventlet.green import asyncore
from eventlet.green import socket

patcher.inject(
    'asynchat',
    globals(),
    ('asyncore', asyncore),
    ('socket', socket))

del patcher
BIN
venv/lib/python2.7/site-packages/eventlet/green/asynchat.pyc
Normal file
Binary file not shown.
13
venv/lib/python2.7/site-packages/eventlet/green/asyncore.py
Normal file
@@ -0,0 +1,13 @@
|
||||
from eventlet import patcher
|
||||
from eventlet.green import select
|
||||
from eventlet.green import socket
|
||||
from eventlet.green import time
|
||||
|
||||
patcher.inject(
|
||||
"asyncore",
|
||||
globals(),
|
||||
('select', select),
|
||||
('socket', socket),
|
||||
('time', time))
|
||||
|
||||
del patcher
BIN
venv/lib/python2.7/site-packages/eventlet/green/asyncore.pyc
Normal file
Binary file not shown.
47
venv/lib/python2.7/site-packages/eventlet/green/builtin.py
Normal file
@@ -0,0 +1,47 @@
"""
In order to detect a filehandle that's been closed, our only clue may be
the operating system returning the same filehandle in response to some
other operation.

The builtins 'file' and 'open' are patched to collaborate with the
notify_opened protocol.
"""

builtins_orig = __builtins__

from eventlet import hubs
from eventlet.hubs import hub
from eventlet.patcher import slurp_properties
import sys

__all__ = dir(builtins_orig)
__patched__ = ['file', 'open']

slurp_properties(builtins_orig, globals(),
                 ignore=__patched__, srckeys=dir(builtins_orig))

hubs.get_hub()

__original_file = file


class file(__original_file):
    def __init__(self, *args, **kwargs):
        super(file, self).__init__(*args, **kwargs)
        hubs.notify_opened(self.fileno())

__original_open = open
__opening = False


def open(*args):
    global __opening
    result = __original_open(*args)
    if not __opening:
        # This is incredibly ugly. 'open' is used under the hood by
        # the import process. So, ensure we don't wind up in an
        # infinite loop.
        __opening = True
        hubs.notify_opened(result.fileno())
        __opening = False
    return result
BIN
venv/lib/python2.7/site-packages/eventlet/green/builtin.pyc
Normal file
Binary file not shown.
13
venv/lib/python2.7/site-packages/eventlet/green/ftplib.py
Normal file
@@ -0,0 +1,13 @@
from eventlet import patcher

# *NOTE: there might be some funny business with the "SOCKS" module
# if it even still exists
from eventlet.green import socket

patcher.inject('ftplib', globals(), ('socket', socket))

del patcher

# Run test program when run as a script
if __name__ == '__main__':
    test()
BIN
venv/lib/python2.7/site-packages/eventlet/green/ftplib.pyc
Normal file
Binary file not shown.
191
venv/lib/python2.7/site-packages/eventlet/green/http/__init__.py
Normal file
@@ -0,0 +1,191 @@
# This is part of Python source code with Eventlet-specific modifications.
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved" are retained in Python alone or in any derivative version prepared by
# Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee.  This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
from eventlet.support import six
assert six.PY3, 'This is a Python 3 module'

from enum import IntEnum

__all__ = ['HTTPStatus']


class HTTPStatus(IntEnum):
    """HTTP status codes and reason phrases

    Status codes from the following RFCs are all observed:

        * RFC 7231: Hypertext Transfer Protocol (HTTP/1.1), obsoletes 2616
        * RFC 6585: Additional HTTP Status Codes
        * RFC 3229: Delta encoding in HTTP
        * RFC 4918: HTTP Extensions for WebDAV, obsoletes 2518
        * RFC 5842: Binding Extensions to WebDAV
        * RFC 7238: Permanent Redirect
        * RFC 2295: Transparent Content Negotiation in HTTP
        * RFC 2774: An HTTP Extension Framework
    """
    def __new__(cls, value, phrase, description=''):
        obj = int.__new__(cls, value)
        obj._value_ = value

        obj.phrase = phrase
        obj.description = description
        return obj

    # informational
    CONTINUE = 100, 'Continue', 'Request received, please continue'
    SWITCHING_PROTOCOLS = (101, 'Switching Protocols',
                           'Switching to new protocol; obey Upgrade header')
    PROCESSING = 102, 'Processing'

    # success
    OK = 200, 'OK', 'Request fulfilled, document follows'
    CREATED = 201, 'Created', 'Document created, URL follows'
    ACCEPTED = (202, 'Accepted',
                'Request accepted, processing continues off-line')
    NON_AUTHORITATIVE_INFORMATION = (203,
                                     'Non-Authoritative Information', 'Request fulfilled from cache')
    NO_CONTENT = 204, 'No Content', 'Request fulfilled, nothing follows'
    RESET_CONTENT = 205, 'Reset Content', 'Clear input form for further input'
    PARTIAL_CONTENT = 206, 'Partial Content', 'Partial content follows'
    MULTI_STATUS = 207, 'Multi-Status'
    ALREADY_REPORTED = 208, 'Already Reported'
    IM_USED = 226, 'IM Used'

    # redirection
    MULTIPLE_CHOICES = (300, 'Multiple Choices',
                        'Object has several resources -- see URI list')
    MOVED_PERMANENTLY = (301, 'Moved Permanently',
                         'Object moved permanently -- see URI list')
    FOUND = 302, 'Found', 'Object moved temporarily -- see URI list'
    SEE_OTHER = 303, 'See Other', 'Object moved -- see Method and URL list'
    NOT_MODIFIED = (304, 'Not Modified',
                    'Document has not changed since given time')
    USE_PROXY = (305, 'Use Proxy',
                 'You must use proxy specified in Location to access this resource')
    TEMPORARY_REDIRECT = (307, 'Temporary Redirect',
                          'Object moved temporarily -- see URI list')
    PERMANENT_REDIRECT = (308, 'Permanent Redirect',
                          'Object moved permanently -- see URI list')

    # client error
    BAD_REQUEST = (400, 'Bad Request',
                   'Bad request syntax or unsupported method')
    UNAUTHORIZED = (401, 'Unauthorized',
                    'No permission -- see authorization schemes')
    PAYMENT_REQUIRED = (402, 'Payment Required',
                        'No payment -- see charging schemes')
    FORBIDDEN = (403, 'Forbidden',
                 'Request forbidden -- authorization will not help')
    NOT_FOUND = (404, 'Not Found',
                 'Nothing matches the given URI')
    METHOD_NOT_ALLOWED = (405, 'Method Not Allowed',
                          'Specified method is invalid for this resource')
    NOT_ACCEPTABLE = (406, 'Not Acceptable',
                      'URI not available in preferred format')
    PROXY_AUTHENTICATION_REQUIRED = (407,
                                     'Proxy Authentication Required',
                                     'You must authenticate with this proxy before proceeding')
    REQUEST_TIMEOUT = (408, 'Request Timeout',
                       'Request timed out; try again later')
    CONFLICT = 409, 'Conflict', 'Request conflict'
    GONE = (410, 'Gone',
            'URI no longer exists and has been permanently removed')
    LENGTH_REQUIRED = (411, 'Length Required',
                       'Client must specify Content-Length')
    PRECONDITION_FAILED = (412, 'Precondition Failed',
                           'Precondition in headers is false')
    REQUEST_ENTITY_TOO_LARGE = (413, 'Request Entity Too Large',
                                'Entity is too large')
    REQUEST_URI_TOO_LONG = (414, 'Request-URI Too Long',
                            'URI is too long')
    UNSUPPORTED_MEDIA_TYPE = (415, 'Unsupported Media Type',
                              'Entity body in unsupported format')
    REQUESTED_RANGE_NOT_SATISFIABLE = (416,
                                       'Requested Range Not Satisfiable',
                                       'Cannot satisfy request range')
    EXPECTATION_FAILED = (417, 'Expectation Failed',
                          'Expect condition could not be satisfied')
    UNPROCESSABLE_ENTITY = 422, 'Unprocessable Entity'
    LOCKED = 423, 'Locked'
    FAILED_DEPENDENCY = 424, 'Failed Dependency'
    UPGRADE_REQUIRED = 426, 'Upgrade Required'
    PRECONDITION_REQUIRED = (428, 'Precondition Required',
                             'The origin server requires the request to be conditional')
    TOO_MANY_REQUESTS = (429, 'Too Many Requests',
                         'The user has sent too many requests in '
                         'a given amount of time ("rate limiting")')
    REQUEST_HEADER_FIELDS_TOO_LARGE = (431,
                                       'Request Header Fields Too Large',
                                       'The server is unwilling to process the request because its header '
                                       'fields are too large')

    # server errors
    INTERNAL_SERVER_ERROR = (500, 'Internal Server Error',
                             'Server got itself in trouble')
    NOT_IMPLEMENTED = (501, 'Not Implemented',
                       'Server does not support this operation')
    BAD_GATEWAY = (502, 'Bad Gateway',
                   'Invalid responses from another server/proxy')
    SERVICE_UNAVAILABLE = (503, 'Service Unavailable',
                           'The server cannot process the request due to a high load')
    GATEWAY_TIMEOUT = (504, 'Gateway Timeout',
                       'The gateway server did not receive a timely response')
    HTTP_VERSION_NOT_SUPPORTED = (505, 'HTTP Version Not Supported',
                                  'Cannot fulfill request')
    VARIANT_ALSO_NEGOTIATES = 506, 'Variant Also Negotiates'
    INSUFFICIENT_STORAGE = 507, 'Insufficient Storage'
    LOOP_DETECTED = 508, 'Loop Detected'
    NOT_EXTENDED = 510, 'Not Extended'
    NETWORK_AUTHENTICATION_REQUIRED = (511,
                                       'Network Authentication Required',
                                       'The client needs to authenticate to gain network access')
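
# --- Editor's note (illustrative sketch, not upstream code) ---
# HTTPStatus members are ints with extra attributes, so they compare
# equal to plain status codes:
#
#     >>> from eventlet.green.http import HTTPStatus
#     >>> HTTPStatus.NOT_FOUND == 404
#     True
#     >>> HTTPStatus.NOT_FOUND.phrase
#     'Not Found'
#     >>> HTTPStatus(200).description
#     'Request fulfilled, document follows'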
Binary file not shown.
1557
venv/lib/python2.7/site-packages/eventlet/green/http/client.py
Normal file
File diff suppressed because it is too large
2152
venv/lib/python2.7/site-packages/eventlet/green/http/cookiejar.py
Normal file
File diff suppressed because it is too large
691
venv/lib/python2.7/site-packages/eventlet/green/http/cookies.py
Normal file
@@ -0,0 +1,691 @@
# This is part of Python source code with Eventlet-specific modifications.
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved
#
# PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
# --------------------------------------------
#
# 1. This LICENSE AGREEMENT is between the Python Software Foundation
# ("PSF"), and the Individual or Organization ("Licensee") accessing and
# otherwise using this software ("Python") in source or binary form and
# its associated documentation.
#
# 2. Subject to the terms and conditions of this License Agreement, PSF hereby
# grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
# analyze, test, perform and/or display publicly, prepare derivative works,
# distribute, and otherwise use Python alone or in any derivative version,
# provided, however, that PSF's License Agreement and PSF's notice of copyright,
# i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
# 2011, 2012, 2013, 2014, 2015, 2016 Python Software Foundation; All Rights
# Reserved" are retained in Python alone or in any derivative version prepared by
# Licensee.
#
# 3. In the event Licensee prepares a derivative work that is based on
# or incorporates Python or any part thereof, and wants to make
# the derivative work available to others as provided herein, then
# Licensee hereby agrees to include in any such work a brief summary of
# the changes made to Python.
#
# 4. PSF is making Python available to Licensee on an "AS IS"
# basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
# IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
# DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
# FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
# INFRINGE ANY THIRD PARTY RIGHTS.
#
# 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
# FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
# A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
# OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
#
# 6. This License Agreement will automatically terminate upon a material
# breach of its terms and conditions.
#
# 7. Nothing in this License Agreement shall be deemed to create any
# relationship of agency, partnership, or joint venture between PSF and
# Licensee.  This License Agreement does not grant permission to use PSF
# trademarks or trade name in a trademark sense to endorse or promote
# products or services of Licensee, or any third party.
#
# 8. By copying, installing or otherwise using Python, Licensee
# agrees to be bound by the terms and conditions of this License
# Agreement.
####
# Copyright 2000 by Timothy O'Malley <timo@alum.mit.edu>
#
#                All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software
# and its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# Timothy O'Malley  not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# Timothy O'Malley DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
# SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL Timothy O'Malley BE LIABLE FOR
# ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
# PERFORMANCE OF THIS SOFTWARE.
#
####
#
# Id: Cookie.py,v 2.29 2000/08/23 05:28:49 timo Exp
#   by Timothy O'Malley <timo@alum.mit.edu>
#
#  Cookie.py is a Python module for the handling of HTTP
#  cookies as a Python dictionary.  See RFC 2109 for more
#  information on cookies.
#
#  The original idea to treat Cookies as a dictionary came from
#  Dave Mitchell (davem@magnet.com) in 1995, when he released the
#  first version of nscookie.py.
#
####

r"""
Here's a sample session to show how to use this module.
At the moment, this is the only documentation.

The Basics
----------

Importing is easy...

   >>> from http import cookies

Most of the time you start by creating a cookie.

   >>> C = cookies.SimpleCookie()

Once you've created your Cookie, you can add values just as if it were
a dictionary.

   >>> C = cookies.SimpleCookie()
   >>> C["fig"] = "newton"
   >>> C["sugar"] = "wafer"
   >>> C.output()
   'Set-Cookie: fig=newton\r\nSet-Cookie: sugar=wafer'

Notice that the printable representation of a Cookie is the
appropriate format for a Set-Cookie: header.  This is the
default behavior.  You can change the header and printed
attributes by using the .output() function

   >>> C = cookies.SimpleCookie()
   >>> C["rocky"] = "road"
   >>> C["rocky"]["path"] = "/cookie"
   >>> print(C.output(header="Cookie:"))
   Cookie: rocky=road; Path=/cookie
   >>> print(C.output(attrs=[], header="Cookie:"))
   Cookie: rocky=road

The load() method of a Cookie extracts cookies from a string.  In a
CGI script, you would use this method to extract the cookies from the
HTTP_COOKIE environment variable.

   >>> C = cookies.SimpleCookie()
   >>> C.load("chips=ahoy; vienna=finger")
   >>> C.output()
   'Set-Cookie: chips=ahoy\r\nSet-Cookie: vienna=finger'

The load() method is darn-tootin smart about identifying cookies
within a string.  Escaped quotation marks, nested semicolons, and other
such trickeries do not confuse it.

   >>> C = cookies.SimpleCookie()
   >>> C.load('keebler="E=everybody; L=\\"Loves\\"; fudge=\\012;";')
   >>> print(C)
   Set-Cookie: keebler="E=everybody; L=\"Loves\"; fudge=\012;"

Each element of the Cookie also supports all of the RFC 2109
Cookie attributes.  Here's an example which sets the Path
attribute.

   >>> C = cookies.SimpleCookie()
   >>> C["oreo"] = "doublestuff"
   >>> C["oreo"]["path"] = "/"
   >>> print(C)
   Set-Cookie: oreo=doublestuff; Path=/

Each dictionary element has a 'value' attribute, which gives you
back the value associated with the key.

   >>> C = cookies.SimpleCookie()
   >>> C["twix"] = "none for you"
   >>> C["twix"].value
   'none for you'

The SimpleCookie expects that all values should be standard strings.
Just to be sure, SimpleCookie invokes the str() builtin to convert
the value to a string, when the values are set dictionary-style.

   >>> C = cookies.SimpleCookie()
   >>> C["number"] = 7
   >>> C["string"] = "seven"
   >>> C["number"].value
   '7'
   >>> C["string"].value
   'seven'
   >>> C.output()
   'Set-Cookie: number=7\r\nSet-Cookie: string=seven'

Finis.
"""

#
# Import our required modules
#
import re
import string

__all__ = ["CookieError", "BaseCookie", "SimpleCookie"]

_nulljoin = ''.join
_semispacejoin = '; '.join
_spacejoin = ' '.join


def _warn_deprecated_setter(setter):
    import warnings
    msg = ('The .%s setter is deprecated. The attribute will be read-only in '
           'future releases. Please use the set() method instead.' % setter)
    warnings.warn(msg, DeprecationWarning, stacklevel=3)

#
# Define an exception visible to External modules
#
class CookieError(Exception):
    pass


# These quoting routines conform to the RFC2109 specification, which in
# turn references the character definitions from RFC2068.  They provide
# a two-way quoting algorithm.  Any non-text character is translated
# into a 4 character sequence: a forward-slash followed by the
# three-digit octal equivalent of the character.  Any '\' or '"' is
# quoted with a preceding '\' slash.
# Because of the way browsers really handle cookies (as opposed to what
# the RFC says) we also encode "," and ";".
#
# These are taken from RFC2068 and RFC2109.
#       _LegalChars       is the list of chars which don't require "'s
#       _Translator       hash-table for fast quoting
#
_LegalChars = string.ascii_letters + string.digits + "!#$%&'*+-.^_`|~:"
_UnescapedChars = _LegalChars + ' ()/<=>?@[]{}'

_Translator = {n: '\\%03o' % n
               for n in set(range(256)) - set(map(ord, _UnescapedChars))}
_Translator.update({
    ord('"'): '\\"',
    ord('\\'): '\\\\',
})

# Eventlet change: match used instead of fullmatch for Python 3.3 compatibility
_is_legal_key = re.compile(r'[%s]+\Z' % re.escape(_LegalChars)).match


def _quote(str):
    r"""Quote a string for use in a cookie header.

    If the string does not need to be double-quoted, then just return the
    string.  Otherwise, surround the string in doublequotes and quote
    (with a \) special characters.
    """
    if str is None or _is_legal_key(str):
        return str
    else:
        return '"' + str.translate(_Translator) + '"'


_OctalPatt = re.compile(r"\\[0-3][0-7][0-7]")
_QuotePatt = re.compile(r"[\\].")


def _unquote(str):
    # If there aren't any doublequotes,
    # then there can't be any special characters.  See RFC 2109.
    if str is None or len(str) < 2:
        return str
    if str[0] != '"' or str[-1] != '"':
        return str

    # We have to assume that we must decode this string.
    # Down to work.

    # Remove the "s
    str = str[1:-1]

    # Check for special sequences.  Examples:
    #    \012 --> \n
    #    \"   --> "
    #
    i = 0
    n = len(str)
    res = []
    while 0 <= i < n:
        o_match = _OctalPatt.search(str, i)
        q_match = _QuotePatt.search(str, i)
        if not o_match and not q_match:              # Neither matched
            res.append(str[i:])
            break
        # else:
        j = k = -1
        if o_match:
            j = o_match.start(0)
        if q_match:
            k = q_match.start(0)
        if q_match and (not o_match or k < j):       # QuotePatt matched
            res.append(str[i:k])
            res.append(str[k+1])
            i = k + 2
        else:                                        # OctalPatt matched
            res.append(str[i:j])
            res.append(chr(int(str[j+1:j+4], 8)))
            i = j + 4
    return _nulljoin(res)
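
# --- Editor's note (illustrative sketch, not upstream code) ---
# _quote()/_unquote() form the round trip described in the comment block
# above: non-legal characters become \ooo octal escapes inside double
# quotes, and _unquote() reverses both escape forms:
#
#     >>> _quote('fig,newton')
#     '"fig\\054newton"'
#     >>> _unquote('"fig\\054newton"')
#     'fig,newton'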

# The _getdate() routine is used to set the expiration time in the cookie's HTTP
# header.  By default, _getdate() returns the current time in the appropriate
# "expires" format for a Set-Cookie header.  The one optional argument is an
# offset from now, in seconds.  For example, an offset of -3600 means "one hour
# ago".  The offset may be a floating point number.
#

_weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

_monthname = [None,
              'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
              'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']


def _getdate(future=0, weekdayname=_weekdayname, monthname=_monthname):
    from eventlet.green.time import gmtime, time
    now = time()
    year, month, day, hh, mm, ss, wd, y, z = gmtime(now + future)
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % \
           (weekdayname[wd], day, monthname[month], year, hh, mm, ss)


class Morsel(dict):
    """A class to hold ONE (key, value) pair.

    In a cookie, each such pair may have several attributes, so this class is
    used to keep the attributes associated with the appropriate key,value pair.
    This class also includes a coded_value attribute, which is used to hold
    the network representation of the value.  This is most useful when Python
    objects are pickled for network transit.
    """
    # RFC 2109 lists these attributes as reserved:
    #   path    comment    domain
    #   max-age secure     version
    #
    # For historical reasons, these attributes are also reserved:
    #   expires
    #
    # This is an extension from Microsoft:
    #   httponly
    #
    # This dictionary provides a mapping from the lowercase
    # variant on the left to the appropriate traditional
    # formatting on the right.
    _reserved = {
        "expires":  "expires",
        "path":     "Path",
        "comment":  "Comment",
        "domain":   "Domain",
        "max-age":  "Max-Age",
        "secure":   "Secure",
        "httponly": "HttpOnly",
        "version":  "Version",
    }

    _flags = {'secure', 'httponly'}

    def __init__(self):
        # Set defaults
        self._key = self._value = self._coded_value = None

        # Set default attributes
        for key in self._reserved:
            dict.__setitem__(self, key, "")

    @property
    def key(self):
        return self._key

    @key.setter
    def key(self, key):
        _warn_deprecated_setter('key')
        self._key = key

    @property
    def value(self):
        return self._value

    @value.setter
    def value(self, value):
        _warn_deprecated_setter('value')
        self._value = value

    @property
    def coded_value(self):
        return self._coded_value

    @coded_value.setter
    def coded_value(self, coded_value):
        _warn_deprecated_setter('coded_value')
        self._coded_value = coded_value

    def __setitem__(self, K, V):
        K = K.lower()
        if K not in self._reserved:
            raise CookieError("Invalid attribute %r" % (K,))
        dict.__setitem__(self, K, V)

    def setdefault(self, key, val=None):
        key = key.lower()
        if key not in self._reserved:
            raise CookieError("Invalid attribute %r" % (key,))
        return dict.setdefault(self, key, val)

    def __eq__(self, morsel):
        if not isinstance(morsel, Morsel):
            return NotImplemented
        return (dict.__eq__(self, morsel) and
                self._value == morsel._value and
                self._key == morsel._key and
                self._coded_value == morsel._coded_value)

    __ne__ = object.__ne__

    def copy(self):
        morsel = Morsel()
        dict.update(morsel, self)
        morsel.__dict__.update(self.__dict__)
        return morsel

    def update(self, values):
        data = {}
        for key, val in dict(values).items():
            key = key.lower()
            if key not in self._reserved:
                raise CookieError("Invalid attribute %r" % (key,))
            data[key] = val
        dict.update(self, data)

    def isReservedKey(self, K):
        return K.lower() in self._reserved

    def set(self, key, val, coded_val, LegalChars=_LegalChars):
        if LegalChars != _LegalChars:
            import warnings
            warnings.warn(
                'LegalChars parameter is deprecated, ignored and will '
                'be removed in future versions.', DeprecationWarning,
                stacklevel=2)

        if key.lower() in self._reserved:
            raise CookieError('Attempt to set a reserved key %r' % (key,))
        if not _is_legal_key(key):
            raise CookieError('Illegal key %r' % (key,))

        # It's a good key, so save it.
        self._key = key
        self._value = val
        self._coded_value = coded_val

    def __getstate__(self):
        return {
            'key': self._key,
            'value': self._value,
            'coded_value': self._coded_value,
|
||||
}
|
||||
|
||||
def __setstate__(self, state):
|
||||
self._key = state['key']
|
||||
self._value = state['value']
|
||||
self._coded_value = state['coded_value']
|
||||
|
||||
def output(self, attrs=None, header="Set-Cookie:"):
|
||||
return "%s %s" % (header, self.OutputString(attrs))
|
||||
|
||||
__str__ = output
|
||||
|
||||
def __repr__(self):
|
||||
return '<%s: %s>' % (self.__class__.__name__, self.OutputString())
|
||||
|
||||
def js_output(self, attrs=None):
|
||||
# Print javascript
|
||||
return """
|
||||
<script type="text/javascript">
|
||||
<!-- begin hiding
|
||||
document.cookie = \"%s\";
|
||||
// end hiding -->
|
||||
</script>
|
||||
""" % (self.OutputString(attrs).replace('"', r'\"'))
|
||||
|
||||
def OutputString(self, attrs=None):
|
||||
# Build up our result
|
||||
#
|
||||
result = []
|
||||
append = result.append
|
||||
|
||||
# First, the key=value pair
|
||||
append("%s=%s" % (self.key, self.coded_value))
|
||||
|
||||
# Now add any defined attributes
|
||||
if attrs is None:
|
||||
attrs = self._reserved
|
||||
items = sorted(self.items())
|
||||
for key, value in items:
|
||||
if value == "":
|
||||
continue
|
||||
if key not in attrs:
|
||||
continue
|
||||
if key == "expires" and isinstance(value, int):
|
||||
append("%s=%s" % (self._reserved[key], _getdate(value)))
|
||||
elif key == "max-age" and isinstance(value, int):
|
||||
append("%s=%d" % (self._reserved[key], value))
|
||||
elif key in self._flags:
|
||||
if value:
|
||||
append(str(self._reserved[key]))
|
||||
else:
|
||||
append("%s=%s" % (self._reserved[key], value))
|
||||
|
||||
# Return the result
|
||||
return _semispacejoin(result)
|
||||
|
||||
|
||||
#
|
||||
# Pattern for finding cookie
|
||||
#
|
||||
# This used to be strict parsing based on the RFC2109 and RFC2068
|
||||
# specifications. I have since discovered that MSIE 3.0x doesn't
|
||||
# follow the character rules outlined in those specs. As a
|
||||
# result, the parsing rules here are less strict.
|
||||
#
|
||||
|
||||
_LegalKeyChars = r"\w\d!#%&'~_`><@,:/\$\*\+\-\.\^\|\)\(\?\}\{\="
|
||||
_LegalValueChars = _LegalKeyChars + '\[\]'
|
||||
_CookiePattern = re.compile(r"""
|
||||
(?x) # This is a verbose pattern
|
||||
\s* # Optional whitespace at start of cookie
|
||||
(?P<key> # Start of group 'key'
|
||||
[""" + _LegalKeyChars + r"""]+? # Any word of at least one letter
|
||||
) # End of group 'key'
|
||||
( # Optional group: there may not be a value.
|
||||
\s*=\s* # Equal Sign
|
||||
(?P<val> # Start of group 'val'
|
||||
"(?:[^\\"]|\\.)*" # Any doublequoted string
|
||||
| # or
|
||||
\w{3},\s[\w\d\s-]{9,11}\s[\d:]{8}\sGMT # Special case for "expires" attr
|
||||
| # or
|
||||
[""" + _LegalValueChars + r"""]* # Any word or empty string
|
||||
) # End of group 'val'
|
||||
)? # End of optional value group
|
||||
\s* # Any number of spaces.
|
||||
(\s+|;|$) # Ending either at space, semicolon, or EOS.
|
||||
""", re.ASCII) # May be removed if safe.
|
||||
|
||||
|
||||
# At long last, here is the cookie class. Using this class is almost just like
|
||||
# using a dictionary. See this module's docstring for example usage.
|
||||
#
|
||||
class BaseCookie(dict):
|
||||
"""A container class for a set of Morsels."""
|
||||
|
||||
def value_decode(self, val):
|
||||
"""real_value, coded_value = value_decode(STRING)
|
||||
Called prior to setting a cookie's value from the network
|
||||
representation. The VALUE is the value read from HTTP
|
||||
header.
|
||||
Override this function to modify the behavior of cookies.
|
||||
"""
|
||||
return val, val
|
||||
|
||||
def value_encode(self, val):
|
||||
"""real_value, coded_value = value_encode(VALUE)
|
||||
Called prior to setting a cookie's value from the dictionary
|
||||
representation. The VALUE is the value being assigned.
|
||||
Override this function to modify the behavior of cookies.
|
||||
"""
|
||||
strval = str(val)
|
||||
return strval, strval
|
||||
|
||||
def __init__(self, input=None):
|
||||
if input:
|
||||
self.load(input)
|
||||
|
||||
def __set(self, key, real_value, coded_value):
|
||||
"""Private method for setting a cookie's value"""
|
||||
M = self.get(key, Morsel())
|
||||
M.set(key, real_value, coded_value)
|
||||
dict.__setitem__(self, key, M)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
"""Dictionary style assignment."""
|
||||
if isinstance(value, Morsel):
|
||||
# allow assignment of constructed Morsels (e.g. for pickling)
|
||||
dict.__setitem__(self, key, value)
|
||||
else:
|
||||
rval, cval = self.value_encode(value)
|
||||
self.__set(key, rval, cval)
|
||||
|
||||
def output(self, attrs=None, header="Set-Cookie:", sep="\015\012"):
|
||||
"""Return a string suitable for HTTP."""
|
||||
result = []
|
||||
items = sorted(self.items())
|
||||
for key, value in items:
|
||||
result.append(value.output(attrs, header))
|
||||
return sep.join(result)
|
||||
|
||||
__str__ = output
|
||||
|
||||
def __repr__(self):
|
||||
l = []
|
||||
items = sorted(self.items())
|
||||
for key, value in items:
|
||||
l.append('%s=%s' % (key, repr(value.value)))
|
||||
return '<%s: %s>' % (self.__class__.__name__, _spacejoin(l))
|
||||
|
||||
def js_output(self, attrs=None):
|
||||
"""Return a string suitable for JavaScript."""
|
||||
result = []
|
||||
items = sorted(self.items())
|
||||
for key, value in items:
|
||||
result.append(value.js_output(attrs))
|
||||
return _nulljoin(result)
|
||||
|
||||
def load(self, rawdata):
|
||||
"""Load cookies from a string (presumably HTTP_COOKIE) or
|
||||
from a dictionary. Loading cookies from a dictionary 'd'
|
||||
is equivalent to calling:
|
||||
map(Cookie.__setitem__, d.keys(), d.values())
|
||||
"""
|
||||
if isinstance(rawdata, str):
|
||||
self.__parse_string(rawdata)
|
||||
else:
|
||||
# self.update() wouldn't call our custom __setitem__
|
||||
for key, value in rawdata.items():
|
||||
self[key] = value
|
||||
return
|
||||
|
||||
def __parse_string(self, str, patt=_CookiePattern):
|
||||
i = 0 # Our starting point
|
||||
n = len(str) # Length of string
|
||||
parsed_items = [] # Parsed (type, key, value) triples
|
||||
morsel_seen = False # A key=value pair was previously encountered
|
||||
|
||||
TYPE_ATTRIBUTE = 1
|
||||
TYPE_KEYVALUE = 2
|
||||
|
||||
# We first parse the whole cookie string and reject it if it's
|
||||
# syntactically invalid (this helps avoid some classes of injection
|
||||
# attacks).
|
||||
while 0 <= i < n:
|
||||
# Start looking for a cookie
|
||||
match = patt.match(str, i)
|
||||
if not match:
|
||||
# No more cookies
|
||||
break
|
||||
|
||||
key, value = match.group("key"), match.group("val")
|
||||
i = match.end(0)
|
||||
|
||||
if key[0] == "$":
|
||||
if not morsel_seen:
|
||||
# We ignore attributes which pertain to the cookie
|
||||
# mechanism as a whole, such as "$Version".
|
||||
# See RFC 2965. (Does anyone care?)
|
||||
continue
|
||||
parsed_items.append((TYPE_ATTRIBUTE, key[1:], value))
|
||||
elif key.lower() in Morsel._reserved:
|
||||
if not morsel_seen:
|
||||
# Invalid cookie string
|
||||
return
|
||||
if value is None:
|
||||
if key.lower() in Morsel._flags:
|
||||
parsed_items.append((TYPE_ATTRIBUTE, key, True))
|
||||
else:
|
||||
# Invalid cookie string
|
||||
return
|
||||
else:
|
||||
parsed_items.append((TYPE_ATTRIBUTE, key, _unquote(value)))
|
||||
elif value is not None:
|
||||
parsed_items.append((TYPE_KEYVALUE, key, self.value_decode(value)))
|
||||
morsel_seen = True
|
||||
else:
|
||||
# Invalid cookie string
|
||||
return
|
||||
|
||||
# The cookie string is valid, apply it.
|
||||
M = None # current morsel
|
||||
for tp, key, value in parsed_items:
|
||||
if tp == TYPE_ATTRIBUTE:
|
||||
assert M is not None
|
||||
M[key] = value
|
||||
else:
|
||||
assert tp == TYPE_KEYVALUE
|
||||
rval, cval = value
|
||||
self.__set(key, rval, cval)
|
||||
M = self[key]
|
||||
|
||||
|
||||
class SimpleCookie(BaseCookie):
|
||||
"""
|
||||
SimpleCookie supports strings as cookie values. When setting
|
||||
the value using the dictionary assignment notation, SimpleCookie
|
||||
calls the builtin str() to convert the value to a string. Values
|
||||
received from HTTP are kept as strings.
|
||||
"""
|
||||
def value_decode(self, val):
|
||||
return _unquote(val), val
|
||||
|
||||
def value_encode(self, val):
|
||||
strval = str(val)
|
||||
return strval, _quote(strval)
|
||||
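
A minimal usage sketch of the classes above: parse a Cookie header string, tweak an attribute, and emit a Set-Cookie header (the session value is a made-up example).

# Sketch: round-trip a cookie through SimpleCookie as defined above.
from eventlet.green.http.cookies import SimpleCookie

c = SimpleCookie()
c.load('session="abc\\012def"; Path=/')
print(c['session'].value)   # octal escape decoded to a newline by _unquote()
c['session']['max-age'] = 3600
print(c.output())           # Set-Cookie: session="abc\012def"; Max-Age=3600; Path=/
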
BIN
venv/lib/python2.7/site-packages/eventlet/green/http/cookies.pyc
Normal file
Binary file not shown.
1266
venv/lib/python2.7/site-packages/eventlet/green/http/server.py
Normal file
File diff suppressed because it is too large
Load Diff
BIN
venv/lib/python2.7/site-packages/eventlet/green/http/server.pyc
Normal file
Binary file not shown.
22
venv/lib/python2.7/site-packages/eventlet/green/httplib.py
Normal file
@@ -0,0 +1,22 @@
from eventlet import patcher
from eventlet.green import socket
from eventlet.support import six

to_patch = [('socket', socket)]

try:
    from eventlet.green import ssl
    to_patch.append(('ssl', ssl))
except ImportError:
    pass

if six.PY2:
    patcher.inject('httplib', globals(), *to_patch)
if six.PY3:
    from eventlet.green.http import client
    for name in dir(client):
        if name not in patcher.__exclude:
            globals()[name] = getattr(client, name)

if __name__ == '__main__':
    test()
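
Since the stdlib names are injected into this module's namespace, the green module is used exactly like stdlib httplib; a minimal sketch (the host is a placeholder), where only the calling greenthread blocks on I/O:

# Sketch: an ordinary httplib request, green under the hood.
from eventlet.green import httplib

conn = httplib.HTTPConnection('example.com')
conn.request('GET', '/')
resp = conn.getresponse()
print(resp.status)
conn.close()
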
BIN
venv/lib/python2.7/site-packages/eventlet/green/httplib.pyc
Normal file
Binary file not shown.
111
venv/lib/python2.7/site-packages/eventlet/green/os.py
Normal file
@@ -0,0 +1,111 @@
os_orig = __import__("os")
import errno
socket = __import__("socket")

from eventlet import greenio
from eventlet.support import get_errno
from eventlet import greenthread
from eventlet import hubs
from eventlet.patcher import slurp_properties

__all__ = os_orig.__all__
__patched__ = ['fdopen', 'read', 'write', 'wait', 'waitpid', 'open']

slurp_properties(
    os_orig,
    globals(),
    ignore=__patched__,
    srckeys=dir(os_orig))


def fdopen(fd, *args, **kw):
    """fdopen(fd [, mode='r' [, bufsize]]) -> file_object

    Return an open file object connected to a file descriptor."""
    if not isinstance(fd, int):
        raise TypeError('fd should be int, not %r' % fd)
    try:
        return greenio.GreenPipe(fd, *args, **kw)
    except IOError as e:
        raise OSError(*e.args)


__original_read__ = os_orig.read


def read(fd, n):
    """read(fd, buffersize) -> string

    Read a file descriptor."""
    while True:
        try:
            return __original_read__(fd, n)
        except (OSError, IOError) as e:
            if get_errno(e) != errno.EAGAIN:
                raise
        except socket.error as e:
            if get_errno(e) == errno.EPIPE:
                return ''
            raise
        try:
            hubs.trampoline(fd, read=True)
        except hubs.IOClosed:
            return ''


__original_write__ = os_orig.write


def write(fd, st):
    """write(fd, string) -> byteswritten

    Write a string to a file descriptor.
    """
    while True:
        try:
            return __original_write__(fd, st)
        except (OSError, IOError) as e:
            if get_errno(e) != errno.EAGAIN:
                raise
        except socket.error as e:
            if get_errno(e) != errno.EPIPE:
                raise
        hubs.trampoline(fd, write=True)


def wait():
    """wait() -> (pid, status)

    Wait for completion of a child process."""
    return waitpid(0, 0)


__original_waitpid__ = os_orig.waitpid


def waitpid(pid, options):
    """waitpid(...)
    waitpid(pid, options) -> (pid, status)

    Wait for completion of a given child process."""
    if options & os_orig.WNOHANG != 0:
        return __original_waitpid__(pid, options)
    else:
        new_options = options | os_orig.WNOHANG
        while True:
            rpid, status = __original_waitpid__(pid, new_options)
            if rpid and status >= 0:
                return rpid, status
            greenthread.sleep(0.01)


__original_open__ = os_orig.open


def open(file, flags, mode=0o777, dir_fd=None):
    """ Wrap os.open
        This behaves identically, but collaborates with
        the hub's notify_opened protocol.
    """
    if dir_fd is not None:
        fd = __original_open__(file, flags, mode, dir_fd=dir_fd)
    else:
        fd = __original_open__(file, flags, mode)
    hubs.notify_opened(fd)
    return fd
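
A minimal sketch of the trampolining behavior above, assuming a POSIX platform: once the read end of a pipe is made non-blocking, green os.read() sees EAGAIN, yields to the hub, and resumes when another greenthread writes.

# Sketch: cooperative read on a non-blocking pipe (fcntl flags are an
# illustrative way to set O_NONBLOCK; any equivalent works).
import fcntl
import eventlet
from eventlet.green import os

r, w = os.pipe()
fcntl.fcntl(r, fcntl.F_SETFL, fcntl.fcntl(r, fcntl.F_GETFL) | os.O_NONBLOCK)
eventlet.spawn_after(0.1, os.write, w, b'hi')
print(os.read(r, 4))  # waits cooperatively, then prints the data
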
BIN
venv/lib/python2.7/site-packages/eventlet/green/os.pyc
Normal file
Binary file not shown.
257
venv/lib/python2.7/site-packages/eventlet/green/profile.py
Normal file
@@ -0,0 +1,257 @@
# Copyright (c) 2010, CCP Games
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of CCP Games nor the
#       names of its contributors may be used to endorse or promote products
#       derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY CCP GAMES ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL CCP GAMES BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""This module is API-equivalent to the standard library :mod:`profile` module
but it is greenthread-aware as well as thread-aware. Use this module
to profile Eventlet-based applications in preference to either :mod:`profile` or :mod:`cProfile`.
FIXME: No testcases for this module.
"""

profile_orig = __import__('profile')
__all__ = profile_orig.__all__

from eventlet.patcher import slurp_properties
slurp_properties(profile_orig, globals(), srckeys=dir(profile_orig))

import sys
import functools

from eventlet import greenthread
from eventlet import patcher
from eventlet.support import six

thread = patcher.original(six.moves._thread.__name__)  # non-monkeypatched module needed


# This class provides the start() and stop() functions
class Profile(profile_orig.Profile):
    base = profile_orig.Profile

    def __init__(self, timer=None, bias=None):
        self.current_tasklet = greenthread.getcurrent()
        self.thread_id = thread.get_ident()
        self.base.__init__(self, timer, bias)
        self.sleeping = {}

    def __call__(self, *args):
        """make callable, allowing an instance to be the profiler"""
        self.dispatcher(*args)

    def _setup(self):
        self._has_setup = True
        self.cur = None
        self.timings = {}
        self.current_tasklet = greenthread.getcurrent()
        self.thread_id = thread.get_ident()
        self.simulate_call("profiler")

    def start(self, name="start"):
        if getattr(self, "running", False):
            return
        self._setup()
        self.simulate_call("start")
        self.running = True
        sys.setprofile(self.dispatcher)

    def stop(self):
        sys.setprofile(None)
        self.running = False
        self.TallyTimings()

    # special cases for the original run commands, making sure to
    # clear the timer context.
    def runctx(self, cmd, globals, locals):
        if not getattr(self, "_has_setup", False):
            self._setup()
        try:
            return profile_orig.Profile.runctx(self, cmd, globals, locals)
        finally:
            self.TallyTimings()

    def runcall(self, func, *args, **kw):
        if not getattr(self, "_has_setup", False):
            self._setup()
        try:
            return profile_orig.Profile.runcall(self, func, *args, **kw)
        finally:
            self.TallyTimings()

    def trace_dispatch_return_extend_back(self, frame, t):
        """A hack function to override error checking in parent class. It
        allows invalid returns (where frames weren't previously entered into
        the profiler) which can happen for all the tasklets that suddenly start
        to get monitored. This means that the time will eventually be attributed
        to a call high in the chain, when there is a tasklet switch
        """
        if isinstance(self.cur[-2], Profile.fake_frame):
            return False
        self.trace_dispatch_call(frame, 0)
        return self.trace_dispatch_return(frame, t)

    def trace_dispatch_c_return_extend_back(self, frame, t):
        # same for c return
        if isinstance(self.cur[-2], Profile.fake_frame):
            return False  # ignore bogus returns
        self.trace_dispatch_c_call(frame, 0)
        return self.trace_dispatch_return(frame, t)

    def SwitchTasklet(self, t0, t1, t):
        # tally the time spent in the old tasklet
        pt, it, et, fn, frame, rcur = self.cur
        cur = (pt, it + t, et, fn, frame, rcur)

        # we are switching to a new tasklet, store the old
        self.sleeping[t0] = cur, self.timings
        self.current_tasklet = t1

        # find the new one
        try:
            self.cur, self.timings = self.sleeping.pop(t1)
        except KeyError:
            self.cur, self.timings = None, {}
            self.simulate_call("profiler")
            self.simulate_call("new_tasklet")

    def TallyTimings(self):
        oldtimings = self.sleeping
        self.sleeping = {}

        # first, unwind the main "cur"
        self.cur = self.Unwind(self.cur, self.timings)

        # we must keep the timings dicts separate for each tasklet, since it contains
        # the 'ns' item, recursion count of each function in that tasklet. This is
        # used in the Unwind dude.
        for tasklet, (cur, timings) in six.iteritems(oldtimings):
            self.Unwind(cur, timings)

            for k, v in six.iteritems(timings):
                if k not in self.timings:
                    self.timings[k] = v
                else:
                    # accumulate all to the self.timings
                    cc, ns, tt, ct, callers = self.timings[k]
                    # ns should be 0 after unwinding
                    cc += v[0]
                    tt += v[2]
                    ct += v[3]
                    for k1, v1 in six.iteritems(v[4]):
                        callers[k1] = callers.get(k1, 0) + v1
                    self.timings[k] = cc, ns, tt, ct, callers

    def Unwind(self, cur, timings):
        "A function to unwind a 'cur' frame and tally the results"
        "see profile.trace_dispatch_return() for details"
        # also see simulate_cmd_complete()
        while(cur[-1]):
            rpt, rit, ret, rfn, frame, rcur = cur
            frame_total = rit + ret

            if rfn in timings:
                cc, ns, tt, ct, callers = timings[rfn]
            else:
                cc, ns, tt, ct, callers = 0, 0, 0, 0, {}

            if not ns:
                ct = ct + frame_total
                cc = cc + 1

            if rcur:
                ppt, pit, pet, pfn, pframe, pcur = rcur
            else:
                pfn = None

            if pfn in callers:
                callers[pfn] = callers[pfn] + 1  # hack: gather more
            elif pfn:
                callers[pfn] = 1

            timings[rfn] = cc, ns - 1, tt + rit, ct, callers

            ppt, pit, pet, pfn, pframe, pcur = rcur
            rcur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur
            cur = rcur
        return cur


def ContextWrap(f):
    @functools.wraps(f)
    def ContextWrapper(self, arg, t):
        current = greenthread.getcurrent()
        if current != self.current_tasklet:
            self.SwitchTasklet(self.current_tasklet, current, t)
            t = 0.0  # the time was billed to the previous tasklet
        return f(self, arg, t)
    return ContextWrapper


# Add "return safety" to the dispatchers
Profile.dispatch = dict(profile_orig.Profile.dispatch, **{
    'return': Profile.trace_dispatch_return_extend_back,
    'c_return': Profile.trace_dispatch_c_return_extend_back,
})
# Add automatic tasklet detection to the callbacks.
Profile.dispatch = dict((k, ContextWrap(v)) for k, v in six.viewitems(Profile.dispatch))


# run statements shamelessly stolen from profile.py
def run(statement, filename=None, sort=-1):
    """Run statement under profiler optionally saving results in filename

    This function takes a single argument that can be passed to the
    "exec" statement, and an optional file name. In all cases this
    routine attempts to "exec" its first argument and gather profiling
    statistics from the execution. If no file name is present, then this
    function automatically prints a simple profiling report, sorted by the
    standard name string (file/line/function-name) that is presented in
    each line.
    """
    prof = Profile()
    try:
        prof = prof.run(statement)
    except SystemExit:
        pass
    if filename is not None:
        prof.dump_stats(filename)
    else:
        return prof.print_stats(sort)


def runctx(statement, globals, locals, filename=None):
    """Run statement under profiler, supplying your own globals and locals,
    optionally saving results in filename.

    statement and filename have the same semantics as profile.run
    """
    prof = Profile()
    try:
        prof = prof.runctx(statement, globals, locals)
    except SystemExit:
        pass

    if filename is not None:
        prof.dump_stats(filename)
    else:
        return prof.print_stats()
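
A minimal sketch of the start()/stop() interface the class above adds: profile a few greenthreads that sleep cooperatively, so time is tallied per tasklet rather than charged entirely to the hub.

# Sketch: explicit start/stop profiling of greenthread work.
import eventlet
from eventlet.green import profile

def worker():
    eventlet.sleep(0.01)

p = profile.Profile()
p.start()
pool = eventlet.GreenPool()
for _ in range(3):
    pool.spawn(worker)
pool.waitall()
p.stop()
p.print_stats()  # inherited from stdlib profile.Profile
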
BIN
venv/lib/python2.7/site-packages/eventlet/green/profile.pyc
Normal file
Binary file not shown.
86
venv/lib/python2.7/site-packages/eventlet/green/select.py
Normal file
@@ -0,0 +1,86 @@
import eventlet
from eventlet.hubs import get_hub
from eventlet.support import six
__select = eventlet.patcher.original('select')
error = __select.error


__patched__ = ['select']
__deleted__ = ['devpoll', 'poll', 'epoll', 'kqueue', 'kevent']


def get_fileno(obj):
    # The purpose of this function is to exactly replicate
    # the behavior of the select module when confronted with
    # abnormal filenos; the details are extensively tested in
    # the stdlib test/test_select.py.
    try:
        f = obj.fileno
    except AttributeError:
        if not isinstance(obj, six.integer_types):
            raise TypeError("Expected int or long, got %s" % type(obj))
        return obj
    else:
        rv = f()
        if not isinstance(rv, six.integer_types):
            raise TypeError("Expected int or long, got %s" % type(rv))
        return rv


def select(read_list, write_list, error_list, timeout=None):
    # error checking like this is required by the stdlib unit tests
    if timeout is not None:
        try:
            timeout = float(timeout)
        except ValueError:
            raise TypeError("Expected number for timeout")
    hub = get_hub()
    timers = []
    current = eventlet.getcurrent()
    assert hub.greenlet is not current, 'do not call blocking functions from the mainloop'
    ds = {}
    for r in read_list:
        ds[get_fileno(r)] = {'read': r}
    for w in write_list:
        ds.setdefault(get_fileno(w), {})['write'] = w
    for e in error_list:
        ds.setdefault(get_fileno(e), {})['error'] = e

    listeners = []

    def on_read(d):
        original = ds[get_fileno(d)]['read']
        current.switch(([original], [], []))

    def on_write(d):
        original = ds[get_fileno(d)]['write']
        current.switch(([], [original], []))

    def on_timeout2():
        current.switch(([], [], []))

    def on_timeout():
        # ensure that BaseHub.run() has a chance to call self.wait()
        # at least once before timed out. otherwise the following code
        # can time out erroneously.
        #
        # s1, s2 = socket.socketpair()
        # print(select.select([], [s1], [], 0))
        timers.append(hub.schedule_call_global(0, on_timeout2))

    if timeout is not None:
        timers.append(hub.schedule_call_global(timeout, on_timeout))
    try:
        for k, v in six.iteritems(ds):
            if v.get('read'):
                listeners.append(hub.add(hub.READ, k, on_read, current.throw, lambda: None))
            if v.get('write'):
                listeners.append(hub.add(hub.WRITE, k, on_write, current.throw, lambda: None))
        try:
            return hub.switch()
        finally:
            for l in listeners:
                hub.remove(l)
    finally:
        for t in timers:
            t.cancel()
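
A minimal sketch of the case described in the on_timeout() comment above, assuming a POSIX platform with socketpair(): even a zero-timeout select gets one hub iteration, so a writable socket is reported rather than an erroneous timeout.

# Sketch: zero-timeout select still sees the writable socket.
from eventlet.green import socket, select

s1, s2 = socket.socketpair()
print(select.select([], [s1], [], 0))   # ([], [s1], []) -- s1 is writable
s2.sendall(b'x')
r, w, e = select.select([s1], [], [], 1.0)
print(r)                                # [s1] once data is waiting
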
BIN
venv/lib/python2.7/site-packages/eventlet/green/select.pyc
Normal file
Binary file not shown.
34
venv/lib/python2.7/site-packages/eventlet/green/selectors.py
Normal file
@@ -0,0 +1,34 @@
import sys

from eventlet import patcher
from eventlet.green import select

__patched__ = [
    'DefaultSelector',
    'SelectSelector',
]

# We only have green select so the options are:
# * leave it be and have selectors that block
# * try to pretend the "bad" selectors don't exist
# * replace all with SelectSelector for the price of possibly different
#   performance characteristic and missing fileno() method (if someone
#   uses it it'll result in a crash, we may want to implement it in the future)
#
# This module used to follow the third approach, but just removing the
# offending selectors is a less error-prone and less confusing approach.
__deleted__ = [
    'PollSelector',
    'EpollSelector',
    'DevpollSelector',
    'KqueueSelector',
]

patcher.inject('selectors', globals(), ('select', select))

del patcher

if sys.platform != 'win32':
    SelectSelector._select = staticmethod(select.select)

DefaultSelector = SelectSelector
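
A minimal sketch of the resulting API, applicable on Python 3 where the stdlib selectors module exists: after injection, DefaultSelector is the select()-based selector driven by the green select above.

# Sketch: wait for readability through the green DefaultSelector.
from eventlet.green import socket, selectors

s1, s2 = socket.socketpair()
sel = selectors.DefaultSelector()
sel.register(s1, selectors.EVENT_READ)
s2.sendall(b'ping')
for key, events in sel.select(timeout=1.0):
    print(key.fileobj is s1, events == selectors.EVENT_READ)
sel.close()
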
BIN
venv/lib/python2.7/site-packages/eventlet/green/selectors.pyc
Normal file
Binary file not shown.
63
venv/lib/python2.7/site-packages/eventlet/green/socket.py
Normal file
@@ -0,0 +1,63 @@
import os
import sys

__import__('eventlet.green._socket_nodns')
__socket = sys.modules['eventlet.green._socket_nodns']

__all__ = __socket.__all__
__patched__ = __socket.__patched__ + [
    'create_connection',
    'getaddrinfo',
    'gethostbyname',
    'gethostbyname_ex',
    'getnameinfo',
]

from eventlet.patcher import slurp_properties
slurp_properties(__socket, globals(), srckeys=dir(__socket))


if os.environ.get("EVENTLET_NO_GREENDNS", '').lower() != 'yes':
    from eventlet.support import greendns
    gethostbyname = greendns.gethostbyname
    getaddrinfo = greendns.getaddrinfo
    gethostbyname_ex = greendns.gethostbyname_ex
    getnameinfo = greendns.getnameinfo
    del greendns


def create_connection(address,
                      timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.

    Convenience function. Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object. Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect. If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used.
    """

    err = "getaddrinfo returns an empty list"
    host, port = address
    for res in getaddrinfo(host, port, 0, SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            return sock

        except error as e:
            err = e
            if sock is not None:
                sock.close()

    if not isinstance(err, error):
        err = error(err)
    raise err
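
A minimal usage sketch; the host is a placeholder. Name resolution goes through greendns (unless EVENTLET_NO_GREENDNS is set) and connect() cooperates with other greenthreads.

# Sketch: a plain TCP request through the green socket module.
from eventlet.green import socket

sock = socket.create_connection(('example.com', 80), timeout=5)
sock.sendall(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
print(sock.recv(64))
sock.close()
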
BIN
venv/lib/python2.7/site-packages/eventlet/green/socket.pyc
Normal file
Binary file not shown.
439
venv/lib/python2.7/site-packages/eventlet/green/ssl.py
Normal file
@@ -0,0 +1,439 @@
__ssl = __import__('ssl')

from eventlet.patcher import slurp_properties
slurp_properties(__ssl, globals(), srckeys=dir(__ssl))

import errno
import functools
import sys
import time

from eventlet import greenio
from eventlet.greenio import (
    set_nonblocking, GreenSocket, CONNECT_ERR, CONNECT_SUCCESS,
)
from eventlet.hubs import trampoline, IOClosed
from eventlet.support import get_errno, PY33, six
orig_socket = __import__('socket')
socket = orig_socket.socket
if sys.version_info >= (2, 7):
    has_ciphers = True
    timeout_exc = SSLError
else:
    has_ciphers = False
    timeout_exc = orig_socket.timeout

__patched__ = [
    'SSLSocket', 'SSLContext', 'wrap_socket', 'sslwrap_simple',
    'create_default_context', '_create_default_https_context']

_original_sslsocket = __ssl.SSLSocket


class GreenSSLSocket(_original_sslsocket):
    """ This is a green version of the SSLSocket class from the ssl module added
    in 2.6. For documentation on it, please see the Python standard
    documentation.

    Python nonblocking ssl objects don't give errors when the other end
    of the socket is closed (they do notice when the other end is shutdown,
    though). Any write/read operations will simply hang if the socket is
    closed from the other end. There is no obvious fix for this problem;
    it appears to be a limitation of Python's ssl object implementation.
    A workaround is to set a reasonable timeout on the socket using
    settimeout(), and to close/reopen the connection when a timeout
    occurs at an unexpected juncture in the code.
    """
    # we are inheriting from SSLSocket because its constructor calls
    # do_handshake whose behavior we wish to override

    def __init__(self, sock, keyfile=None, certfile=None,
                 server_side=False, cert_reqs=CERT_NONE,
                 ssl_version=PROTOCOL_SSLv23, ca_certs=None,
                 do_handshake_on_connect=True, *args, **kw):
        if not isinstance(sock, GreenSocket):
            sock = GreenSocket(sock)

        self.act_non_blocking = sock.act_non_blocking

        if six.PY2:
            # On Python 2 SSLSocket constructor queries the timeout, it'd break without
            # this assignment
            self._timeout = sock.gettimeout()

        # nonblocking socket handshaking on connect got disabled so let's pretend it's disabled
        # even when it's on
        super(GreenSSLSocket, self).__init__(
            sock.fd, keyfile, certfile, server_side, cert_reqs, ssl_version,
            ca_certs, do_handshake_on_connect and six.PY2, *args, **kw)

        # the superclass initializer trashes the methods so we remove
        # the local-object versions of them and let the actual class
        # methods shine through
        # Note: This for Python 2
        try:
            for fn in orig_socket._delegate_methods:
                delattr(self, fn)
        except AttributeError:
            pass

        if six.PY3:
            # Python 3 SSLSocket construction process overwrites the timeout so restore it
            self._timeout = sock.gettimeout()

            # it also sets timeout to None internally apparently (tested with 3.4.2)
            _original_sslsocket.settimeout(self, 0.0)
            assert _original_sslsocket.gettimeout(self) == 0.0

            # see note above about handshaking
            self.do_handshake_on_connect = do_handshake_on_connect
            if do_handshake_on_connect and self._connected:
                self.do_handshake()

    def settimeout(self, timeout):
        self._timeout = timeout

    def gettimeout(self):
        return self._timeout

    def setblocking(self, flag):
        if flag:
            self.act_non_blocking = False
            self._timeout = None
        else:
            self.act_non_blocking = True
            self._timeout = 0.0

    def _call_trampolining(self, func, *a, **kw):
        if self.act_non_blocking:
            return func(*a, **kw)
        else:
            while True:
                try:
                    return func(*a, **kw)
                except SSLError as exc:
                    if get_errno(exc) == SSL_ERROR_WANT_READ:
                        trampoline(self,
                                   read=True,
                                   timeout=self.gettimeout(),
                                   timeout_exc=timeout_exc('timed out'))
                    elif get_errno(exc) == SSL_ERROR_WANT_WRITE:
                        trampoline(self,
                                   write=True,
                                   timeout=self.gettimeout(),
                                   timeout_exc=timeout_exc('timed out'))
                    else:
                        raise

    def write(self, data):
        """Write DATA to the underlying SSL channel. Returns
        number of bytes of DATA actually transmitted."""
        return self._call_trampolining(
            super(GreenSSLSocket, self).write, data)

    def read(self, *args, **kwargs):
        """Read up to LEN bytes and return them.
        Return zero-length string on EOF."""
        try:
            return self._call_trampolining(
                super(GreenSSLSocket, self).read, *args, **kwargs)
        except IOClosed:
            return b''

    def send(self, data, flags=0):
        if self._sslobj:
            return self._call_trampolining(
                super(GreenSSLSocket, self).send, data, flags)
        else:
            trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
            return socket.send(self, data, flags)

    def sendto(self, data, addr, flags=0):
        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
        if self._sslobj:
            raise ValueError("sendto not allowed on instances of %s" %
                             self.__class__)
        else:
            trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
            return socket.sendto(self, data, addr, flags)

    def sendall(self, data, flags=0):
        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to sendall() on %s" %
                    self.__class__)
            amount = len(data)
            count = 0
            data_to_send = data
            while (count < amount):
                v = self.send(data_to_send)
                count += v
                if v == 0:
                    trampoline(self, write=True, timeout_exc=timeout_exc('timed out'))
                else:
                    data_to_send = data[count:]
            return amount
        else:
            while True:
                try:
                    return socket.sendall(self, data, flags)
                except orig_socket.error as e:
                    if self.act_non_blocking:
                        raise
                    erno = get_errno(e)
                    if erno in greenio.SOCKET_BLOCKING:
                        trampoline(self, write=True,
                                   timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
                    elif erno in greenio.SOCKET_CLOSED:
                        return ''
                    raise

    def recv(self, buflen=1024, flags=0):
        return self._base_recv(buflen, flags, into=False)

    def recv_into(self, buffer, nbytes=None, flags=0):
        # Copied verbatim from CPython
        if buffer and nbytes is None:
            nbytes = len(buffer)
        elif nbytes is None:
            nbytes = 1024
        # end of CPython code

        return self._base_recv(nbytes, flags, into=True, buffer_=buffer)

    def _base_recv(self, nbytes, flags, into, buffer_=None):
        if into:
            plain_socket_function = socket.recv_into
        else:
            plain_socket_function = socket.recv

        # *NOTE: gross, copied code from ssl.py because it's not factored well enough to be used as-is
        if self._sslobj:
            if flags != 0:
                raise ValueError(
                    "non-zero flags not allowed in calls to %s() on %s" %
                    (plain_socket_function.__name__, self.__class__))
            if sys.version_info < (2, 7) and into:
                # Python 2.6 SSLSocket.read() doesn't support reading into
                # a given buffer so we need to emulate
                data = self.read(nbytes)
                buffer_[:len(data)] = data
                read = len(data)
            elif into:
                read = self.read(nbytes, buffer_)
            else:
                read = self.read(nbytes)
            return read
        else:
            while True:
                try:
                    args = [self, nbytes, flags]
                    if into:
                        args.insert(1, buffer_)
                    return plain_socket_function(*args)
                except orig_socket.error as e:
                    if self.act_non_blocking:
                        raise
                    erno = get_errno(e)
                    if erno in greenio.SOCKET_BLOCKING:
                        try:
                            trampoline(
                                self, read=True,
                                timeout=self.gettimeout(), timeout_exc=timeout_exc('timed out'))
                        except IOClosed:
                            return b''
                    elif erno in greenio.SOCKET_CLOSED:
                        return b''
                    raise

    def recvfrom(self, addr, buflen=1024, flags=0):
        if not self.act_non_blocking:
            trampoline(self, read=True, timeout=self.gettimeout(),
                       timeout_exc=timeout_exc('timed out'))
        return super(GreenSSLSocket, self).recvfrom(addr, buflen, flags)

    def recvfrom_into(self, buffer, nbytes=None, flags=0):
        if not self.act_non_blocking:
            trampoline(self, read=True, timeout=self.gettimeout(),
                       timeout_exc=timeout_exc('timed out'))
        return super(GreenSSLSocket, self).recvfrom_into(buffer, nbytes, flags)

    def unwrap(self):
        return GreenSocket(self._call_trampolining(
            super(GreenSSLSocket, self).unwrap))

    def do_handshake(self):
        """Perform a TLS/SSL handshake."""
        return self._call_trampolining(
            super(GreenSSLSocket, self).do_handshake)

    def _socket_connect(self, addr):
        real_connect = socket.connect
        if self.act_non_blocking:
            return real_connect(self, addr)
        else:
            # *NOTE: gross, copied code from greenio because it's not factored
            # well enough to reuse
            if self.gettimeout() is None:
                while True:
                    try:
                        return real_connect(self, addr)
                    except orig_socket.error as exc:
                        if get_errno(exc) in CONNECT_ERR:
                            trampoline(self, write=True)
                        elif get_errno(exc) in CONNECT_SUCCESS:
                            return
                        else:
                            raise
            else:
                end = time.time() + self.gettimeout()
                while True:
                    try:
                        real_connect(self, addr)
                    except orig_socket.error as exc:
                        if get_errno(exc) in CONNECT_ERR:
                            trampoline(
                                self, write=True,
                                timeout=end - time.time(), timeout_exc=timeout_exc('timed out'))
                        elif get_errno(exc) in CONNECT_SUCCESS:
                            return
                        else:
                            raise
                    if time.time() >= end:
                        raise timeout_exc('timed out')

    def connect(self, addr):
        """Connects to remote ADDR, and then wraps the connection in
        an SSL channel."""
        # *NOTE: grrrrr copied this code from ssl.py because of the reference
        # to socket.connect which we don't want to call directly
        if self._sslobj:
            raise ValueError("attempt to connect already-connected SSLSocket!")
        self._socket_connect(addr)
        server_side = False
        try:
            sslwrap = _ssl.sslwrap
        except AttributeError:
            # sslwrap was removed in 3.x and later in 2.7.9
            if six.PY2:
                sslobj = self._context._wrap_socket(self._sock, server_side, ssl_sock=self)
            else:
                context = self.context if PY33 else self._context
                sslobj = context._wrap_socket(self, server_side)
        else:
            sslobj = sslwrap(self._sock, server_side, self.keyfile, self.certfile,
                             self.cert_reqs, self.ssl_version,
                             self.ca_certs, *([self.ciphers] if has_ciphers else []))

        try:
            # This is added in Python 3.5, http://bugs.python.org/issue21965
            SSLObject
        except NameError:
            self._sslobj = sslobj
        else:
            self._sslobj = SSLObject(sslobj, owner=self)

        if self.do_handshake_on_connect:
            self.do_handshake()

    def accept(self):
        """Accepts a new connection from a remote client, and returns
        a tuple containing that new connection wrapped with a server-side
        SSL channel, and the address of the remote client."""
        # RDW grr duplication of code from greenio
        if self.act_non_blocking:
            newsock, addr = socket.accept(self)
        else:
            while True:
                try:
                    newsock, addr = socket.accept(self)
                    set_nonblocking(newsock)
                    break
                except orig_socket.error as e:
                    if get_errno(e) not in greenio.SOCKET_BLOCKING:
                        raise
                    trampoline(self, read=True, timeout=self.gettimeout(),
                               timeout_exc=timeout_exc('timed out'))

        new_ssl = type(self)(
            newsock,
            keyfile=self.keyfile,
            certfile=self.certfile,
            server_side=True,
            cert_reqs=self.cert_reqs,
            ssl_version=self.ssl_version,
            ca_certs=self.ca_certs,
            do_handshake_on_connect=False,
            suppress_ragged_eofs=self.suppress_ragged_eofs)
        return (new_ssl, addr)

    def dup(self):
        raise NotImplementedError("Can't dup an ssl object")

SSLSocket = GreenSSLSocket


def wrap_socket(sock, *a, **kw):
    return GreenSSLSocket(sock, *a, **kw)


if hasattr(__ssl, 'sslwrap_simple'):
    def sslwrap_simple(sock, keyfile=None, certfile=None):
        """A replacement for the old socket.ssl function. Designed
        for compatibility with Python 2.5 and earlier. Will disappear in
        Python 3.0."""
        ssl_sock = GreenSSLSocket(sock, keyfile=keyfile, certfile=certfile,
                                  server_side=False,
                                  cert_reqs=CERT_NONE,
                                  ssl_version=PROTOCOL_SSLv23,
                                  ca_certs=None)
        return ssl_sock


if hasattr(__ssl, 'SSLContext'):
    _original_sslcontext = __ssl.SSLContext

    class GreenSSLContext(_original_sslcontext):
        __slots__ = ()

        def wrap_socket(self, sock, *a, **kw):
            return GreenSSLSocket(sock, *a, _context=self, **kw)

        # https://github.com/eventlet/eventlet/issues/371
        # Thanks to Gevent developers for sharing patch to this problem.
        if hasattr(_original_sslcontext.options, 'setter'):
            # In 3.6, these became properties. They want to access the
            # property __set__ method in the superclass, and they do so by using
            # super(SSLContext, SSLContext). But we rebind SSLContext when we monkey
            # patch, which causes infinite recursion.
            # https://github.com/python/cpython/commit/328067c468f82e4ec1b5c510a4e84509e010f296
            @_original_sslcontext.options.setter
            def options(self, value):
                super(_original_sslcontext, _original_sslcontext).options.__set__(self, value)

            @_original_sslcontext.verify_flags.setter
            def verify_flags(self, value):
                super(_original_sslcontext, _original_sslcontext).verify_flags.__set__(self, value)

            @_original_sslcontext.verify_mode.setter
            def verify_mode(self, value):
                super(_original_sslcontext, _original_sslcontext).verify_mode.__set__(self, value)

    SSLContext = GreenSSLContext

    if hasattr(__ssl, 'create_default_context'):
        _original_create_default_context = __ssl.create_default_context

        def green_create_default_context(*a, **kw):
            # We can't just monkey-patch on the green version of `wrap_socket`
            # on to SSLContext instances, but SSLContext.create_default_context
            # does a bunch of work. Rather than re-implementing it all, just
            # switch out the __class__ to get our `wrap_socket` implementation
            context = _original_create_default_context(*a, **kw)
            context.__class__ = GreenSSLContext
            return context

        create_default_context = green_create_default_context
        _create_default_https_context = green_create_default_context
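
A minimal client sketch for the green module above; the host is a placeholder, and certificate verification is omitted for brevity (real code should pass cert_reqs/ca_certs or use create_default_context).

# Sketch: wrap a green TCP connection in a GreenSSLSocket.
from eventlet.green import socket, ssl

raw = socket.create_connection(('example.com', 443))
tls = ssl.wrap_socket(raw)   # returns a GreenSSLSocket; handshake trampolines
tls.sendall(b'HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n')
print(tls.recv(64))
tls.close()
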
BIN
venv/lib/python2.7/site-packages/eventlet/green/ssl.pyc
Normal file
Binary file not shown.
135
venv/lib/python2.7/site-packages/eventlet/green/subprocess.py
Normal file
@@ -0,0 +1,135 @@
import errno
import sys
from types import FunctionType

import eventlet
from eventlet import greenio
from eventlet import patcher
from eventlet.green import select, threading, time
from eventlet.support import six


__patched__ = ['call', 'check_call', 'Popen']
to_patch = [('select', select), ('threading', threading), ('time', time)]

if sys.version_info > (3, 4):
    from eventlet.green import selectors
    to_patch.append(('selectors', selectors))

patcher.inject('subprocess', globals(), *to_patch)
subprocess_orig = patcher.original("subprocess")
mswindows = sys.platform == "win32"


if getattr(subprocess_orig, 'TimeoutExpired', None) is None:
    # Backported from Python 3.3.
    # https://bitbucket.org/eventlet/eventlet/issue/89
    class TimeoutExpired(Exception):
        """This exception is raised when the timeout expires while waiting for
        a child process.
        """

        def __init__(self, cmd, timeout, output=None):
            self.cmd = cmd
            self.timeout = timeout
            self.output = output

        def __str__(self):
            return ("Command '%s' timed out after %s seconds" %
                    (self.cmd, self.timeout))


# This is the meat of this module, the green version of Popen.
class Popen(subprocess_orig.Popen):
    """eventlet-friendly version of subprocess.Popen"""
    # We do not believe that Windows pipes support non-blocking I/O. At least,
    # the Python file objects stored on our base-class object have no
    # setblocking() method, and the Python fcntl module doesn't exist on
    # Windows. (see eventlet.greenio.set_nonblocking()) As the sole purpose of
    # this __init__() override is to wrap the pipes for eventlet-friendly
    # non-blocking I/O, don't even bother overriding it on Windows.
    if not mswindows:
        def __init__(self, args, bufsize=0, *argss, **kwds):
            self.args = args
            # Forward the call to base-class constructor
            subprocess_orig.Popen.__init__(self, args, 0, *argss, **kwds)
            # Now wrap the pipes, if any. This logic is loosely borrowed from
            # eventlet.processes.Process.run() method.
            for attr in "stdin", "stdout", "stderr":
                pipe = getattr(self, attr)
                if pipe is not None and type(pipe) != greenio.GreenPipe:
                    # https://github.com/eventlet/eventlet/issues/243
                    # AttributeError: '_io.TextIOWrapper' object has no attribute 'mode'
                    mode = getattr(pipe, 'mode', '')
                    if not mode:
                        if pipe.readable():
                            mode += 'r'
                        if pipe.writable():
                            mode += 'w'
                        # ValueError: can't have unbuffered text I/O
                        if bufsize == 0:
                            bufsize = -1
                    wrapped_pipe = greenio.GreenPipe(pipe, mode, bufsize)
                    setattr(self, attr, wrapped_pipe)
        __init__.__doc__ = subprocess_orig.Popen.__init__.__doc__

    def wait(self, timeout=None, check_interval=0.01):
        # Instead of a blocking OS call, this version of wait() uses logic
        # borrowed from the eventlet 0.2 processes.Process.wait() method.
        if timeout is not None:
            endtime = time.time() + timeout
        try:
            while True:
                status = self.poll()
                if status is not None:
                    return status
                if timeout is not None and time.time() > endtime:
                    raise TimeoutExpired(self.args, timeout)
                eventlet.sleep(check_interval)
        except OSError as e:
            if e.errno == errno.ECHILD:
                # no child process, this happens if the child process
                # already died and has been cleaned up
                return -1
            else:
                raise
    wait.__doc__ = subprocess_orig.Popen.wait.__doc__

    if not mswindows:
        # don't want to rewrite the original _communicate() method, we
        # just want a version that uses eventlet.green.select.select()
        # instead of select.select().
        _communicate = FunctionType(
            six.get_function_code(six.get_unbound_function(
                subprocess_orig.Popen._communicate)),
            globals())
        try:
            _communicate_with_select = FunctionType(
                six.get_function_code(six.get_unbound_function(
                    subprocess_orig.Popen._communicate_with_select)),
                globals())
            _communicate_with_poll = FunctionType(
                six.get_function_code(six.get_unbound_function(
                    subprocess_orig.Popen._communicate_with_poll)),
                globals())
        except AttributeError:
            pass


# Borrow subprocess.call() and check_call(), but patch them so they reference
# OUR Popen class rather than subprocess.Popen.
def patched_function(function):
    new_function = FunctionType(six.get_function_code(function), globals())
    if six.PY3:
        new_function.__kwdefaults__ = function.__kwdefaults__
    new_function.__defaults__ = function.__defaults__
    return new_function


call = patched_function(subprocess_orig.call)
check_call = patched_function(subprocess_orig.check_call)
# check_output is Python 2.7+
if hasattr(subprocess_orig, 'check_output'):
    __patched__.append('check_output')
    check_output = patched_function(subprocess_orig.check_output)
del patched_function
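
A minimal sketch of the green wait() above, assuming a POSIX host with a `sleep` binary: wait(timeout=...) polls cooperatively, so other greenthreads keep running while the child executes.

# Sketch: timing out a slow child without blocking the process.
from eventlet.green import subprocess

p = subprocess.Popen(['sleep', '10'], stdout=subprocess.PIPE)
try:
    p.wait(timeout=0.5)
except subprocess.TimeoutExpired:
    p.kill()
    print('timed out as expected')
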
BIN
venv/lib/python2.7/site-packages/eventlet/green/subprocess.pyc
Normal file
Binary file not shown.
113
venv/lib/python2.7/site-packages/eventlet/green/thread.py
Normal file
@@ -0,0 +1,113 @@
"""Implements the standard thread module, using greenthreads."""
from eventlet.support.six.moves import _thread as __thread
from eventlet.support import greenlets as greenlet, six
from eventlet import greenthread
from eventlet.semaphore import Semaphore as LockType
import sys


__patched__ = ['get_ident', 'start_new_thread', 'start_new', 'allocate_lock',
               'allocate', 'exit', 'interrupt_main', 'stack_size', '_local',
               'LockType', '_count']

error = __thread.error
__threadcount = 0


if six.PY3:
    def _set_sentinel():
        # TODO: this is dummy code; reimplementing it may be needed:
        # https://hg.python.org/cpython/file/b5e9bc4352e1/Modules/_threadmodule.c#l1203
        return allocate_lock()

    TIMEOUT_MAX = __thread.TIMEOUT_MAX


def _count():
    return __threadcount


def get_ident(gr=None):
    if gr is None:
        return id(greenlet.getcurrent())
    else:
        return id(gr)


def __thread_body(func, args, kwargs):
    global __threadcount
    __threadcount += 1
    try:
        func(*args, **kwargs)
    finally:
        __threadcount -= 1


def start_new_thread(function, args=(), kwargs=None):
    if (sys.version_info >= (3, 4)
            and getattr(function, '__module__', '') == 'threading'
            and hasattr(function, '__self__')):
        # Since Python 3.4, threading.Thread uses an internal lock
        # automatically released when the python thread state is deleted.
        # With monkey patching, eventlet uses green threads without python
        # thread state, so the lock is not automatically released.
        #
        # Wrap _bootstrap_inner() to explicitly release the thread state lock
        # when the thread completes.
        thread = function.__self__
        bootstrap_inner = thread._bootstrap_inner

        def wrap_bootstrap_inner():
            try:
                bootstrap_inner()
            finally:
                # The lock can be cleared (ex: by a fork())
                if thread._tstate_lock is not None:
                    thread._tstate_lock.release()

        thread._bootstrap_inner = wrap_bootstrap_inner

    kwargs = kwargs or {}
    g = greenthread.spawn_n(__thread_body, function, args, kwargs)
    return get_ident(g)


start_new = start_new_thread


def allocate_lock(*a):
    return LockType(1)


allocate = allocate_lock


def exit():
    raise greenlet.GreenletExit


exit_thread = __thread.exit_thread


def interrupt_main():
    curr = greenlet.getcurrent()
    if curr.parent and not curr.parent.dead:
        curr.parent.throw(KeyboardInterrupt())
    else:
        raise KeyboardInterrupt()


if hasattr(__thread, 'stack_size'):
    __original_stack_size__ = __thread.stack_size

    def stack_size(size=None):
        if size is None:
            return __original_stack_size__()
        if size > __original_stack_size__():
            return __original_stack_size__(size)
        else:
            pass
            # not going to decrease stack_size, because otherwise other
            # greenlets in this thread will suffer

from eventlet.corolocal import local as _local
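
# --- Editor's illustrative sketch; not part of the upstream eventlet file. ---
# Exercises the green primitives defined above: start_new_thread() spawns a
# greenthread and allocate_lock() hands back a Semaphore-based LockType.
# Example-only; assumes eventlet is importable.
import eventlet
from eventlet.green import thread as green_thread

lock = green_thread.allocate_lock()

def worker():
    with lock:
        print('worker ident: %r' % green_thread.get_ident())

ident = green_thread.start_new_thread(worker, ())
eventlet.sleep(0)  # yield so the spawned greenthread gets to run
print('spawned ident: %r' % ident)
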
BIN
venv/lib/python2.7/site-packages/eventlet/green/thread.pyc
Normal file
Binary file not shown.
120
venv/lib/python2.7/site-packages/eventlet/green/threading.py
Normal file
@@ -0,0 +1,120 @@
"""Implements the standard threading module, using greenthreads."""
from eventlet import patcher
from eventlet.green import thread
from eventlet.green import time
from eventlet.support import greenlets as greenlet, six

__patched__ = ['_start_new_thread', '_allocate_lock',
               '_sleep', 'local', 'stack_size', 'Lock', 'currentThread',
               'current_thread', '_after_fork', '_shutdown']

if six.PY2:
    __patched__ += ['_get_ident']
else:
    __patched__ += ['get_ident', '_set_sentinel']

__orig_threading = patcher.original('threading')
__threadlocal = __orig_threading.local()


patcher.inject(
    'threading',
    globals(),
    ('thread' if six.PY2 else '_thread', thread),
    ('time', time))

del patcher


_count = 1


class _GreenThread(object):
    """Wrapper for GreenThread objects to provide Thread-like attributes
    and methods"""

    def __init__(self, g):
        global _count
        self._g = g
        self._name = 'GreenThread-%d' % _count
        _count += 1

    def __repr__(self):
        return '<_GreenThread(%s, %r)>' % (self._name, self._g)

    def join(self, timeout=None):
        return self._g.wait()

    def getName(self):
        return self._name
    get_name = getName

    def setName(self, name):
        self._name = str(name)
    set_name = setName

    name = property(getName, setName)

    ident = property(lambda self: id(self._g))

    def isAlive(self):
        return True
    is_alive = isAlive

    daemon = property(lambda self: True)

    def isDaemon(self):
        return self.daemon
    is_daemon = isDaemon


__threading = None


def _fixup_thread(t):
    # Some third-party packages (lockfile) will try to patch the
    # threading.Thread class with a get_name attribute if it doesn't
    # exist. Since we might return Thread objects from the original
    # threading package that won't get patched, let's make sure each
    # individual object gets patched too, in case our patched
    # threading.Thread class has been patched. This is why monkey
    # patching can be bad...
    global __threading
    if not __threading:
        __threading = __import__('threading')

    if (hasattr(__threading.Thread, 'get_name') and
            not hasattr(t, 'get_name')):
        t.get_name = t.getName
    return t


def current_thread():
    g = greenlet.getcurrent()
    if not g:
        # Not currently in a greenthread; fall back to standard function
        return _fixup_thread(__orig_threading.current_thread())

    try:
        active = __threadlocal.active
    except AttributeError:
        active = __threadlocal.active = {}

    try:
        t = active[id(g)]
    except KeyError:
        # Add green thread to active if we can clean it up on exit
        def cleanup(g):
            del active[id(g)]
        try:
            g.link(cleanup)
        except AttributeError:
            # Not a GreenThread type, so there's no way to hook into
            # the green thread exiting. Fall back to the standard
            # function then.
            t = _fixup_thread(__orig_threading.currentThread())
        else:
            t = active[id(g)] = _GreenThread(g)

    return t


currentThread = current_thread
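
# --- Editor's illustrative sketch; not part of the upstream eventlet file. ---
# Inside a greenthread, the current_thread() defined above returns a
# _GreenThread wrapper with Thread-like attributes. Example-only.
import eventlet
from eventlet.green import threading as green_threading

def show():
    t = green_threading.current_thread()
    print('%s alive=%r daemon=%r' % (t.name, t.is_alive(), t.daemon))

eventlet.spawn(show).wait()  # prints e.g. "GreenThread-1 alive=True daemon=True"
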
BIN
venv/lib/python2.7/site-packages/eventlet/green/threading.pyc
Normal file
Binary file not shown.
6
venv/lib/python2.7/site-packages/eventlet/green/time.py
Normal file
@@ -0,0 +1,6 @@
__time = __import__('time')
from eventlet.patcher import slurp_properties
__patched__ = ['sleep']
slurp_properties(__time, globals(), ignore=__patched__, srckeys=dir(__time))
from eventlet.greenthread import sleep
sleep  # silence pyflakes
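
# --- Editor's illustrative sketch; not part of the upstream eventlet file. ---
# The green time.sleep() re-exported above yields to the hub instead of
# blocking the OS thread, so other greenthreads run in the meantime.
import eventlet
from eventlet.green import time as green_time

g = eventlet.spawn(lambda: 'ran during sleep')
green_time.sleep(0.01)  # cooperative: the spawned greenthread runs now
print(g.wait())
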
BIN
venv/lib/python2.7/site-packages/eventlet/green/time.pyc
Normal file
Binary file not shown.
40
venv/lib/python2.7/site-packages/eventlet/green/urllib/__init__.py
Normal file
@@ -0,0 +1,40 @@
from eventlet import patcher
from eventlet.green import socket
from eventlet.green import time
from eventlet.green import httplib
from eventlet.green import ftplib
from eventlet.support import six

if six.PY2:
    to_patch = [('socket', socket), ('httplib', httplib),
                ('time', time), ('ftplib', ftplib)]
    try:
        from eventlet.green import ssl
        to_patch.append(('ssl', ssl))
    except ImportError:
        pass

    patcher.inject('urllib', globals(), *to_patch)
    try:
        URLopener
    except NameError:
        patcher.inject('urllib.request', globals(), *to_patch)

    # patch a bunch of things that have imports inside the
    # function body; this is lame and hacky but I don't feel
    # too bad because urllib is a hacky pile of junk that no
    # one should be using anyhow
    URLopener.open_http = patcher.patch_function(URLopener.open_http, ('httplib', httplib))
    if hasattr(URLopener, 'open_https'):
        URLopener.open_https = patcher.patch_function(URLopener.open_https, ('httplib', httplib))

    URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, ('ftplib', ftplib))
    ftpwrapper.init = patcher.patch_function(ftpwrapper.init, ('ftplib', ftplib))
    ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, ('ftplib', ftplib))

del patcher

# Run test program when run as a script
if __name__ == '__main__':
    main()
Binary file not shown.
4
venv/lib/python2.7/site-packages/eventlet/green/urllib/error.py
Normal file
@@ -0,0 +1,4 @@
from eventlet import patcher
from eventlet.green.urllib import response
patcher.inject('urllib.error', globals(), ('urllib.response', response))
del patcher
BIN
venv/lib/python2.7/site-packages/eventlet/green/urllib/error.pyc
Normal file
Binary file not shown.
3
venv/lib/python2.7/site-packages/eventlet/green/urllib/parse.py
Normal file
@@ -0,0 +1,3 @@
from eventlet import patcher
patcher.inject('urllib.parse', globals())
del patcher
BIN
venv/lib/python2.7/site-packages/eventlet/green/urllib/parse.pyc
Normal file
Binary file not shown.
50
venv/lib/python2.7/site-packages/eventlet/green/urllib/request.py
Normal file
@@ -0,0 +1,50 @@
from eventlet import patcher
from eventlet.green import ftplib, http, os, socket, time
from eventlet.green.http import client as http_client
from eventlet.green.urllib import error, parse, response

# TODO should we also have green email version?
# import email


to_patch = [
    # This (http module) is needed here, otherwise test__greenness hangs
    # forever on Python 3 because parts of non-green http (including
    # http.client) leak into our patched urllib.request. There may be a nicer
    # way to handle this (I didn't dig too deep) but this does the job. Jakub
    ('http', http),

    ('http.client', http_client),
    ('os', os),
    ('socket', socket),
    ('time', time),
    ('urllib.error', error),
    ('urllib.parse', parse),
    ('urllib.response', response),
]

try:
    from eventlet.green import ssl
except ImportError:
    pass
else:
    to_patch.append(('ssl', ssl))

patcher.inject('urllib.request', globals(), *to_patch)
del to_patch

to_patch_in_functions = [('ftplib', ftplib)]
del ftplib

FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, *to_patch_in_functions)
URLopener.open_ftp = patcher.patch_function(URLopener.open_ftp, *to_patch_in_functions)

ftperrors = patcher.patch_function(ftperrors, *to_patch_in_functions)

ftpwrapper.init = patcher.patch_function(ftpwrapper.init, *to_patch_in_functions)
ftpwrapper.retrfile = patcher.patch_function(ftpwrapper.retrfile, *to_patch_in_functions)

del error
del parse
del response
del to_patch_in_functions
Binary file not shown.
3
venv/lib/python2.7/site-packages/eventlet/green/urllib/response.py
Normal file
@@ -0,0 +1,3 @@
from eventlet import patcher
patcher.inject('urllib.response', globals())
del patcher
Binary file not shown.
20
venv/lib/python2.7/site-packages/eventlet/green/urllib2.py
Normal file
@@ -0,0 +1,20 @@
from eventlet import patcher
from eventlet.green import ftplib
from eventlet.green import httplib
from eventlet.green import socket
from eventlet.green import ssl
from eventlet.green import time
from eventlet.green import urllib

patcher.inject(
    'urllib2',
    globals(),
    ('httplib', httplib),
    ('socket', socket),
    ('ssl', ssl),
    ('time', time),
    ('urllib', urllib))

FTPHandler.ftp_open = patcher.patch_function(FTPHandler.ftp_open, ('ftplib', ftplib))

del patcher
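
# --- Editor's illustrative sketch; not part of the upstream eventlet file. ---
# With green httplib/socket/ssl injected as above, many urlopen() calls can
# wait on the network concurrently within one OS thread. The URL is a
# placeholder and network access is required, so treat this as a sketch
# rather than a test.
from __future__ import print_function
import eventlet
from eventlet.green import urllib2 as green_urllib2

urls = ['http://example.com/'] * 3  # placeholder target
pool = eventlet.GreenPool()

def fetch(url):
    return url, len(green_urllib2.urlopen(url).read())

for url, nbytes in pool.imap(fetch, urls):
    print(url, nbytes)
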
BIN
venv/lib/python2.7/site-packages/eventlet/green/urllib2.pyc
Normal file
Binary file not shown.
468
venv/lib/python2.7/site-packages/eventlet/green/zmq.py
Normal file
@@ -0,0 +1,468 @@
# -*- coding: utf-8 -*-
"""The :mod:`zmq` module wraps the :class:`Socket` and :class:`Context`
found in :mod:`pyzmq <zmq>` to be non-blocking.
"""

from __future__ import with_statement

__zmq__ = __import__('zmq')
from eventlet import hubs
from eventlet.patcher import slurp_properties
from eventlet.support import greenlets as greenlet

__patched__ = ['Context', 'Socket']
slurp_properties(__zmq__, globals(), ignore=__patched__)

from collections import deque

try:
    # alias XREQ/XREP to DEALER/ROUTER if available
    if not hasattr(__zmq__, 'XREQ'):
        XREQ = DEALER
    if not hasattr(__zmq__, 'XREP'):
        XREP = ROUTER
except NameError:
    pass


class LockReleaseError(Exception):
    pass


class _QueueLock(object):
    """A Lock that can be acquired by at most one thread. Any other
    thread calling acquire will be blocked in a queue. When release
    is called, the threads are awoken in the order they blocked,
    one at a time. This lock can be acquired recursively by the same
    thread."""

    def __init__(self):
        self._waiters = deque()
        self._count = 0
        self._holder = None
        self._hub = hubs.get_hub()

    def __nonzero__(self):
        return bool(self._count)

    __bool__ = __nonzero__

    def __enter__(self):
        self.acquire()

    def __exit__(self, type, value, traceback):
        self.release()

    def acquire(self):
        current = greenlet.getcurrent()
        if (self._waiters or self._count > 0) and self._holder is not current:
            # block until lock is free
            self._waiters.append(current)
            self._hub.switch()
            w = self._waiters.popleft()

            assert w is current, 'Waiting threads woken out of order'
            assert self._count == 0, 'After waking a thread, the lock must be unacquired'

        self._holder = current
        self._count += 1

    def release(self):
        if self._count <= 0:
            raise LockReleaseError("Cannot release unacquired lock")

        self._count -= 1
        if self._count == 0:
            self._holder = None
            if self._waiters:
                # wake next
                self._hub.schedule_call_global(0, self._waiters[0].switch)
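
# --- Editor's illustrative aside; not part of the upstream eventlet file. ---
# _QueueLock above is re-entrant for its holder and wakes waiters in FIFO
# order. A minimal self-check of the re-entrant path (no second greenthread
# involved); guarded so it never runs on import.
if __name__ == '__main__':
    _demo_lock = _QueueLock()
    with _demo_lock:
        with _demo_lock:  # the same greenthread may acquire recursively
            assert bool(_demo_lock)
    assert not _demo_lock  # fully released after matching exits
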
class _BlockedThread(object):
    """Is either empty, or represents a single blocked thread that
    blocked itself by calling the block() method. The thread can be
    awoken by calling wake(). wake() can be called multiple times and
    all but the first call will have no effect."""

    def __init__(self):
        self._blocked_thread = None
        self._wakeupper = None
        self._hub = hubs.get_hub()

    def __nonzero__(self):
        return self._blocked_thread is not None

    __bool__ = __nonzero__

    def block(self, deadline=None):
        if self._blocked_thread is not None:
            raise Exception("Cannot block more than one thread on one BlockedThread")
        self._blocked_thread = greenlet.getcurrent()

        if deadline is not None:
            self._hub.schedule_call_local(deadline - self._hub.clock(), self.wake)

        try:
            self._hub.switch()
        finally:
            self._blocked_thread = None
            # cleanup the wakeup task
            if self._wakeupper is not None:
                # Important to cancel the wakeup task so it doesn't
                # spuriously wake this greenthread later on.
                self._wakeupper.cancel()
                self._wakeupper = None

    def wake(self):
        """Schedules the blocked thread to be awoken and returns
        True. If wake has already been called or if there is no
        blocked thread, then this call has no effect and returns
        False."""
        if self._blocked_thread is not None and self._wakeupper is None:
            self._wakeupper = self._hub.schedule_call_global(0, self._blocked_thread.switch)
            return True
        return False
class Context(__zmq__.Context):
    """Subclass of :class:`zmq.Context`
    """

    def socket(self, socket_type):
        """Overridden method to ensure that the green version of socket is used

        Behaves the same as :meth:`zmq.Context.socket`, but ensures
        that a :class:`Socket` with all of its send and recv methods set to be
        non-blocking is returned
        """
        if self.closed:
            raise ZMQError(ENOTSUP)
        return Socket(self, socket_type)


def _wraps(source_fn):
    """A decorator that copies the __name__ and __doc__ from the given
    function
    """
    def wrapper(dest_fn):
        dest_fn.__name__ = source_fn.__name__
        dest_fn.__doc__ = source_fn.__doc__
        return dest_fn
    return wrapper

# Implementation notes: Each socket in 0mq contains a pipe that the
# background IO threads use to communicate with the socket. These
# events are important because they tell the socket when it is able to
# send and when it has messages waiting to be received. The read end
# of the events pipe is the same FD that getsockopt(zmq.FD) returns.
#
# Events are read from the socket's event pipe only on the thread that
# the 0mq context is associated with, which is the native thread the
# greenthreads are running on, and the only operations that cause the
# events to be read and processed are send(), recv() and
# getsockopt(zmq.EVENTS). This means that after doing any of these
# three operations, the ability of the socket to send or receive a
# message without blocking may have changed, but after the events are
# read the FD is no longer readable so the hub may not signal our
# listener.
#
# If we understand that after calling send() a message might be ready
# to be received and that after calling recv() a message might be able
# to be sent, what should we do next? There are two approaches:
#
# 1. Always wake the other thread if there is one waiting. This
#    wakeup may be spurious because the socket might not actually be
#    ready for a send() or recv(). However, if a thread is in a
#    tight-loop successfully calling send() or recv() then the wakeups
#    are naturally batched and there's very little cost added to each
#    send/recv call.
#
# or
#
# 2. Call getsockopt(zmq.EVENTS) and explicitly check if the other
#    thread should be woken up. This avoids spurious wake-ups but may
#    add overhead because getsockopt will cause all events to be
#    processed, whereas send and recv throttle processing
#    events. Admittedly, all of the events will need to be processed
#    eventually, but it is likely faster to batch the processing.
#
# Which approach is better? I have no idea.
#
# TODO:
# - Support MessageTrackers and make MessageTracker.wait green
_Socket = __zmq__.Socket
_Socket_recv = _Socket.recv
_Socket_send = _Socket.send
_Socket_send_multipart = _Socket.send_multipart
_Socket_recv_multipart = _Socket.recv_multipart
_Socket_send_string = _Socket.send_string
_Socket_recv_string = _Socket.recv_string
_Socket_send_pyobj = _Socket.send_pyobj
_Socket_recv_pyobj = _Socket.recv_pyobj
_Socket_send_json = _Socket.send_json
_Socket_recv_json = _Socket.recv_json
_Socket_getsockopt = _Socket.getsockopt


class Socket(_Socket):
    """Green version of :class:`zmq.core.socket.Socket`.

    The following three methods are always overridden:
        * send
        * recv
        * getsockopt
    to ensure that the ``zmq.NOBLOCK`` flag is set and that sending or
    receiving is deferred to the hub (using :func:`eventlet.hubs.trampoline`)
    if a ``zmq.EAGAIN`` (retry) error is raised.

    For some socket types, the following methods are also overridden:
        * send_multipart
        * recv_multipart
    """
    def __init__(self, context, socket_type):
        super(Socket, self).__init__(context, socket_type)

        self.__dict__['_eventlet_send_event'] = _BlockedThread()
        self.__dict__['_eventlet_recv_event'] = _BlockedThread()
        self.__dict__['_eventlet_send_lock'] = _QueueLock()
        self.__dict__['_eventlet_recv_lock'] = _QueueLock()

        def event(fd):
            # Some events arrived at the zmq socket. This may mean
            # there's a message that can be read or there's space for
            # a message to be written.
            send_wake = self._eventlet_send_event.wake()
            recv_wake = self._eventlet_recv_event.wake()
            if not send_wake and not recv_wake:
                # if no waiting send or recv thread was woken up, then
                # force the zmq socket's events to be processed to
                # avoid repeated wakeups
                _Socket_getsockopt(self, EVENTS)

        hub = hubs.get_hub()
        self.__dict__['_eventlet_listener'] = hub.add(hub.READ,
                                                      self.getsockopt(FD),
                                                      event,
                                                      lambda _: None,
                                                      lambda: None)
        self.__dict__['_eventlet_clock'] = hub.clock

    @_wraps(_Socket.close)
    def close(self, linger=None):
        super(Socket, self).close(linger)
        if self._eventlet_listener is not None:
            hubs.get_hub().remove(self._eventlet_listener)
            self.__dict__['_eventlet_listener'] = None
            # wake any blocked threads
            self._eventlet_send_event.wake()
            self._eventlet_recv_event.wake()

    @_wraps(_Socket.getsockopt)
    def getsockopt(self, option):
        result = _Socket_getsockopt(self, option)
        if option == EVENTS:
            # Getting the events causes the zmq socket to process
            # events which may mean a msg can be sent or received. If
            # there is a greenthread blocked and waiting for events,
            # it will miss the edge-triggered read event, so wake it
            # up.
            if (result & POLLOUT):
                self._eventlet_send_event.wake()
            if (result & POLLIN):
                self._eventlet_recv_event.wake()
        return result

    @_wraps(_Socket.send)
    def send(self, msg, flags=0, copy=True, track=False):
        """A send method that's safe to use when multiple greenthreads
        are calling send, send_multipart, recv and recv_multipart on
        the same socket.
        """
        if flags & NOBLOCK:
            result = _Socket_send(self, msg, flags, copy, track)
            # Instead of calling both wake methods, could call
            # self.getsockopt(EVENTS) which would trigger wakeups if
            # needed.
            self._eventlet_send_event.wake()
            self._eventlet_recv_event.wake()
            return result

        # TODO: pyzmq will copy the message buffer and create Message
        # objects under some circumstances. We could do that work here
        # once to avoid doing it every time the send is retried.
        flags |= NOBLOCK
        with self._eventlet_send_lock:
            while True:
                try:
                    return _Socket_send(self, msg, flags, copy, track)
                except ZMQError as e:
                    if e.errno == EAGAIN:
                        self._eventlet_send_event.block()
                    else:
                        raise
                finally:
                    # The call to send processes 0mq events and may
                    # make the socket ready to recv. Wake the next
                    # receiver. (Could check EVENTS for POLLIN here)
                    self._eventlet_recv_event.wake()
    @_wraps(_Socket.send_multipart)
    def send_multipart(self, msg_parts, flags=0, copy=True, track=False):
        """A send_multipart method that's safe to use when multiple
        greenthreads are calling send, send_multipart, recv and
        recv_multipart on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_send_multipart(self, msg_parts, flags, copy, track)

        # acquire lock here so the subsequent calls to send for the
        # message parts after the first don't block
        with self._eventlet_send_lock:
            return _Socket_send_multipart(self, msg_parts, flags, copy, track)

    @_wraps(_Socket.send_string)
    def send_string(self, u, flags=0, copy=True, encoding='utf-8'):
        """A send_string method that's safe to use when multiple
        greenthreads are calling send, send_string, recv and
        recv_string on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_send_string(self, u, flags, copy, encoding)

        # acquire lock here so the subsequent calls to send for the
        # message parts after the first don't block
        with self._eventlet_send_lock:
            return _Socket_send_string(self, u, flags, copy, encoding)

    @_wraps(_Socket.send_pyobj)
    def send_pyobj(self, obj, flags=0, protocol=2):
        """A send_pyobj method that's safe to use when multiple
        greenthreads are calling send, send_pyobj, recv and
        recv_pyobj on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_send_pyobj(self, obj, flags, protocol)

        # acquire lock here so the subsequent calls to send for the
        # message parts after the first don't block
        with self._eventlet_send_lock:
            return _Socket_send_pyobj(self, obj, flags, protocol)

    @_wraps(_Socket.send_json)
    def send_json(self, obj, flags=0, **kwargs):
        """A send_json method that's safe to use when multiple
        greenthreads are calling send, send_json, recv and
        recv_json on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_send_json(self, obj, flags, **kwargs)

        # acquire lock here so the subsequent calls to send for the
        # message parts after the first don't block
        with self._eventlet_send_lock:
            return _Socket_send_json(self, obj, flags, **kwargs)

    @_wraps(_Socket.recv)
    def recv(self, flags=0, copy=True, track=False):
        """A recv method that's safe to use when multiple greenthreads
        are calling send, send_multipart, recv and recv_multipart on
        the same socket.
        """
        if flags & NOBLOCK:
            msg = _Socket_recv(self, flags, copy, track)
            # Instead of calling both wake methods, could call
            # self.getsockopt(EVENTS) which would trigger wakeups if
            # needed.
            self._eventlet_send_event.wake()
            self._eventlet_recv_event.wake()
            return msg

        deadline = None
        if hasattr(__zmq__, 'RCVTIMEO'):
            sock_timeout = self.getsockopt(__zmq__.RCVTIMEO)
            if sock_timeout == -1:
                pass
            elif sock_timeout > 0:
                deadline = self._eventlet_clock() + sock_timeout / 1000.0
            else:
                raise ValueError(sock_timeout)

        flags |= NOBLOCK
        with self._eventlet_recv_lock:
            while True:
                try:
                    return _Socket_recv(self, flags, copy, track)
                except ZMQError as e:
                    if e.errno == EAGAIN:
                        # zmq in its wisdom decided to reuse EAGAIN for timeouts
                        if deadline is not None and self._eventlet_clock() > deadline:
                            e.is_timeout = True
                            raise

                        self._eventlet_recv_event.block(deadline=deadline)
                    else:
                        raise
                finally:
                    # The call to recv processes 0mq events and may
                    # make the socket ready to send. Wake the next
                    # sender. (Could check EVENTS for POLLOUT here)
                    self._eventlet_send_event.wake()
    @_wraps(_Socket.recv_multipart)
    def recv_multipart(self, flags=0, copy=True, track=False):
        """A recv_multipart method that's safe to use when multiple
        greenthreads are calling send, send_multipart, recv and
        recv_multipart on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_recv_multipart(self, flags, copy, track)

        # acquire lock here so the subsequent calls to recv for the
        # message parts after the first don't block
        with self._eventlet_recv_lock:
            return _Socket_recv_multipart(self, flags, copy, track)

    @_wraps(_Socket.recv_string)
    def recv_string(self, flags=0, encoding='utf-8'):
        """A recv_string method that's safe to use when multiple
        greenthreads are calling send, send_string, recv and
        recv_string on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_recv_string(self, flags, encoding)

        # acquire lock here so the subsequent calls to recv for the
        # message parts after the first don't block
        with self._eventlet_recv_lock:
            return _Socket_recv_string(self, flags, encoding)

    @_wraps(_Socket.recv_json)
    def recv_json(self, flags=0, **kwargs):
        """A recv_json method that's safe to use when multiple
        greenthreads are calling send, send_json, recv and
        recv_json on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_recv_json(self, flags, **kwargs)

        # acquire lock here so the subsequent calls to recv for the
        # message parts after the first don't block
        with self._eventlet_recv_lock:
            return _Socket_recv_json(self, flags, **kwargs)

    @_wraps(_Socket.recv_pyobj)
    def recv_pyobj(self, flags=0):
        """A recv_pyobj method that's safe to use when multiple
        greenthreads are calling send, send_pyobj, recv and
        recv_pyobj on the same socket.
        """
        if flags & NOBLOCK:
            return _Socket_recv_pyobj(self, flags)

        # acquire lock here so the subsequent calls to recv for the
        # message parts after the first don't block
        with self._eventlet_recv_lock:
            return _Socket_recv_pyobj(self, flags)
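
# --- Editor's illustrative sketch; not part of the upstream eventlet file. ---
# A REQ/REP round-trip over inproc using the green Socket above: recv()
# parks only the calling greenthread, so one OS thread can host both ends.
# Assumes pyzmq is installed; the endpoint name is example-only.
import eventlet
from eventlet.green import zmq as green_zmq

ctx = green_zmq.Context()
rep = ctx.socket(green_zmq.REP)
rep.bind('inproc://green-demo')   # bind before connect, as inproc requires
req = ctx.socket(green_zmq.REQ)
req.connect('inproc://green-demo')

def responder():
    msg = rep.recv()              # blocks this greenthread, not the process
    rep.send(b'pong: ' + msg)

eventlet.spawn(responder)
req.send(b'ping')
print(req.recv())  # b'pong: ping'
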
BIN
venv/lib/python2.7/site-packages/eventlet/green/zmq.pyc
Normal file
Binary file not shown.
8
venv/lib/python2.7/site-packages/eventlet/greenio/__init__.py
Normal file
@@ -0,0 +1,8 @@
from eventlet.support import six

from eventlet.greenio.base import *  # noqa

if six.PY2:
    from eventlet.greenio.py2 import *  # noqa
else:
    from eventlet.greenio.py3 import *  # noqa
BIN
venv/lib/python2.7/site-packages/eventlet/greenio/__init__.pyc
Normal file
Binary file not shown.
Some files were not shown because too many files have changed in this diff.