om@3
|
1 |
"""A high-speed, production ready, thread pooled, generic HTTP server.
|
om@3
|
2 |
|
om@3
|
3 |
Simplest example on how to use this module directly
|
om@3
|
4 |
(without using CherryPy's application machinery)::
|
om@3
|
5 |
|
om@3
|
6 |
from cherrypy import wsgiserver
|
om@3
|
7 |
|
om@3
|
8 |
def my_crazy_app(environ, start_response):
|
om@3
|
9 |
status = '200 OK'
|
om@3
|
10 |
response_headers = [('Content-type','text/plain')]
|
om@3
|
11 |
start_response(status, response_headers)
|
om@3
|
12 |
return ['Hello world!']
|
om@3
|
13 |
|
om@3
|
14 |
server = wsgiserver.CherryPyWSGIServer(
|
om@3
|
15 |
('0.0.0.0', 8070), my_crazy_app,
|
om@3
|
16 |
server_name='www.cherrypy.example')
|
om@3
|
17 |
server.start()
|
om@3
|
18 |
|
om@3
|
19 |
The CherryPy WSGI server can serve as many WSGI applications
|
om@3
|
20 |
as you want in one instance by using a WSGIPathInfoDispatcher::
|
om@3
|
21 |
|
om@3
|
22 |
d = WSGIPathInfoDispatcher({'/': my_crazy_app, '/blog': my_blog_app})
|
om@3
|
23 |
server = wsgiserver.CherryPyWSGIServer(('0.0.0.0', 80), d)
|
om@3
|
24 |
|
om@3
|
25 |
Want SSL support? Just set server.ssl_adapter to an SSLAdapter instance.
|
om@3
|
26 |
|
om@3
|
27 |
This won't call the CherryPy engine (application side) at all, only the
|
om@3
|
28 |
HTTP server, which is independent from the rest of CherryPy. Don't
|
om@3
|
29 |
let the name "CherryPyWSGIServer" throw you; the name merely reflects
|
om@3
|
30 |
its origin, not its coupling.
|
om@3
|
31 |
|
om@3
|
32 |
For those of you wanting to understand internals of this module, here's the
|
om@3
|
33 |
basic call flow. The server's listening thread runs a very tight loop,
|
om@3
|
34 |
sticking incoming connections onto a Queue::
|
om@3
|
35 |
|
om@3
|
36 |
server = CherryPyWSGIServer(...)
|
om@3
|
37 |
server.start()
|
om@3
|
38 |
while True:
|
om@3
|
39 |
tick()
|
om@3
|
40 |
# This blocks until a request comes in:
|
om@3
|
41 |
child = socket.accept()
|
om@3
|
42 |
conn = HTTPConnection(child, ...)
|
om@3
|
43 |
server.requests.put(conn)
|
om@3
|
44 |
|
om@3
|
45 |
Worker threads are kept in a pool and poll the Queue, popping off and then
|
om@3
|
46 |
handling each connection in turn. Each connection can consist of an arbitrary
|
om@3
|
47 |
number of requests and their responses, so we run a nested loop::
|
om@3
|
48 |
|
om@3
|
49 |
while True:
|
om@3
|
50 |
conn = server.requests.get()
|
om@3
|
51 |
conn.communicate()
|
om@3
|
52 |
-> while True:
|
om@3
|
53 |
req = HTTPRequest(...)
|
om@3
|
54 |
req.parse_request()
|
om@3
|
55 |
-> # Read the Request-Line, e.g. "GET /page HTTP/1.1"
|
om@3
|
56 |
req.rfile.readline()
|
om@3
|
57 |
read_headers(req.rfile, req.inheaders)
|
om@3
|
58 |
req.respond()
|
om@3
|
59 |
-> response = app(...)
|
om@3
|
60 |
try:
|
om@3
|
61 |
for chunk in response:
|
om@3
|
62 |
if chunk:
|
om@3
|
63 |
req.write(chunk)
|
om@3
|
64 |
finally:
|
om@3
|
65 |
if hasattr(response, "close"):
|
om@3
|
66 |
response.close()
|
om@3
|
67 |
if req.close_connection:
|
om@3
|
68 |
return
|
om@3
|
69 |
"""
|
om@3
|
70 |
|
om@3
|
71 |
CRLF = '\r\n'
|
om@3
|
72 |
import os
|
om@3
|
73 |
import Queue
|
om@3
|
74 |
import re
|
om@3
|
75 |
quoted_slash = re.compile("(?i)%2F")
|
om@3
|
76 |
import rfc822
|
om@3
|
77 |
import socket
|
om@3
|
78 |
import sys
|
om@3
|
79 |
if 'win' in sys.platform and not hasattr(socket, 'IPPROTO_IPV6'):
|
om@3
|
80 |
socket.IPPROTO_IPV6 = 41
|
om@3
|
81 |
try:
|
om@3
|
82 |
import cStringIO as StringIO
|
om@3
|
83 |
except ImportError:
|
om@3
|
84 |
import StringIO
|
om@3
|
85 |
DEFAULT_BUFFER_SIZE = -1
|
om@3
|
86 |
|
om@3
|
87 |
_fileobject_uses_str_type = isinstance(socket._fileobject(None)._rbuf, basestring)
|
om@3
|
88 |
|
om@3
|
89 |
import threading
|
om@3
|
90 |
import time
|
om@3
|
91 |
import traceback
|
om@3
|
92 |
def format_exc(limit=None):
    """Like print_exc() but return a string. Backport for Python 2.3."""
    exc_type, exc_value, exc_tb = sys.exc_info()
    try:
        parts = traceback.format_exception(exc_type, exc_value, exc_tb, limit)
        return ''.join(parts)
    finally:
        # Break the frame -> traceback reference cycle so the exception
        # objects can be collected promptly.
        exc_type = exc_value = exc_tb = None
|
om@3
|
99 |
|
om@3
|
100 |
|
om@3
|
101 |
from urllib import unquote
|
om@3
|
102 |
from urlparse import urlparse
|
om@3
|
103 |
import warnings
|
om@3
|
104 |
|
om@3
|
105 |
import errno
|
om@3
|
106 |
|
om@3
|
107 |
def plat_specific_errors(*errnames):
    """Return error numbers for all errors in errnames on this platform.

    The 'errno' module contains different global constants depending on
    the specific platform (OS). This function will return the list of
    numeric values for a given list of potential names.
    """
    # Collect into a dict keyed by errno value so duplicates (several
    # names mapping to the same number) collapse automatically.
    seen = {}
    for name in errnames:
        if hasattr(errno, name):
            seen[getattr(errno, name)] = None
    return seen.keys()
|
om@3
|
118 |
|
om@3
|
119 |
# Errno values raised when a blocking socket call is interrupted by a signal.
socket_error_eintr = plat_specific_errors("EINTR", "WSAEINTR")

# Socket errors that merely mean the client went away; connections failing
# with these are dropped quietly rather than treated as server faults.
socket_errors_to_ignore = plat_specific_errors(
    "EPIPE",
    "EBADF", "WSAEBADF",
    "ENOTSOCK", "WSAENOTSOCK",
    "ETIMEDOUT", "WSAETIMEDOUT",
    "ECONNREFUSED", "WSAECONNREFUSED",
    "ECONNRESET", "WSAECONNRESET",
    "ECONNABORTED", "WSAECONNABORTED",
    "ENETRESET", "WSAENETRESET",
    "EHOSTDOWN", "EHOSTUNREACH",
    )
# Some timeout paths surface as bare message strings rather than errno ints,
# so this list deliberately mixes ints and strs; membership tests elsewhere
# compare x.args[0] against it and tolerate both.
socket_errors_to_ignore.append("timed out")
socket_errors_to_ignore.append("The read operation timed out")

# Errno values meaning "no data right now" on a non-blocking socket.
socket_errors_nonblocking = plat_specific_errors(
    'EAGAIN', 'EWOULDBLOCK', 'WSAEWOULDBLOCK')

# Headers whose repeated occurrences may be folded into a single
# comma-separated value (consumed by read_headers below).
comma_separated_headers = ['Accept', 'Accept-Charset', 'Accept-Encoding',
    'Accept-Language', 'Accept-Ranges', 'Allow', 'Cache-Control',
    'Connection', 'Content-Encoding', 'Content-Language', 'Expect',
    'If-Match', 'If-None-Match', 'Pragma', 'Proxy-Authenticate', 'TE',
    'Trailer', 'Transfer-Encoding', 'Upgrade', 'Vary', 'Via', 'Warning',
    'WWW-Authenticate']


import logging
# Process-wide statistics registry shared across servers/modules; created
# only once so repeated imports do not clobber existing stats.
if not hasattr(logging, 'statistics'): logging.statistics = {}
|
om@3
|
148 |
|
om@3
|
149 |
|
om@3
|
150 |
def read_headers(rfile, hdict=None):
    """Read headers from the given stream into the given header dict.

    If hdict is None, a new header dict is created. Returns the populated
    header dict.

    Headers which are repeated are folded together using a comma if their
    specification so dictates.

    This function raises ValueError when the read bytes violate the HTTP spec.
    You should probably return "400 Bad Request" if this happens.
    """
    if hdict is None:
        hdict = {}

    while True:
        line = rfile.readline()
        if not line:
            # No more data--illegal end of headers
            raise ValueError("Illegal end of headers.")

        if line == CRLF:
            # Normal end of headers
            break
        if not line.endswith(CRLF):
            raise ValueError("HTTP requires CRLF terminators")

        if line[0] in ' \t':
            # It's a continuation line: reuse k/hname bound by the previous
            # iteration and fold the stripped value in below.
            # NOTE(review): if the very first header line is a continuation,
            # k/hname are unbound and this raises NameError, not the
            # ValueError callers expect -- verify against callers.
            v = line.strip()
        else:
            try:
                k, v = line.split(":", 1)
            except ValueError:
                raise ValueError("Illegal header line.")
            # TODO: what about TE and WWW-Authenticate?
            # Title-case the name so lookups are case-insensitive in effect.
            k = k.strip().title()
            v = v.strip()
            hname = k

        if k in comma_separated_headers:
            # RFC 2616 sec 4.2: repeated fields may be joined with commas.
            existing = hdict.get(hname)
            if existing:
                v = ", ".join((existing, v))
        hdict[hname] = v

    return hdict
|
om@3
|
197 |
|
om@3
|
198 |
|
om@3
|
199 |
class MaxSizeExceeded(Exception):
    """Raised when a request line, header block, or body exceeds its limit."""
    pass
|
om@3
|
201 |
|
om@3
|
202 |
class SizeCheckWrapper(object):
    """Wraps a file-like object, raising MaxSizeExceeded if too large."""

    def __init__(self, rfile, maxlen):
        self.rfile = rfile
        # Byte ceiling; a falsy maxlen disables the check entirely.
        self.maxlen = maxlen
        self.bytes_read = 0

    def _check_length(self):
        if self.maxlen and self.bytes_read > self.maxlen:
            raise MaxSizeExceeded()

    def read(self, size=None):
        chunk = self.rfile.read(size)
        self.bytes_read += len(chunk)
        self._check_length()
        return chunk

    def readline(self, size=None):
        if size is not None:
            line = self.rfile.readline(size)
            self.bytes_read += len(line)
            self._check_length()
            return line

        # No explicit size: pull the line in small bounded pieces so the
        # limit check fires early instead of after slurping a 100MB line.
        pieces = []
        while True:
            piece = self.rfile.readline(256)
            self.bytes_read += len(piece)
            self._check_length()
            pieces.append(piece)
            # A short piece, or one ending in newline, terminates the line.
            # See http://www.cherrypy.org/ticket/421
            if len(piece) < 256 or piece[-1:] == "\n":
                return ''.join(pieces)

    def readlines(self, sizehint=0):
        collected = []
        seen = 0
        while True:
            line = self.readline()
            if not line:
                break
            collected.append(line)
            seen += len(line)
            # Honor sizehint as a soft stop once enough bytes are gathered.
            if 0 < sizehint <= seen:
                break
        return collected

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def next(self):
        chunk = self.rfile.next()
        self.bytes_read += len(chunk)
        self._check_length()
        return chunk
|
om@3
|
263 |
|
om@3
|
264 |
|
om@3
|
265 |
class KnownLengthRFile(object):
    """Wraps a file-like object, returning an empty string when exhausted.

    Reads are capped at `content_length` total bytes so a request body can
    never read past its declared Content-Length into the next request.
    """

    def __init__(self, rfile, content_length):
        self.rfile = rfile
        # Bytes of body still unread; hits 0 when the entity is exhausted.
        self.remaining = content_length

    def read(self, size=None):
        """Read up to `size` bytes (all remaining if size is None)."""
        if self.remaining == 0:
            return ''
        if size is None:
            size = self.remaining
        else:
            size = min(size, self.remaining)

        data = self.rfile.read(size)
        self.remaining -= len(data)
        return data

    def readline(self, size=None):
        """Read one line, never exceeding `size` or the remaining length."""
        if self.remaining == 0:
            return ''
        if size is None:
            size = self.remaining
        else:
            size = min(size, self.remaining)

        data = self.rfile.readline(size)
        self.remaining -= len(data)
        return data

    def readlines(self, sizehint=0):
        """Read all remaining lines; `sizehint` is a soft byte cap."""
        # BUG FIX: this used to call self.readline(sizehint). With the
        # default sizehint=0, readline(0) returned '' immediately, so
        # readlines() always returned [] even with body data remaining.
        # sizehint is only a stop hint, not a per-line limit.
        total = 0
        lines = []
        line = self.readline()
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline()
        return lines

    def close(self):
        self.rfile.close()

    def __iter__(self):
        return self

    def __next__(self):
        # Iterate via readline() so iteration also respects the declared
        # Content-Length instead of reading the underlying stream directly.
        data = self.readline()
        if not data:
            raise StopIteration
        return data

    # Python 2 iterator protocol uses next(); keep both spellings working.
    next = __next__
|
om@3
|
319 |
|
om@3
|
320 |
|
om@3
|
321 |
class ChunkedRFile(object):
    """Wraps a file-like object, returning an empty string when exhausted.

    This class is intended to provide a conforming wsgi.input value for
    request entities that have been encoded with the 'chunked' transfer
    encoding.
    """

    def __init__(self, rfile, maxlen, bufsize=8192):
        self.rfile = rfile
        # Total-byte ceiling (framing included); falsy disables the limit.
        self.maxlen = maxlen
        self.bytes_read = 0
        self.buffer = ''
        self.bufsize = bufsize
        self.closed = False

    def _fetch(self):
        """Read the next chunk into self.buffer.

        Sets self.closed when the terminating zero-size chunk is seen.
        Raises ValueError on malformed framing, MaxSizeExceeded/IOError
        when maxlen would be exceeded.
        """
        if self.closed:
            return

        # Chunk header line: "<hex size>[;extension]CRLF"
        line = self.rfile.readline()
        self.bytes_read += len(line)

        if self.maxlen and self.bytes_read > self.maxlen:
            raise MaxSizeExceeded("Request Entity Too Large", self.maxlen)

        line = line.strip().split(";", 1)
        chunk_size = line.pop(0)
        try:
            chunk_size = int(chunk_size, 16)
        except ValueError:
            raise ValueError("Bad chunked transfer size: " + repr(chunk_size))

        if chunk_size <= 0:
            # The zero-size chunk marks end-of-body (trailers may follow).
            self.closed = True
            return

        ## if line: chunk_extension = line[0]

        if self.maxlen and self.bytes_read + chunk_size > self.maxlen:
            raise IOError("Request Entity Too Large")

        chunk = self.rfile.read(chunk_size)
        self.bytes_read += len(chunk)
        self.buffer += chunk

        # Each chunk's data is followed by a bare CRLF.
        crlf = self.rfile.read(2)
        if crlf != CRLF:
            raise ValueError(
                "Bad chunked transfer coding (expected '\\r\\n', "
                "got " + repr(crlf) + ")")

    def read(self, size=None):
        """Read up to `size` bytes (everything remaining if size is None)."""
        data = ''
        while True:
            if size and len(data) >= size:
                return data

            if not self.buffer:
                self._fetch()
                if not self.buffer:
                    # EOF
                    return data

            if size:
                remaining = size - len(data)
                data += self.buffer[:remaining]
                self.buffer = self.buffer[remaining:]
            else:
                # BUG FIX: the buffer must be emptied once consumed.
                # Previously it was left intact here, so with size=None the
                # loop never reached the EOF test and spun forever.
                data += self.buffer
                self.buffer = ''

    def readline(self, size=None):
        """Read one line (including the newline), capped at `size` bytes."""
        data = ''
        while True:
            if size and len(data) >= size:
                return data

            if not self.buffer:
                self._fetch()
                if not self.buffer:
                    # EOF
                    return data

            newline_pos = self.buffer.find('\n')
            if newline_pos == -1:
                # No newline buffered yet: take what we may and keep looping.
                if size:
                    remaining = size - len(data)
                    data += self.buffer[:remaining]
                    self.buffer = self.buffer[remaining:]
                else:
                    data += self.buffer
                    self.buffer = ''
            else:
                # BUG FIX: consume *through* the newline and return. The old
                # code stopped in front of '\n' without returning, so the
                # next pass added zero bytes forever (infinite loop) and the
                # newline never appeared in the returned line.
                take = newline_pos + 1
                if size:
                    take = min(take, size - len(data))
                data += self.buffer[:take]
                self.buffer = self.buffer[take:]
                return data

    def readlines(self, sizehint=0):
        """Read all lines; stop early once `sizehint` bytes were returned."""
        # Shamelessly stolen from StringIO
        total = 0
        lines = []
        line = self.readline(sizehint)
        while line:
            lines.append(line)
            total += len(line)
            if 0 < sizehint <= total:
                break
            line = self.readline(sizehint)
        return lines

    def read_trailer_lines(self):
        """Yield raw trailer lines; only valid after the body is consumed."""
        if not self.closed:
            raise ValueError(
                "Cannot read trailers until the request body has been read.")

        while True:
            line = self.rfile.readline()
            if not line:
                # No more data--illegal end of headers
                raise ValueError("Illegal end of headers.")

            self.bytes_read += len(line)
            if self.maxlen and self.bytes_read > self.maxlen:
                raise IOError("Request Entity Too Large")

            if line == CRLF:
                # Normal end of headers
                break
            if not line.endswith(CRLF):
                raise ValueError("HTTP requires CRLF terminators")

            yield line

    def close(self):
        self.rfile.close()

    def __iter__(self):
        # BUG FIX: the old implementation referenced an undefined name
        # `sizehint`, raising NameError on the first iteration.
        line = self.readline()
        while line:
            yield line
            line = self.readline()
|
om@3
|
471 |
|
om@3
|
472 |
|
om@3
|
473 |
class HTTPRequest(object):
|
om@3
|
474 |
"""An HTTP Request (and response).
|
om@3
|
475 |
|
om@3
|
476 |
A single HTTP connection may consist of multiple request/response pairs.
|
om@3
|
477 |
"""
|
om@3
|
478 |
|
om@3
|
479 |
server = None
|
om@3
|
480 |
"""The HTTPServer object which is receiving this request."""
|
om@3
|
481 |
|
om@3
|
482 |
conn = None
|
om@3
|
483 |
"""The HTTPConnection object on which this request connected."""
|
om@3
|
484 |
|
om@3
|
485 |
inheaders = {}
|
om@3
|
486 |
"""A dict of request headers."""
|
om@3
|
487 |
|
om@3
|
488 |
outheaders = []
|
om@3
|
489 |
"""A list of header tuples to write in the response."""
|
om@3
|
490 |
|
om@3
|
491 |
ready = False
|
om@3
|
492 |
"""When True, the request has been parsed and is ready to begin generating
|
om@3
|
493 |
the response. When False, signals the calling Connection that the response
|
om@3
|
494 |
should not be generated and the connection should close."""
|
om@3
|
495 |
|
om@3
|
496 |
close_connection = False
|
om@3
|
497 |
"""Signals the calling Connection that the request should close. This does
|
om@3
|
498 |
not imply an error! The client and/or server may each request that the
|
om@3
|
499 |
connection be closed."""
|
om@3
|
500 |
|
om@3
|
501 |
chunked_write = False
|
om@3
|
502 |
"""If True, output will be encoded with the "chunked" transfer-coding.
|
om@3
|
503 |
|
om@3
|
504 |
This value is set automatically inside send_headers."""
|
om@3
|
505 |
|
om@3
|
506 |
def __init__(self, server, conn):
|
om@3
|
507 |
self.server= server
|
om@3
|
508 |
self.conn = conn
|
om@3
|
509 |
|
om@3
|
510 |
self.ready = False
|
om@3
|
511 |
self.started_request = False
|
om@3
|
512 |
self.scheme = "http"
|
om@3
|
513 |
if self.server.ssl_adapter is not None:
|
om@3
|
514 |
self.scheme = "https"
|
om@3
|
515 |
# Use the lowest-common protocol in case read_request_line errors.
|
om@3
|
516 |
self.response_protocol = 'HTTP/1.0'
|
om@3
|
517 |
self.inheaders = {}
|
om@3
|
518 |
|
om@3
|
519 |
self.status = ""
|
om@3
|
520 |
self.outheaders = []
|
om@3
|
521 |
self.sent_headers = False
|
om@3
|
522 |
self.close_connection = self.__class__.close_connection
|
om@3
|
523 |
self.chunked_read = False
|
om@3
|
524 |
self.chunked_write = self.__class__.chunked_write
|
om@3
|
525 |
|
om@3
|
526 |
def parse_request(self):
|
om@3
|
527 |
"""Parse the next HTTP request start-line and message-headers."""
|
om@3
|
528 |
self.rfile = SizeCheckWrapper(self.conn.rfile,
|
om@3
|
529 |
self.server.max_request_header_size)
|
om@3
|
530 |
try:
|
om@3
|
531 |
self.read_request_line()
|
om@3
|
532 |
except MaxSizeExceeded:
|
om@3
|
533 |
self.simple_response("414 Request-URI Too Long",
|
om@3
|
534 |
"The Request-URI sent with the request exceeds the maximum "
|
om@3
|
535 |
"allowed bytes.")
|
om@3
|
536 |
return
|
om@3
|
537 |
|
om@3
|
538 |
try:
|
om@3
|
539 |
success = self.read_request_headers()
|
om@3
|
540 |
except MaxSizeExceeded:
|
om@3
|
541 |
self.simple_response("413 Request Entity Too Large",
|
om@3
|
542 |
"The headers sent with the request exceed the maximum "
|
om@3
|
543 |
"allowed bytes.")
|
om@3
|
544 |
return
|
om@3
|
545 |
else:
|
om@3
|
546 |
if not success:
|
om@3
|
547 |
return
|
om@3
|
548 |
|
om@3
|
549 |
self.ready = True
|
om@3
|
550 |
|
om@3
|
551 |
    def read_request_line(self):
        """Read, validate, and parse the HTTP Request-Line.

        On success, sets self.uri, self.method, self.scheme, self.path,
        self.qs, self.request_protocol and self.response_protocol. On
        malformed input, writes a simple error response and returns with
        self.ready left False so the connection will close.
        """
        # HTTP/1.1 connections are persistent by default. If a client
        # requests a page, then idles (leaves the connection open),
        # then rfile.readline() will raise socket.error("timed out").
        # Note that it does this based on the value given to settimeout(),
        # and doesn't need the client to request or acknowledge the close
        # (although your TCP stack might suffer for it: cf Apache's history
        # with FIN_WAIT_2).
        request_line = self.rfile.readline()

        # Set started_request to True so communicate() knows to send 408
        # from here on out.
        self.started_request = True
        if not request_line:
            # Force self.ready = False so the connection will close.
            self.ready = False
            return

        if request_line == CRLF:
            # RFC 2616 sec 4.1: "...if the server is reading the protocol
            # stream at the beginning of a message and receives a CRLF
            # first, it should ignore the CRLF."
            # But only ignore one leading line! else we enable a DoS.
            request_line = self.rfile.readline()
            if not request_line:
                self.ready = False
                return

        if not request_line.endswith(CRLF):
            self.simple_response("400 Bad Request", "HTTP requires CRLF terminators")
            return

        try:
            method, uri, req_protocol = request_line.strip().split(" ", 2)
            # Pull the major/minor digits out of e.g. "HTTP/1.1" -> (1, 1).
            rp = int(req_protocol[5]), int(req_protocol[7])
        except (ValueError, IndexError):
            self.simple_response("400 Bad Request", "Malformed Request-Line")
            return

        self.uri = uri
        self.method = method

        # uri may be an abs_path (including "http://host.domain.tld");
        scheme, authority, path = self.parse_request_uri(uri)
        # NOTE(review): for an authority-form Request-URI (e.g. CONNECT),
        # parse_request_uri returns path=None and the '#' test below would
        # raise TypeError -- verify how CONNECT is expected to be handled.
        if '#' in path:
            self.simple_response("400 Bad Request",
                                 "Illegal #fragment in Request-URI.")
            return

        if scheme:
            self.scheme = scheme

        qs = ''
        if '?' in path:
            path, qs = path.split('?', 1)

        # Unquote the path+params (e.g. "/this%20path" -> "/this path").
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec5.html#sec5.1.2
        #
        # But note that "...a URI must be separated into its components
        # before the escaped characters within those components can be
        # safely decoded." http://www.ietf.org/rfc/rfc2396.txt, sec 2.4.2
        # Therefore, "/this%2Fpath" becomes "/this%2Fpath", not "/this/path".
        try:
            atoms = [unquote(x) for x in quoted_slash.split(path)]
        except ValueError, ex:
            self.simple_response("400 Bad Request", ex.args[0])
            return
        # Rejoin on the still-encoded "%2F" so encoded slashes survive.
        path = "%2F".join(atoms)
        self.path = path

        # Note that, like wsgiref and most other HTTP servers,
        # we "% HEX HEX"-unquote the path but not the query string.
        self.qs = qs

        # Compare request and server HTTP protocol versions, in case our
        # server does not support the requested protocol. Limit our output
        # to min(req, server). We want the following output:
        #     request    server     actual written   supported response
        #     protocol   protocol  response protocol    feature set
        # a     1.0        1.0           1.0                1.0
        # b     1.0        1.1           1.1                1.0
        # c     1.1        1.0           1.0                1.0
        # d     1.1        1.1           1.1                1.1
        # Notice that, in (b), the response will be "HTTP/1.1" even though
        # the client only understands 1.0. RFC 2616 10.5.6 says we should
        # only return 505 if the _major_ version is different.
        sp = int(self.server.protocol[5]), int(self.server.protocol[7])

        if sp[0] != rp[0]:
            self.simple_response("505 HTTP Version Not Supported")
            return
        self.request_protocol = req_protocol
        self.response_protocol = "HTTP/%s.%s" % min(rp, sp)
|
om@3
|
645 |
|
om@3
|
646 |
    def read_request_headers(self):
        """Read self.rfile into self.inheaders. Return success."""

        # then all the http headers
        try:
            read_headers(self.rfile, self.inheaders)
        except ValueError, ex:
            self.simple_response("400 Bad Request", ex.args[0])
            return False

        # Reject early, before reading any body, if the declared
        # Content-Length already exceeds the configured limit.
        mrbs = self.server.max_request_body_size
        if mrbs and int(self.inheaders.get("Content-Length", 0)) > mrbs:
            self.simple_response("413 Request Entity Too Large",
                "The entity sent with the request exceeds the maximum "
                "allowed bytes.")
            return False

        # Persistent connection support
        if self.response_protocol == "HTTP/1.1":
            # Both server and client are HTTP/1.1
            if self.inheaders.get("Connection", "") == "close":
                self.close_connection = True
        else:
            # Either the server or client (or both) are HTTP/1.0
            if self.inheaders.get("Connection", "") != "Keep-Alive":
                self.close_connection = True

        # Transfer-Encoding support
        te = None
        if self.response_protocol == "HTTP/1.1":
            te = self.inheaders.get("Transfer-Encoding")
            if te:
                te = [x.strip().lower() for x in te.split(",") if x.strip()]

        self.chunked_read = False

        if te:
            for enc in te:
                if enc == "chunked":
                    self.chunked_read = True
                else:
                    # Note that, even if we see "chunked", we must reject
                    # if there is an extension we don't recognize.
                    self.simple_response("501 Unimplemented")
                    self.close_connection = True
                    return False

        # From PEP 333:
        # "Servers and gateways that implement HTTP 1.1 must provide
        # transparent support for HTTP 1.1's "expect/continue" mechanism.
        # This may be done in any of several ways:
        #   1. Respond to requests containing an Expect: 100-continue request
        #      with an immediate "100 Continue" response, and proceed normally.
        #   2. Proceed with the request normally, but provide the application
        #      with a wsgi.input stream that will send the "100 Continue"
        #      response if/when the application first attempts to read from
        #      the input stream. The read request must then remain blocked
        #      until the client responds.
        #   3. Wait until the client decides that the server does not support
        #      expect/continue, and sends the request body on its own.
        #      (This is suboptimal, and is not recommended.)
        #
        # We used to do 3, but are now doing 1. Maybe we'll do 2 someday,
        # but it seems like it would be a big slowdown for such a rare case.
        if self.inheaders.get("Expect", "") == "100-continue":
            # Don't use simple_response here, because it emits headers
            # we don't want. See http://www.cherrypy.org/ticket/951
            msg = self.server.protocol + " 100 Continue\r\n\r\n"
            try:
                self.conn.wfile.sendall(msg)
            except socket.error, x:
                # Ignore "client went away" errors; re-raise anything real.
                if x.args[0] not in socket_errors_to_ignore:
                    raise
        return True
|
om@3
|
720 |
|
om@3
|
721 |
def parse_request_uri(self, uri):
|
om@3
|
722 |
"""Parse a Request-URI into (scheme, authority, path).
|
om@3
|
723 |
|
om@3
|
724 |
Note that Request-URI's must be one of::
|
om@3
|
725 |
|
om@3
|
726 |
Request-URI = "*" | absoluteURI | abs_path | authority
|
om@3
|
727 |
|
om@3
|
728 |
Therefore, a Request-URI which starts with a double forward-slash
|
om@3
|
729 |
cannot be a "net_path"::
|
om@3
|
730 |
|
om@3
|
731 |
net_path = "//" authority [ abs_path ]
|
om@3
|
732 |
|
om@3
|
733 |
Instead, it must be interpreted as an "abs_path" with an empty first
|
om@3
|
734 |
path segment::
|
om@3
|
735 |
|
om@3
|
736 |
abs_path = "/" path_segments
|
om@3
|
737 |
path_segments = segment *( "/" segment )
|
om@3
|
738 |
segment = *pchar *( ";" param )
|
om@3
|
739 |
param = *pchar
|
om@3
|
740 |
"""
|
om@3
|
741 |
if uri == "*":
|
om@3
|
742 |
return None, None, uri
|
om@3
|
743 |
|
om@3
|
744 |
i = uri.find('://')
|
om@3
|
745 |
if i > 0 and '?' not in uri[:i]:
|
om@3
|
746 |
# An absoluteURI.
|
om@3
|
747 |
# If there's a scheme (and it must be http or https), then:
|
om@3
|
748 |
# http_URL = "http:" "//" host [ ":" port ] [ abs_path [ "?" query ]]
|
om@3
|
749 |
scheme, remainder = uri[:i].lower(), uri[i + 3:]
|
om@3
|
750 |
authority, path = remainder.split("/", 1)
|
om@3
|
751 |
return scheme, authority, path
|
om@3
|
752 |
|
om@3
|
753 |
if uri.startswith('/'):
|
om@3
|
754 |
# An abs_path.
|
om@3
|
755 |
return None, None, uri
|
om@3
|
756 |
else:
|
om@3
|
757 |
# An authority.
|
om@3
|
758 |
return None, uri, None
|
om@3
|
759 |
|
om@3
|
760 |
def respond(self):
|
om@3
|
761 |
"""Call the gateway and write its iterable output."""
|
om@3
|
762 |
mrbs = self.server.max_request_body_size
|
om@3
|
763 |
if self.chunked_read:
|
om@3
|
764 |
self.rfile = ChunkedRFile(self.conn.rfile, mrbs)
|
om@3
|
765 |
else:
|
om@3
|
766 |
cl = int(self.inheaders.get("Content-Length", 0))
|
om@3
|
767 |
if mrbs and mrbs < cl:
|
om@3
|
768 |
if not self.sent_headers:
|
om@3
|
769 |
self.simple_response("413 Request Entity Too Large",
|
om@3
|
770 |
"The entity sent with the request exceeds the maximum "
|
om@3
|
771 |
"allowed bytes.")
|
om@3
|
772 |
return
|
om@3
|
773 |
self.rfile = KnownLengthRFile(self.conn.rfile, cl)
|
om@3
|
774 |
|
om@3
|
775 |
self.server.gateway(self).respond()
|
om@3
|
776 |
|
om@3
|
777 |
if (self.ready and not self.sent_headers):
|
om@3
|
778 |
self.sent_headers = True
|
om@3
|
779 |
self.send_headers()
|
om@3
|
780 |
if self.chunked_write:
|
om@3
|
781 |
self.conn.wfile.sendall("0\r\n\r\n")
|
om@3
|
782 |
|
om@3
|
783 |
def simple_response(self, status, msg=""):
|
om@3
|
784 |
"""Write a simple response back to the client."""
|
om@3
|
785 |
status = str(status)
|
om@3
|
786 |
buf = [self.server.protocol + " " +
|
om@3
|
787 |
status + CRLF,
|
om@3
|
788 |
"Content-Length: %s\r\n" % len(msg),
|
om@3
|
789 |
"Content-Type: text/plain\r\n"]
|
om@3
|
790 |
|
om@3
|
791 |
if status[:3] in ("413", "414"):
|
om@3
|
792 |
# Request Entity Too Large / Request-URI Too Long
|
om@3
|
793 |
self.close_connection = True
|
om@3
|
794 |
if self.response_protocol == 'HTTP/1.1':
|
om@3
|
795 |
# This will not be true for 414, since read_request_line
|
om@3
|
796 |
# usually raises 414 before reading the whole line, and we
|
om@3
|
797 |
# therefore cannot know the proper response_protocol.
|
om@3
|
798 |
buf.append("Connection: close\r\n")
|
om@3
|
799 |
else:
|
om@3
|
800 |
# HTTP/1.0 had no 413/414 status nor Connection header.
|
om@3
|
801 |
# Emit 400 instead and trust the message body is enough.
|
om@3
|
802 |
status = "400 Bad Request"
|
om@3
|
803 |
|
om@3
|
804 |
buf.append(CRLF)
|
om@3
|
805 |
if msg:
|
om@3
|
806 |
if isinstance(msg, unicode):
|
om@3
|
807 |
msg = msg.encode("ISO-8859-1")
|
om@3
|
808 |
buf.append(msg)
|
om@3
|
809 |
|
om@3
|
810 |
try:
|
om@3
|
811 |
self.conn.wfile.sendall("".join(buf))
|
om@3
|
812 |
except socket.error, x:
|
om@3
|
813 |
if x.args[0] not in socket_errors_to_ignore:
|
om@3
|
814 |
raise
|
om@3
|
815 |
|
om@3
|
816 |
def write(self, chunk):
    """Write unbuffered data to the client."""
    wfile = self.conn.wfile
    if not (self.chunked_write and chunk):
        # Either a plain (known-length) response, or an empty chunk,
        # which the chunked coding must not emit mid-stream.
        wfile.sendall(chunk)
    else:
        # chunked transfer-coding: hex size line, payload, trailing CRLF.
        wfile.sendall("%x%s%s%s" % (len(chunk), CRLF, chunk, CRLF))
|
om@3
|
823 |
|
om@3
|
824 |
def send_headers(self):
    """Assert, process, and send the HTTP response message-headers.

    You must set self.status, and self.outheaders before calling this.
    """
    # Lower-cased header names already supplied by the application, so we
    # only add defaults (Connection, Date, Server, Transfer-Encoding)
    # when the app did not set them itself.
    hkeys = [key.lower() for key, value in self.outheaders]
    status = int(self.status[:3])

    if status == 413:
        # Request Entity Too Large. Close conn to avoid garbage.
        self.close_connection = True
    elif "content-length" not in hkeys:
        # "All 1xx (informational), 204 (no content),
        # and 304 (not modified) responses MUST NOT
        # include a message-body." So no point chunking.
        if status < 200 or status in (204, 205, 304):
            pass
        else:
            if (self.response_protocol == 'HTTP/1.1'
                and self.method != 'HEAD'):
                # Use the chunked transfer-coding
                self.chunked_write = True
                self.outheaders.append(("Transfer-Encoding", "chunked"))
            else:
                # Closing the conn is the only way to determine len.
                self.close_connection = True

    if "connection" not in hkeys:
        if self.response_protocol == 'HTTP/1.1':
            # Both server and client are HTTP/1.1 or better
            # (keep-alive is the default; only advertise "close").
            if self.close_connection:
                self.outheaders.append(("Connection", "close"))
        else:
            # Server and/or client are HTTP/1.0
            # (close is the default; only advertise "Keep-Alive").
            if not self.close_connection:
                self.outheaders.append(("Connection", "Keep-Alive"))

    if (not self.close_connection) and (not self.chunked_read):
        # Read any remaining request body data on the socket.
        # "If an origin server receives a request that does not include an
        # Expect request-header field with the "100-continue" expectation,
        # the request includes a request body, and the server responds
        # with a final status code before reading the entire request body
        # from the transport connection, then the server SHOULD NOT close
        # the transport connection until it has read the entire request,
        # or until the client closes the connection. Otherwise, the client
        # might not reliably receive the response message. However, this
        # requirement is not be construed as preventing a server from
        # defending itself against denial-of-service attacks, or from
        # badly broken client implementations."
        # NOTE: 'remaining' only exists on KnownLengthRFile-style objects;
        # getattr defaults to 0 so other rfile types are a no-op here.
        remaining = getattr(self.rfile, 'remaining', 0)
        if remaining > 0:
            self.rfile.read(remaining)

    if "date" not in hkeys:
        self.outheaders.append(("Date", rfc822.formatdate()))

    if "server" not in hkeys:
        self.outheaders.append(("Server", self.server.server_name))

    # Serialize the Status-Line plus all headers and write them in one
    # sendall to minimize small writes on the socket.
    buf = [self.server.protocol + " " + self.status + CRLF]
    for k, v in self.outheaders:
        buf.append(k + ": " + v + CRLF)
    buf.append(CRLF)
    self.conn.wfile.sendall("".join(buf))
|
om@3
|
889 |
|
om@3
|
890 |
|
om@3
|
891 |
class NoSSLError(Exception):
    """Raised when a client speaks plain HTTP to an HTTPS-only socket."""
|
om@3
|
894 |
|
om@3
|
895 |
|
om@3
|
896 |
class FatalSSLAlert(Exception):
    """Raised when the SSL implementation signals a fatal alert."""
|
om@3
|
899 |
|
om@3
|
900 |
|
om@3
|
901 |
class CP_fileobject(socket._fileobject):
    """Faux file object attached to a socket object.

    Extends Python 2's socket._fileobject with:
    - bytes_read / bytes_written counters (used by server stats), and
    - send/recv loops that retry on non-blocking / EINTR socket errors.

    The read()/readline() implementations are selected at class-creation
    time depending on whether the running Python's _fileobject buffers
    with a str (_fileobject_uses_str_type) or a StringIO.
    """

    def __init__(self, *args, **kwargs):
        # Byte counters consumed by WorkerThread/server statistics.
        self.bytes_read = 0
        self.bytes_written = 0
        socket._fileobject.__init__(self, *args, **kwargs)

    def sendall(self, data):
        """Sendall for non-blocking sockets."""
        # Loop until every byte is on the wire, swallowing EAGAIN-style
        # errors; anything not in socket_errors_nonblocking propagates.
        while data:
            try:
                bytes_sent = self.send(data)
                data = data[bytes_sent:]
            except socket.error, e:
                if e.args[0] not in socket_errors_nonblocking:
                    raise

    def send(self, data):
        """Send once on the underlying socket, tracking bytes_written."""
        bytes_sent = self._sock.send(data)
        self.bytes_written += bytes_sent
        return bytes_sent

    def flush(self):
        """Flush the _fileobject write buffer via our retrying sendall."""
        if self._wbuf:
            buffer = "".join(self._wbuf)
            self._wbuf = []
            self.sendall(buffer)

    def recv(self, size):
        """Receive up to size bytes, retrying on EAGAIN/EINTR."""
        while True:
            try:
                data = self._sock.recv(size)
                self.bytes_read += len(data)
                return data
            except socket.error, e:
                if (e.args[0] not in socket_errors_nonblocking
                    and e.args[0] not in socket_error_eintr):
                    raise

    if not _fileobject_uses_str_type:
        # StringIO-buffered variants (self._rbuf is a StringIO).
        def read(self, size=-1):
            """Read size bytes (or to EOF if size < 0) from the socket."""
            # Use max, disallow tiny reads in a loop as they are very inefficient.
            # We never leave read() with any leftover data from a new recv() call
            # in our internal buffer.
            rbufsize = max(self._rbufsize, self.default_bufsize)
            # Our use of StringIO rather than lists of string objects returned by
            # recv() minimizes memory usage and fragmentation that occurs when
            # rbufsize is large compared to the typical return value of recv().
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if size < 0:
                # Read until EOF
                self._rbuf = StringIO.StringIO()  # reset _rbuf. we consume it via buf.
                while True:
                    data = self.recv(rbufsize)
                    if not data:
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or EOF seen, whichever comes first
                buf_len = buf.tell()
                if buf_len >= size:
                    # Already have size bytes in our buffer? Extract and return.
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv

                self._rbuf = StringIO.StringIO()  # reset _rbuf. we consume it via buf.
                while True:
                    left = size - buf_len
                    # recv() will malloc the amount of memory given as its
                    # parameter even though it often returns much less data
                    # than that. The returned data string is short lived
                    # as we copy it into a StringIO and free it. This avoids
                    # fragmentation issues on many platforms.
                    data = self.recv(left)
                    if not data:
                        break
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut. Avoid buffer data copies when:
                        # - We have no data in our buffer.
                        # AND
                        # - Our call to recv returned exactly the
                        #   number of bytes we were asked to read.
                        return data
                    if n == left:
                        buf.write(data)
                        del data  # explicit free
                        break
                    assert n <= left, "recv(%d) returned %d bytes" % (left, n)
                    buf.write(data)
                    buf_len += n
                    del data  # explicit free
                    #assert buf_len == buf.tell()
                return buf.getvalue()

        def readline(self, size=-1):
            """Read one line (up to size bytes if size >= 0) from the socket."""
            buf = self._rbuf
            buf.seek(0, 2)  # seek end
            if buf.tell() > 0:
                # check if we already have it in our buffer
                buf.seek(0)
                bline = buf.readline(size)
                if bline.endswith('\n') or len(bline) == size:
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return bline
                del bline
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    buf.seek(0)
                    buffers = [buf.read()]
                    self._rbuf = StringIO.StringIO()  # reset _rbuf. we consume it via buf.
                    data = None
                    recv = self.recv
                    while data != "\n":
                        data = recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)

                buf.seek(0, 2)  # seek end
                self._rbuf = StringIO.StringIO()  # reset _rbuf. we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        # Keep everything after the newline for the next call.
                        buf.write(data[:nl])
                        self._rbuf.write(data[nl:])
                        del data
                        break
                    buf.write(data)
                return buf.getvalue()
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                buf.seek(0, 2)  # seek end
                buf_len = buf.tell()
                if buf_len >= size:
                    buf.seek(0)
                    rv = buf.read(size)
                    self._rbuf = StringIO.StringIO()
                    self._rbuf.write(buf.read())
                    return rv
                self._rbuf = StringIO.StringIO()  # reset _rbuf. we consume it via buf.
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    left = size - buf_len
                    # did we just receive a newline?
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        # save the excess data to _rbuf
                        self._rbuf.write(data[nl:])
                        if buf_len:
                            buf.write(data[:nl])
                            break
                        else:
                            # Shortcut. Avoid data copy through buf when returning
                            # a substring of our first recv().
                            return data[:nl]
                    n = len(data)
                    if n == size and not buf_len:
                        # Shortcut. Avoid data copy through buf when
                        # returning exactly all of our first recv().
                        return data
                    if n >= left:
                        buf.write(data[:left])
                        self._rbuf.write(data[left:])
                        break
                    buf.write(data)
                    buf_len += n
                    #assert buf_len == buf.tell()
                return buf.getvalue()
    else:
        # str-buffered variants (self._rbuf is a plain str).
        def read(self, size=-1):
            """Read size bytes (or to EOF if size < 0) from the socket."""
            if size < 0:
                # Read until EOF
                buffers = [self._rbuf]
                self._rbuf = ""
                if self._rbufsize <= 1:
                    recv_size = self.default_bufsize
                else:
                    recv_size = self._rbufsize

                while True:
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                return "".join(buffers)
            else:
                # Read until size bytes or EOF seen, whichever comes first
                data = self._rbuf
                buf_len = len(data)
                if buf_len >= size:
                    self._rbuf = data[size:]
                    return data[:size]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    left = size - buf_len
                    recv_size = max(self._rbufsize, left)
                    data = self.recv(recv_size)
                    if not data:
                        break
                    buffers.append(data)
                    n = len(data)
                    if n >= left:
                        # Keep any surplus beyond 'size' for the next read.
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)

        def readline(self, size=-1):
            """Read one line (up to size bytes if size >= 0) from the socket."""
            data = self._rbuf
            if size < 0:
                # Read until \n or EOF, whichever comes first
                if self._rbufsize <= 1:
                    # Speed up unbuffered case
                    assert data == ""
                    buffers = []
                    while data != "\n":
                        data = self.recv(1)
                        if not data:
                            break
                        buffers.append(data)
                    return "".join(buffers)
                nl = data.find('\n')
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    nl = data.find('\n')
                    if nl >= 0:
                        nl += 1
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                return "".join(buffers)
            else:
                # Read until size bytes or \n or EOF seen, whichever comes first
                nl = data.find('\n', 0, size)
                if nl >= 0:
                    nl += 1
                    self._rbuf = data[nl:]
                    return data[:nl]
                buf_len = len(data)
                if buf_len >= size:
                    self._rbuf = data[size:]
                    return data[:size]
                buffers = []
                if data:
                    buffers.append(data)
                self._rbuf = ""
                while True:
                    data = self.recv(self._rbufsize)
                    if not data:
                        break
                    buffers.append(data)
                    left = size - buf_len
                    nl = data.find('\n', 0, left)
                    if nl >= 0:
                        nl += 1
                        self._rbuf = data[nl:]
                        buffers[-1] = data[:nl]
                        break
                    n = len(data)
                    if n >= left:
                        self._rbuf = data[left:]
                        buffers[-1] = data[:left]
                        break
                    buf_len += n
                return "".join(buffers)
|
om@3
|
1199 |
|
om@3
|
1200 |
|
om@3
|
1201 |
class HTTPConnection(object):
    """An HTTP connection (active socket).

    server: the Server object which received this connection.
    socket: the raw socket object (usually TCP) for this connection.
    makefile: a fileobject class for reading from the socket.
    """

    # Peer address info and SSL environ (populated by the server/adapter
    # elsewhere; None until then).
    remote_addr = None
    remote_port = None
    ssl_env = None
    # Buffer sizes for the read/write file objects wrapped around the socket.
    rbufsize = DEFAULT_BUFFER_SIZE
    wbufsize = DEFAULT_BUFFER_SIZE
    # Class used to parse/respond to each request on this connection.
    RequestHandlerClass = HTTPRequest

    def __init__(self, server, sock, makefile=CP_fileobject):
        self.server = server
        self.socket = sock
        self.rfile = makefile(sock, "rb", self.rbufsize)
        self.wfile = makefile(sock, "wb", self.wbufsize)
        # Count of requests handled on this connection (for server stats).
        self.requests_seen = 0

    def communicate(self):
        """Read each request and respond appropriately."""
        request_seen = False
        try:
            while True:
                # (re)set req to None so that if something goes wrong in
                # the RequestHandlerClass constructor, the error doesn't
                # get written to the previous request.
                req = None
                req = self.RequestHandlerClass(self.server, self)

                # This order of operations should guarantee correct pipelining.
                req.parse_request()
                if self.server.stats['Enabled']:
                    self.requests_seen += 1
                if not req.ready:
                    # Something went wrong in the parsing (and the server has
                    # probably already made a simple_response). Return and
                    # let the conn close.
                    return

                request_seen = True
                req.respond()
                if req.close_connection:
                    return
        except socket.error, e:
            errnum = e.args[0]
            # sadly SSL sockets return a different (longer) time out string
            if errnum == 'timed out' or errnum == 'The read operation timed out':
                # Don't error if we're between requests; only error
                # if 1) no request has been started at all, or 2) we're
                # in the middle of a request.
                # See http://www.cherrypy.org/ticket/853
                if (not request_seen) or (req and req.started_request):
                    # Don't bother writing the 408 if the response
                    # has already started being written.
                    if req and not req.sent_headers:
                        try:
                            req.simple_response("408 Request Timeout")
                        except FatalSSLAlert:
                            # Close the connection.
                            return
            elif errnum not in socket_errors_to_ignore:
                if req and not req.sent_headers:
                    try:
                        req.simple_response("500 Internal Server Error",
                                            format_exc())
                    except FatalSSLAlert:
                        # Close the connection.
                        return
            return
        except (KeyboardInterrupt, SystemExit):
            raise
        except FatalSSLAlert:
            # Close the connection.
            return
        except NoSSLError:
            if req and not req.sent_headers:
                # Unwrap our wfile so the plain-text error can reach the
                # client without going through the SSL layer.
                self.wfile = CP_fileobject(self.socket._sock, "wb", self.wbufsize)
                req.simple_response("400 Bad Request",
                    "The client sent a plain HTTP request, but "
                    "this server only speaks HTTPS on this port.")
                self.linger = True
        except Exception:
            if req and not req.sent_headers:
                try:
                    req.simple_response("500 Internal Server Error", format_exc())
                except FatalSSLAlert:
                    # Close the connection.
                    return

    # When True, close() skips the hard socket close so the client has a
    # chance to read the full response before the FIN (set on NoSSLError).
    linger = False

    def close(self):
        """Close the socket underlying this connection."""
        self.rfile.close()

        if not self.linger:
            # Python's socket module does NOT call close on the kernel socket
            # when you call socket.close(). We do so manually here because we
            # want this server to send a FIN TCP segment immediately. Note this
            # must be called *before* calling socket.close(), because the latter
            # drops its reference to the kernel socket.
            if hasattr(self.socket, '_sock'):
                self.socket._sock.close()
            self.socket.close()
        else:
            # On the other hand, sometimes we want to hang around for a bit
            # to make sure the client has a chance to read our entire
            # response. Skipping the close() calls here delays the FIN
            # packet until the socket object is garbage-collected later.
            # Someday, perhaps, we'll do the full lingering_close that
            # Apache does, but not today.
            pass
|
om@3
|
1318 |
|
om@3
|
1319 |
|
om@3
|
1320 |
# Sentinel placed on the request Queue to tell a WorkerThread to exit
# (one sentinel must be queued per running worker).
_SHUTDOWNREQUEST = None
|
om@3
|
1321 |
|
om@3
|
1322 |
class WorkerThread(threading.Thread):
    """Thread which continuously polls a Queue for Connection objects.

    Due to the timing issues of polling a Queue, a WorkerThread does not
    check its own 'ready' flag after it has started. To stop the thread,
    it is necessary to stick a _SHUTDOWNREQUEST object onto the Queue
    (one for each running WorkerThread).
    """

    conn = None
    """The current connection pulled off the Queue, or None."""

    server = None
    """The HTTP Server which spawned this thread, and which owns the
    Queue and is placing active connections into it."""

    ready = False
    """A simple flag for the calling server to know when this thread
    has begun polling the Queue."""


    def __init__(self, server):
        self.ready = False
        self.server = server

        # Cumulative per-thread statistics; the live portion of each
        # figure (for the connection currently being served) is added on
        # demand by the lambdas in self.stats below.
        self.requests_seen = 0
        self.bytes_read = 0
        self.bytes_written = 0
        self.start_time = None
        self.work_time = 0
        # Each stat is a callable taking the stats dict itself, so the
        # throughput entries can reuse the other entries. The
        # "(cond) and 0 or live" pattern is the pre-ternary idiom for
        # "0 if idle else live value" (safe here since the live values
        # are only consulted while start_time is not None).
        self.stats = {
            'Requests': lambda s: self.requests_seen + ((self.start_time is None) and 0 or self.conn.requests_seen),
            'Bytes Read': lambda s: self.bytes_read + ((self.start_time is None) and 0 or self.conn.rfile.bytes_read),
            'Bytes Written': lambda s: self.bytes_written + ((self.start_time is None) and 0 or self.conn.wfile.bytes_written),
            'Work Time': lambda s: self.work_time + ((self.start_time is None) and 0 or time.time() - self.start_time),
            'Read Throughput': lambda s: s['Bytes Read'](s) / (s['Work Time'](s) or 1e-6),
            'Write Throughput': lambda s: s['Bytes Written'](s) / (s['Work Time'](s) or 1e-6),
        }
        threading.Thread.__init__(self)

    def run(self):
        # Register this thread's stats with the server before serving.
        self.server.stats['Worker Threads'][self.getName()] = self.stats
        try:
            self.ready = True
            while True:
                conn = self.server.requests.get()
                if conn is _SHUTDOWNREQUEST:
                    return

                self.conn = conn
                if self.server.stats['Enabled']:
                    self.start_time = time.time()
                try:
                    conn.communicate()
                finally:
                    # Always close the connection and fold its counters
                    # into this thread's cumulative totals.
                    conn.close()
                    if self.server.stats['Enabled']:
                        self.requests_seen += self.conn.requests_seen
                        self.bytes_read += self.conn.rfile.bytes_read
                        self.bytes_written += self.conn.wfile.bytes_written
                        self.work_time += time.time() - self.start_time
                        self.start_time = None
                    self.conn = None
        except (KeyboardInterrupt, SystemExit), exc:
            # Hand the interrupt to the server's main loop to process.
            self.server.interrupt = exc
|
om@3
|
1387 |
|
om@3
|
1388 |
|
om@3
|
1389 |
class ThreadPool(object):
|
om@3
|
1390 |
"""A Request Queue for the CherryPyWSGIServer which pools threads.
|
om@3
|
1391 |
|
om@3
|
1392 |
ThreadPool objects must provide min, get(), put(obj), start()
|
om@3
|
1393 |
and stop(timeout) attributes.
|
om@3
|
1394 |
"""
|
om@3
|
1395 |
|
om@3
|
1396 |
def __init__(self, server, min=10, max=-1):
|
om@3
|
1397 |
self.server = server
|
om@3
|
1398 |
self.min = min
|
om@3
|
1399 |
self.max = max
|
om@3
|
1400 |
self._threads = []
|
om@3
|
1401 |
self._queue = Queue.Queue()
|
om@3
|
1402 |
self.get = self._queue.get
|
om@3
|
1403 |
|
om@3
|
1404 |
def start(self):
|
om@3
|
1405 |
"""Start the pool of threads."""
|
om@3
|
1406 |
for i in range(self.min):
|
om@3
|
1407 |
self._threads.append(WorkerThread(self.server))
|
om@3
|
1408 |
for worker in self._threads:
|
om@3
|
1409 |
worker.setName("CP Server " + worker.getName())
|
om@3
|
1410 |
worker.start()
|
om@3
|
1411 |
for worker in self._threads:
|
om@3
|
1412 |
while not worker.ready:
|
om@3
|
1413 |
time.sleep(.1)
|
om@3
|
1414 |
|
om@3
|
1415 |
def _get_idle(self):
|
om@3
|
1416 |
"""Number of worker threads which are idle. Read-only."""
|
om@3
|
1417 |
return len([t for t in self._threads if t.conn is None])
|
om@3
|
1418 |
idle = property(_get_idle, doc=_get_idle.__doc__)
|
om@3
|
1419 |
|
om@3
|
1420 |
def put(self, obj):
|
om@3
|
1421 |
self._queue.put(obj)
|
om@3
|
1422 |
if obj is _SHUTDOWNREQUEST:
|
om@3
|
1423 |
return
|
om@3
|
1424 |
|
om@3
|
1425 |
def grow(self, amount):
|
om@3
|
1426 |
"""Spawn new worker threads (not above self.max)."""
|
om@3
|
1427 |
for i in range(amount):
|
om@3
|
1428 |
if self.max > 0 and len(self._threads) >= self.max:
|
om@3
|
1429 |
break
|
om@3
|
1430 |
worker = WorkerThread(self.server)
|
om@3
|
1431 |
worker.setName("CP Server " + worker.getName())
|
om@3
|
1432 |
self._threads.append(worker)
|
om@3
|
1433 |
worker.start()
|
om@3
|
1434 |
|
om@3
|
1435 |
def shrink(self, amount):
|
om@3
|
1436 |
"""Kill off worker threads (not below self.min)."""
|
om@3
|
1437 |
# Grow/shrink the pool if necessary.
|
om@3
|
1438 |
# Remove any dead threads from our list
|
om@3
|
1439 |
for t in self._threads:
|
om@3
|
1440 |
if not t.isAlive():
|
om@3
|
1441 |
self._threads.remove(t)
|
om@3
|
1442 |
amount -= 1
|
om@3
|
1443 |
|
om@3
|
1444 |
if amount > 0:
|
om@3
|
1445 |
for i in range(min(amount, len(self._threads) - self.min)):
|
om@3
|
1446 |
# Put a number of shutdown requests on the queue equal
|
om@3
|
1447 |
# to 'amount'. Once each of those is processed by a worker,
|
om@3
|
1448 |
# that worker will terminate and be culled from our list
|
om@3
|
1449 |
# in self.put.
|
om@3
|
1450 |
self._queue.put(_SHUTDOWNREQUEST)
|
om@3
|
1451 |
|
om@3
|
1452 |
def stop(self, timeout=5):
|
om@3
|
1453 |
# Must shut down threads here so the code that calls
|
om@3
|
1454 |
# this method can know when all threads are stopped.
|
om@3
|
1455 |
for worker in self._threads:
|
om@3
|
1456 |
self._queue.put(_SHUTDOWNREQUEST)
|
om@3
|
1457 |
|
om@3
|
1458 |
# Don't join currentThread (when stop is called inside a request).
|
om@3
|
1459 |
current = threading.currentThread()
|
om@3
|
1460 |
if timeout and timeout >= 0:
|
om@3
|
1461 |
endtime = time.time() + timeout
|
om@3
|
1462 |
while self._threads:
|
om@3
|
1463 |
worker = self._threads.pop()
|
om@3
|
1464 |
if worker is not current and worker.isAlive():
|
om@3
|
1465 |
try:
|
om@3
|
1466 |
if timeout is None or timeout < 0:
|
om@3
|
1467 |
worker.join()
|
om@3
|
1468 |
else:
|
om@3
|
1469 |
remaining_time = endtime - time.time()
|
om@3
|
1470 |
if remaining_time > 0:
|
om@3
|
1471 |
worker.join(remaining_time)
|
om@3
|
1472 |
if worker.isAlive():
|
om@3
|
1473 |
# We exhausted the timeout.
|
om@3
|
1474 |
# Forcibly shut down the socket.
|
om@3
|
1475 |
c = worker.conn
|
om@3
|
1476 |
if c and not c.rfile.closed:
|
om@3
|
1477 |
try:
|
om@3
|
1478 |
c.socket.shutdown(socket.SHUT_RD)
|
om@3
|
1479 |
except TypeError:
|
om@3
|
1480 |
# pyOpenSSL sockets don't take an arg
|
om@3
|
1481 |
c.socket.shutdown()
|
om@3
|
1482 |
worker.join()
|
om@3
|
1483 |
except (AssertionError,
|
om@3
|
1484 |
# Ignore repeated Ctrl-C.
|
om@3
|
1485 |
# See http://www.cherrypy.org/ticket/691.
|
om@3
|
1486 |
KeyboardInterrupt), exc1:
|
om@3
|
1487 |
pass
|
om@3
|
1488 |
|
om@3
|
1489 |
def _get_qsize(self):
|
om@3
|
1490 |
return self._queue.qsize()
|
om@3
|
1491 |
qsize = property(_get_qsize)
|
om@3
|
1492 |
|
om@3
|
1493 |
|
om@3
|
1494 |
|
om@3
|
1495 |
# Platform-dependent definition of prevent_socket_inheritance:
# POSIX (fcntl available) -> set FD_CLOEXEC on the fd;
# Windows (ctypes available) -> clear HANDLE_FLAG_INHERIT;
# neither -> a no-op stub.
try:
    import fcntl
except ImportError:
    try:
        from ctypes import windll, WinError
    except ImportError:
        def prevent_socket_inheritance(sock):
            """Dummy function, since neither fcntl nor ctypes are available."""
            pass
    else:
        def prevent_socket_inheritance(sock):
            """Mark the given socket fd as non-inheritable (Windows)."""
            # SetHandleInformation(handle, HANDLE_FLAG_INHERIT=1, flags=0)
            if not windll.kernel32.SetHandleInformation(sock.fileno(), 1, 0):
                raise WinError()
else:
    def prevent_socket_inheritance(sock):
        """Mark the given socket fd as non-inheritable (POSIX)."""
        fd = sock.fileno()
        old_flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        fcntl.fcntl(fd, fcntl.F_SETFD, old_flags | fcntl.FD_CLOEXEC)
|
om@3
|
1515 |
|
om@3
|
1516 |
|
om@3
|
1517 |
class SSLAdapter(object):
    """Base class for SSL driver library adapters.

    Required methods:

    * ``wrap(sock) -> (wrapped socket, ssl environ dict)``
    * ``makefile(sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE) -> socket file object``
    """

    def __init__(self, certificate, private_key, certificate_chain=None):
        # Filenames of the server certificate, its private key, and an
        # optional CA/intermediate certificate chain.
        self.certificate = certificate
        self.private_key = private_key
        self.certificate_chain = certificate_chain

    def wrap(self, sock):
        """Wrap the given socket; return (ssl socket, ssl environ dict)."""
        # BUG FIX: was ``raise NotImplemented``. NotImplemented is a
        # sentinel value for binary special methods, not an exception;
        # raising it produces a confusing TypeError. NotImplementedError
        # is the correct abstract-method signal.
        raise NotImplementedError

    def makefile(self, sock, mode='r', bufsize=DEFAULT_BUFFER_SIZE):
        """Return a file-like object wrapping the given (SSL) socket."""
        raise NotImplementedError
|
om@3
|
1536 |
|
om@3
|
1537 |
|
om@3
|
1538 |
class HTTPServer(object):
|
om@3
|
1539 |
"""An HTTP server."""
|
om@3
|
1540 |
|
om@3
|
1541 |
_bind_addr = "127.0.0.1"
|
om@3
|
1542 |
_interrupt = None
|
om@3
|
1543 |
|
om@3
|
1544 |
gateway = None
|
om@3
|
1545 |
"""A Gateway instance."""
|
om@3
|
1546 |
|
om@3
|
1547 |
minthreads = None
|
om@3
|
1548 |
"""The minimum number of worker threads to create (default 10)."""
|
om@3
|
1549 |
|
om@3
|
1550 |
maxthreads = None
|
om@3
|
1551 |
"""The maximum number of worker threads to create (default -1 = no limit)."""
|
om@3
|
1552 |
|
om@3
|
1553 |
server_name = None
|
om@3
|
1554 |
"""The name of the server; defaults to socket.gethostname()."""
|
om@3
|
1555 |
|
om@3
|
1556 |
protocol = "HTTP/1.1"
|
om@3
|
1557 |
"""The version string to write in the Status-Line of all HTTP responses.
|
om@3
|
1558 |
|
om@3
|
1559 |
For example, "HTTP/1.1" is the default. This also limits the supported
|
om@3
|
1560 |
features used in the response."""
|
om@3
|
1561 |
|
om@3
|
1562 |
request_queue_size = 5
|
om@3
|
1563 |
"""The 'backlog' arg to socket.listen(); max queued connections (default 5)."""
|
om@3
|
1564 |
|
om@3
|
1565 |
shutdown_timeout = 5
|
om@3
|
1566 |
"""The total time, in seconds, to wait for worker threads to cleanly exit."""
|
om@3
|
1567 |
|
om@3
|
1568 |
timeout = 10
|
om@3
|
1569 |
"""The timeout in seconds for accepted connections (default 10)."""
|
om@3
|
1570 |
|
om@3
|
1571 |
version = "CherryPy/3.2.0"
|
om@3
|
1572 |
"""A version string for the HTTPServer."""
|
om@3
|
1573 |
|
om@3
|
1574 |
software = None
|
om@3
|
1575 |
"""The value to set for the SERVER_SOFTWARE entry in the WSGI environ.
|
om@3
|
1576 |
|
om@3
|
1577 |
If None, this defaults to ``'%s Server' % self.version``."""
|
om@3
|
1578 |
|
om@3
|
1579 |
ready = False
|
om@3
|
1580 |
"""An internal flag which marks whether the socket is accepting connections."""
|
om@3
|
1581 |
|
om@3
|
1582 |
max_request_header_size = 0
|
om@3
|
1583 |
"""The maximum size, in bytes, for request headers, or 0 for no limit."""
|
om@3
|
1584 |
|
om@3
|
1585 |
max_request_body_size = 0
|
om@3
|
1586 |
"""The maximum size, in bytes, for request bodies, or 0 for no limit."""
|
om@3
|
1587 |
|
om@3
|
1588 |
nodelay = True
|
om@3
|
1589 |
"""If True (the default since 3.1), sets the TCP_NODELAY socket option."""
|
om@3
|
1590 |
|
om@3
|
1591 |
ConnectionClass = HTTPConnection
|
om@3
|
1592 |
"""The class to use for handling HTTP connections."""
|
om@3
|
1593 |
|
om@3
|
1594 |
ssl_adapter = None
|
om@3
|
1595 |
"""An instance of SSLAdapter (or a subclass).
|
om@3
|
1596 |
|
om@3
|
1597 |
You must have the corresponding SSL driver library installed."""
|
om@3
|
1598 |
|
om@3
|
1599 |
def __init__(self, bind_addr, gateway, minthreads=10, maxthreads=-1,
|
om@3
|
1600 |
server_name=None):
|
om@3
|
1601 |
self.bind_addr = bind_addr
|
om@3
|
1602 |
self.gateway = gateway
|
om@3
|
1603 |
|
om@3
|
1604 |
self.requests = ThreadPool(self, min=minthreads or 1, max=maxthreads)
|
om@3
|
1605 |
|
om@3
|
1606 |
if not server_name:
|
om@3
|
1607 |
server_name = socket.gethostname()
|
om@3
|
1608 |
self.server_name = server_name
|
om@3
|
1609 |
self.clear_stats()
|
om@3
|
1610 |
|
om@3
|
1611 |
def clear_stats(self):
|
om@3
|
1612 |
self._start_time = None
|
om@3
|
1613 |
self._run_time = 0
|
om@3
|
1614 |
self.stats = {
|
om@3
|
1615 |
'Enabled': False,
|
om@3
|
1616 |
'Bind Address': lambda s: repr(self.bind_addr),
|
om@3
|
1617 |
'Run time': lambda s: (not s['Enabled']) and 0 or self.runtime(),
|
om@3
|
1618 |
'Accepts': 0,
|
om@3
|
1619 |
'Accepts/sec': lambda s: s['Accepts'] / self.runtime(),
|
om@3
|
1620 |
'Queue': lambda s: getattr(self.requests, "qsize", None),
|
om@3
|
1621 |
'Threads': lambda s: len(getattr(self.requests, "_threads", [])),
|
om@3
|
1622 |
'Threads Idle': lambda s: getattr(self.requests, "idle", None),
|
om@3
|
1623 |
'Socket Errors': 0,
|
om@3
|
1624 |
'Requests': lambda s: (not s['Enabled']) and 0 or sum([w['Requests'](w) for w
|
om@3
|
1625 |
in s['Worker Threads'].values()], 0),
|
om@3
|
1626 |
'Bytes Read': lambda s: (not s['Enabled']) and 0 or sum([w['Bytes Read'](w) for w
|
om@3
|
1627 |
in s['Worker Threads'].values()], 0),
|
om@3
|
1628 |
'Bytes Written': lambda s: (not s['Enabled']) and 0 or sum([w['Bytes Written'](w) for w
|
om@3
|
1629 |
in s['Worker Threads'].values()], 0),
|
om@3
|
1630 |
'Work Time': lambda s: (not s['Enabled']) and 0 or sum([w['Work Time'](w) for w
|
om@3
|
1631 |
in s['Worker Threads'].values()], 0),
|
om@3
|
1632 |
'Read Throughput': lambda s: (not s['Enabled']) and 0 or sum(
|
om@3
|
1633 |
[w['Bytes Read'](w) / (w['Work Time'](w) or 1e-6)
|
om@3
|
1634 |
for w in s['Worker Threads'].values()], 0),
|
om@3
|
1635 |
'Write Throughput': lambda s: (not s['Enabled']) and 0 or sum(
|
om@3
|
1636 |
[w['Bytes Written'](w) / (w['Work Time'](w) or 1e-6)
|
om@3
|
1637 |
for w in s['Worker Threads'].values()], 0),
|
om@3
|
1638 |
'Worker Threads': {},
|
om@3
|
1639 |
}
|
om@3
|
1640 |
logging.statistics["CherryPy HTTPServer %d" % id(self)] = self.stats
|
om@3
|
1641 |
|
om@3
|
1642 |
def runtime(self):
|
om@3
|
1643 |
if self._start_time is None:
|
om@3
|
1644 |
return self._run_time
|
om@3
|
1645 |
else:
|
om@3
|
1646 |
return self._run_time + (time.time() - self._start_time)
|
om@3
|
1647 |
|
om@3
|
1648 |
def __str__(self):
|
om@3
|
1649 |
return "%s.%s(%r)" % (self.__module__, self.__class__.__name__,
|
om@3
|
1650 |
self.bind_addr)
|
om@3
|
1651 |
|
om@3
|
1652 |
def _get_bind_addr(self):
|
om@3
|
1653 |
return self._bind_addr
|
om@3
|
1654 |
def _set_bind_addr(self, value):
|
om@3
|
1655 |
if isinstance(value, tuple) and value[0] in ('', None):
|
om@3
|
1656 |
# Despite the socket module docs, using '' does not
|
om@3
|
1657 |
# allow AI_PASSIVE to work. Passing None instead
|
om@3
|
1658 |
# returns '0.0.0.0' like we want. In other words:
|
om@3
|
1659 |
# host AI_PASSIVE result
|
om@3
|
1660 |
# '' Y 192.168.x.y
|
om@3
|
1661 |
# '' N 192.168.x.y
|
om@3
|
1662 |
# None Y 0.0.0.0
|
om@3
|
1663 |
# None N 127.0.0.1
|
om@3
|
1664 |
# But since you can get the same effect with an explicit
|
om@3
|
1665 |
# '0.0.0.0', we deny both the empty string and None as values.
|
om@3
|
1666 |
raise ValueError("Host values of '' or None are not allowed. "
|
om@3
|
1667 |
"Use '0.0.0.0' (IPv4) or '::' (IPv6) instead "
|
om@3
|
1668 |
"to listen on all active interfaces.")
|
om@3
|
1669 |
self._bind_addr = value
|
om@3
|
1670 |
bind_addr = property(_get_bind_addr, _set_bind_addr,
|
om@3
|
1671 |
doc="""The interface on which to listen for connections.
|
om@3
|
1672 |
|
om@3
|
1673 |
For TCP sockets, a (host, port) tuple. Host values may be any IPv4
|
om@3
|
1674 |
or IPv6 address, or any valid hostname. The string 'localhost' is a
|
om@3
|
1675 |
synonym for '127.0.0.1' (or '::1', if your hosts file prefers IPv6).
|
om@3
|
1676 |
The string '0.0.0.0' is a special IPv4 entry meaning "any active
|
om@3
|
1677 |
interface" (INADDR_ANY), and '::' is the similar IN6ADDR_ANY for
|
om@3
|
1678 |
IPv6. The empty string or None are not allowed.
|
om@3
|
1679 |
|
om@3
|
1680 |
For UNIX sockets, supply the filename as a string.""")
|
om@3
|
1681 |
|
om@3
|
1682 |
def start(self):
|
om@3
|
1683 |
"""Run the server forever."""
|
om@3
|
1684 |
# We don't have to trap KeyboardInterrupt or SystemExit here,
|
om@3
|
1685 |
# because cherrpy.server already does so, calling self.stop() for us.
|
om@3
|
1686 |
# If you're using this server with another framework, you should
|
om@3
|
1687 |
# trap those exceptions in whatever code block calls start().
|
om@3
|
1688 |
self._interrupt = None
|
om@3
|
1689 |
|
om@3
|
1690 |
if self.software is None:
|
om@3
|
1691 |
self.software = "%s Server" % self.version
|
om@3
|
1692 |
|
om@3
|
1693 |
# SSL backward compatibility
|
om@3
|
1694 |
if (self.ssl_adapter is None and
|
om@3
|
1695 |
getattr(self, 'ssl_certificate', None) and
|
om@3
|
1696 |
getattr(self, 'ssl_private_key', None)):
|
om@3
|
1697 |
warnings.warn(
|
om@3
|
1698 |
"SSL attributes are deprecated in CherryPy 3.2, and will "
|
om@3
|
1699 |
"be removed in CherryPy 3.3. Use an ssl_adapter attribute "
|
om@3
|
1700 |
"instead.",
|
om@3
|
1701 |
DeprecationWarning
|
om@3
|
1702 |
)
|
om@3
|
1703 |
try:
|
om@3
|
1704 |
from cherrypy.wsgiserver.ssl_pyopenssl import pyOpenSSLAdapter
|
om@3
|
1705 |
except ImportError:
|
om@3
|
1706 |
pass
|
om@3
|
1707 |
else:
|
om@3
|
1708 |
self.ssl_adapter = pyOpenSSLAdapter(
|
om@3
|
1709 |
self.ssl_certificate, self.ssl_private_key,
|
om@3
|
1710 |
getattr(self, 'ssl_certificate_chain', None))
|
om@3
|
1711 |
|
om@3
|
1712 |
# Select the appropriate socket
|
om@3
|
1713 |
if isinstance(self.bind_addr, basestring):
|
om@3
|
1714 |
# AF_UNIX socket
|
om@3
|
1715 |
|
om@3
|
1716 |
# So we can reuse the socket...
|
om@3
|
1717 |
try: os.unlink(self.bind_addr)
|
om@3
|
1718 |
except: pass
|
om@3
|
1719 |
|
om@3
|
1720 |
# So everyone can access the socket...
|
om@3
|
1721 |
try: os.chmod(self.bind_addr, 0777)
|
om@3
|
1722 |
except: pass
|
om@3
|
1723 |
|
om@3
|
1724 |
info = [(socket.AF_UNIX, socket.SOCK_STREAM, 0, "", self.bind_addr)]
|
om@3
|
1725 |
else:
|
om@3
|
1726 |
# AF_INET or AF_INET6 socket
|
om@3
|
1727 |
# Get the correct address family for our host (allows IPv6 addresses)
|
om@3
|
1728 |
host, port = self.bind_addr
|
om@3
|
1729 |
try:
|
om@3
|
1730 |
info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,
|
om@3
|
1731 |
socket.SOCK_STREAM, 0, socket.AI_PASSIVE)
|
om@3
|
1732 |
except socket.gaierror:
|
om@3
|
1733 |
if ':' in self.bind_addr[0]:
|
om@3
|
1734 |
info = [(socket.AF_INET6, socket.SOCK_STREAM,
|
om@3
|
1735 |
0, "", self.bind_addr + (0, 0))]
|
om@3
|
1736 |
else:
|
om@3
|
1737 |
info = [(socket.AF_INET, socket.SOCK_STREAM,
|
om@3
|
1738 |
0, "", self.bind_addr)]
|
om@3
|
1739 |
|
om@3
|
1740 |
self.socket = None
|
om@3
|
1741 |
msg = "No socket could be created"
|
om@3
|
1742 |
for res in info:
|
om@3
|
1743 |
af, socktype, proto, canonname, sa = res
|
om@3
|
1744 |
try:
|
om@3
|
1745 |
self.bind(af, socktype, proto)
|
om@3
|
1746 |
except socket.error:
|
om@3
|
1747 |
if self.socket:
|
om@3
|
1748 |
self.socket.close()
|
om@3
|
1749 |
self.socket = None
|
om@3
|
1750 |
continue
|
om@3
|
1751 |
break
|
om@3
|
1752 |
if not self.socket:
|
om@3
|
1753 |
raise socket.error(msg)
|
om@3
|
1754 |
|
om@3
|
1755 |
# Timeout so KeyboardInterrupt can be caught on Win32
|
om@3
|
1756 |
self.socket.settimeout(1)
|
om@3
|
1757 |
self.socket.listen(self.request_queue_size)
|
om@3
|
1758 |
|
om@3
|
1759 |
# Create worker threads
|
om@3
|
1760 |
self.requests.start()
|
om@3
|
1761 |
|
om@3
|
1762 |
self.ready = True
|
om@3
|
1763 |
self._start_time = time.time()
|
om@3
|
1764 |
while self.ready:
|
om@3
|
1765 |
self.tick()
|
om@3
|
1766 |
if self.interrupt:
|
om@3
|
1767 |
while self.interrupt is True:
|
om@3
|
1768 |
# Wait for self.stop() to complete. See _set_interrupt.
|
om@3
|
1769 |
time.sleep(0.1)
|
om@3
|
1770 |
if self.interrupt:
|
om@3
|
1771 |
raise self.interrupt
|
om@3
|
1772 |
|
om@3
|
1773 |
def bind(self, family, type, proto=0):
|
om@3
|
1774 |
"""Create (or recreate) the actual socket object."""
|
om@3
|
1775 |
self.socket = socket.socket(family, type, proto)
|
om@3
|
1776 |
prevent_socket_inheritance(self.socket)
|
om@3
|
1777 |
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
|
om@3
|
1778 |
if self.nodelay and not isinstance(self.bind_addr, str):
|
om@3
|
1779 |
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
|
om@3
|
1780 |
|
om@3
|
1781 |
if self.ssl_adapter is not None:
|
om@3
|
1782 |
self.socket = self.ssl_adapter.bind(self.socket)
|
om@3
|
1783 |
|
om@3
|
1784 |
# If listening on the IPV6 any address ('::' = IN6ADDR_ANY),
|
om@3
|
1785 |
# activate dual-stack. See http://www.cherrypy.org/ticket/871.
|
om@3
|
1786 |
if (hasattr(socket, 'AF_INET6') and family == socket.AF_INET6
|
om@3
|
1787 |
and self.bind_addr[0] in ('::', '::0', '::0.0.0.0')):
|
om@3
|
1788 |
try:
|
om@3
|
1789 |
self.socket.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 0)
|
om@3
|
1790 |
except (AttributeError, socket.error):
|
om@3
|
1791 |
# Apparently, the socket option is not available in
|
om@3
|
1792 |
# this machine's TCP stack
|
om@3
|
1793 |
pass
|
om@3
|
1794 |
|
om@3
|
1795 |
self.socket.bind(self.bind_addr)
|
om@3
|
1796 |
|
om@3
|
1797 |
def tick(self):
|
om@3
|
1798 |
"""Accept a new connection and put it on the Queue."""
|
om@3
|
1799 |
try:
|
om@3
|
1800 |
s, addr = self.socket.accept()
|
om@3
|
1801 |
if self.stats['Enabled']:
|
om@3
|
1802 |
self.stats['Accepts'] += 1
|
om@3
|
1803 |
if not self.ready:
|
om@3
|
1804 |
return
|
om@3
|
1805 |
|
om@3
|
1806 |
prevent_socket_inheritance(s)
|
om@3
|
1807 |
if hasattr(s, 'settimeout'):
|
om@3
|
1808 |
s.settimeout(self.timeout)
|
om@3
|
1809 |
|
om@3
|
1810 |
makefile = CP_fileobject
|
om@3
|
1811 |
ssl_env = {}
|
om@3
|
1812 |
# if ssl cert and key are set, we try to be a secure HTTP server
|
om@3
|
1813 |
if self.ssl_adapter is not None:
|
om@3
|
1814 |
try:
|
om@3
|
1815 |
s, ssl_env = self.ssl_adapter.wrap(s)
|
om@3
|
1816 |
except NoSSLError:
|
om@3
|
1817 |
msg = ("The client sent a plain HTTP request, but "
|
om@3
|
1818 |
"this server only speaks HTTPS on this port.")
|
om@3
|
1819 |
buf = ["%s 400 Bad Request\r\n" % self.protocol,
|
om@3
|
1820 |
"Content-Length: %s\r\n" % len(msg),
|
om@3
|
1821 |
"Content-Type: text/plain\r\n\r\n",
|
om@3
|
1822 |
msg]
|
om@3
|
1823 |
|
om@3
|
1824 |
wfile = CP_fileobject(s, "wb", DEFAULT_BUFFER_SIZE)
|
om@3
|
1825 |
try:
|
om@3
|
1826 |
wfile.sendall("".join(buf))
|
om@3
|
1827 |
except socket.error, x:
|
om@3
|
1828 |
if x.args[0] not in socket_errors_to_ignore:
|
om@3
|
1829 |
raise
|
om@3
|
1830 |
return
|
om@3
|
1831 |
if not s:
|
om@3
|
1832 |
return
|
om@3
|
1833 |
makefile = self.ssl_adapter.makefile
|
om@3
|
1834 |
# Re-apply our timeout since we may have a new socket object
|
om@3
|
1835 |
if hasattr(s, 'settimeout'):
|
om@3
|
1836 |
s.settimeout(self.timeout)
|
om@3
|
1837 |
|
om@3
|
1838 |
conn = self.ConnectionClass(self, s, makefile)
|
om@3
|
1839 |
|
om@3
|
1840 |
if not isinstance(self.bind_addr, basestring):
|
om@3
|
1841 |
# optional values
|
om@3
|
1842 |
# Until we do DNS lookups, omit REMOTE_HOST
|
om@3
|
1843 |
if addr is None: # sometimes this can happen
|
om@3
|
1844 |
# figure out if AF_INET or AF_INET6.
|
om@3
|
1845 |
if len(s.getsockname()) == 2:
|
om@3
|
1846 |
# AF_INET
|
om@3
|
1847 |
addr = ('0.0.0.0', 0)
|
om@3
|
1848 |
else:
|
om@3
|
1849 |
# AF_INET6
|
om@3
|
1850 |
addr = ('::', 0)
|
om@3
|
1851 |
conn.remote_addr = addr[0]
|
om@3
|
1852 |
conn.remote_port = addr[1]
|
om@3
|
1853 |
|
om@3
|
1854 |
conn.ssl_env = ssl_env
|
om@3
|
1855 |
|
om@3
|
1856 |
self.requests.put(conn)
|
om@3
|
1857 |
except socket.timeout:
|
om@3
|
1858 |
# The only reason for the timeout in start() is so we can
|
om@3
|
1859 |
# notice keyboard interrupts on Win32, which don't interrupt
|
om@3
|
1860 |
# accept() by default
|
om@3
|
1861 |
return
|
om@3
|
1862 |
except socket.error, x:
|
om@3
|
1863 |
if self.stats['Enabled']:
|
om@3
|
1864 |
self.stats['Socket Errors'] += 1
|
om@3
|
1865 |
if x.args[0] in socket_error_eintr:
|
om@3
|
1866 |
# I *think* this is right. EINTR should occur when a signal
|
om@3
|
1867 |
# is received during the accept() call; all docs say retry
|
om@3
|
1868 |
# the call, and I *think* I'm reading it right that Python
|
om@3
|
1869 |
# will then go ahead and poll for and handle the signal
|
om@3
|
1870 |
# elsewhere. See http://www.cherrypy.org/ticket/707.
|
om@3
|
1871 |
return
|
om@3
|
1872 |
if x.args[0] in socket_errors_nonblocking:
|
om@3
|
1873 |
# Just try again. See http://www.cherrypy.org/ticket/479.
|
om@3
|
1874 |
return
|
om@3
|
1875 |
if x.args[0] in socket_errors_to_ignore:
|
om@3
|
1876 |
# Our socket was closed.
|
om@3
|
1877 |
# See http://www.cherrypy.org/ticket/686.
|
om@3
|
1878 |
return
|
om@3
|
1879 |
raise
|
om@3
|
1880 |
|
om@3
|
1881 |
def _get_interrupt(self):
|
om@3
|
1882 |
return self._interrupt
|
om@3
|
1883 |
def _set_interrupt(self, interrupt):
|
om@3
|
1884 |
self._interrupt = True
|
om@3
|
1885 |
self.stop()
|
om@3
|
1886 |
self._interrupt = interrupt
|
om@3
|
1887 |
interrupt = property(_get_interrupt, _set_interrupt,
|
om@3
|
1888 |
doc="Set this to an Exception instance to "
|
om@3
|
1889 |
"interrupt the server.")
|
om@3
|
1890 |
|
om@3
|
1891 |
def stop(self):
|
om@3
|
1892 |
"""Gracefully shutdown a server that is serving forever."""
|
om@3
|
1893 |
self.ready = False
|
om@3
|
1894 |
if self._start_time is not None:
|
om@3
|
1895 |
self._run_time += (time.time() - self._start_time)
|
om@3
|
1896 |
self._start_time = None
|
om@3
|
1897 |
|
om@3
|
1898 |
sock = getattr(self, "socket", None)
|
om@3
|
1899 |
if sock:
|
om@3
|
1900 |
if not isinstance(self.bind_addr, basestring):
|
om@3
|
1901 |
# Touch our own socket to make accept() return immediately.
|
om@3
|
1902 |
try:
|
om@3
|
1903 |
host, port = sock.getsockname()[:2]
|
om@3
|
1904 |
except socket.error, x:
|
om@3
|
1905 |
if x.args[0] not in socket_errors_to_ignore:
|
om@3
|
1906 |
# Changed to use error code and not message
|
om@3
|
1907 |
# See http://www.cherrypy.org/ticket/860.
|
om@3
|
1908 |
raise
|
om@3
|
1909 |
else:
|
om@3
|
1910 |
# Note that we're explicitly NOT using AI_PASSIVE,
|
om@3
|
1911 |
# here, because we want an actual IP to touch.
|
om@3
|
1912 |
# localhost won't work if we've bound to a public IP,
|
om@3
|
1913 |
# but it will if we bound to '0.0.0.0' (INADDR_ANY).
|
om@3
|
1914 |
for res in socket.getaddrinfo(host, port, socket.AF_UNSPEC,
|
om@3
|
1915 |
socket.SOCK_STREAM):
|
om@3
|
1916 |
af, socktype, proto, canonname, sa = res
|
om@3
|
1917 |
s = None
|
om@3
|
1918 |
try:
|
om@3
|
1919 |
s = socket.socket(af, socktype, proto)
|
om@3
|
1920 |
# See http://groups.google.com/group/cherrypy-users/
|
om@3
|
1921 |
# browse_frm/thread/bbfe5eb39c904fe0
|
om@3
|
1922 |
s.settimeout(1.0)
|
om@3
|
1923 |
s.connect((host, port))
|
om@3
|
1924 |
s.close()
|
om@3
|
1925 |
except socket.error:
|
om@3
|
1926 |
if s:
|
om@3
|
1927 |
s.close()
|
om@3
|
1928 |
if hasattr(sock, "close"):
|
om@3
|
1929 |
sock.close()
|
om@3
|
1930 |
self.socket = None
|
om@3
|
1931 |
|
om@3
|
1932 |
self.requests.stop(self.shutdown_timeout)
|
om@3
|
1933 |
|
om@3
|
1934 |
|
om@3
|
1935 |
class Gateway(object):
    """Base class to adapt an HTTP request to some application-side API.

    Subclasses (e.g. the WSGI gateways) override :meth:`respond` to hand
    the parsed request off to an application and write back its output.
    """

    def __init__(self, req):
        # The HTTPRequest object currently being serviced.
        self.req = req

    def respond(self):
        """Process the current request; must be overridden by subclasses."""
        # Bug fix: ``raise NotImplemented`` raises a TypeError, because
        # NotImplemented is not an exception. NotImplementedError is the
        # intended signal for an abstract method.
        raise NotImplementedError
|
om@3
|
1942 |
|
om@3
|
1943 |
|
om@3
|
1944 |
# These may either be wsgiserver.SSLAdapter subclasses or the string names
# of such classes (in which case they will be lazily loaded).
ssl_adapters = {
    # 'builtin' wraps the stdlib ssl module; 'pyopenssl' wraps PyOpenSSL.
    'builtin': 'cherrypy.wsgiserver.ssl_builtin.BuiltinSSLAdapter',
    'pyopenssl': 'cherrypy.wsgiserver.ssl_pyopenssl.pyOpenSSLAdapter',
}
|
om@3
|
1950 |
|
om@3
|
1951 |
def get_ssl_adapter_class(name='pyopenssl'):
    """Look up (and lazily import, if needed) the SSL adapter for *name*."""
    adapter = ssl_adapters[name.lower()]
    if not isinstance(adapter, basestring):
        # Already a class (or other object); nothing to load.
        return adapter

    # The registered value is a dotted path; split off the attribute name.
    last_dot = adapter.rfind(".")
    mod_path = adapter[:last_dot]
    attr_name = adapter[last_dot + 1:]

    mod = sys.modules.get(mod_path)
    if mod is None:
        # Not imported yet (or a dead None entry). The trailing [''] makes
        # __import__ return the leaf module instead of the package root.
        mod = __import__(mod_path, globals(), locals(), [''])

    # Let an AttributeError propagate outward.
    try:
        return getattr(mod, attr_name)
    except AttributeError:
        raise AttributeError("'%s' object has no attribute '%s'"
                             % (mod_path, attr_name))
|
om@3
|
1974 |
|
om@3
|
1975 |
# -------------------------------- WSGI Stuff -------------------------------- #
|
om@3
|
1976 |
|
om@3
|
1977 |
|
om@3
|
1978 |
class CherryPyWSGIServer(HTTPServer):
    """An HTTPServer whose gateway hands each request to a WSGI application."""

    # Which WSGI flavor to speak; selects a gateway from wsgi_gateways.
    wsgi_version = (1, 0)

    def __init__(self, bind_addr, wsgi_app, numthreads=10, server_name=None,
                 max=-1, request_queue_size=5, timeout=10, shutdown_timeout=5):
        self.requests = ThreadPool(self, min=numthreads or 1, max=max)
        self.wsgi_app = wsgi_app
        self.gateway = wsgi_gateways[self.wsgi_version]

        self.bind_addr = bind_addr
        self.server_name = server_name or socket.gethostname()
        self.request_queue_size = request_queue_size

        self.timeout = timeout
        self.shutdown_timeout = shutdown_timeout
        self.clear_stats()

    def _read_numthreads(self):
        return self.requests.min

    def _write_numthreads(self, value):
        self.requests.min = value

    numthreads = property(_read_numthreads, _write_numthreads,
                          doc="The minimum number of worker threads in the pool.")
|
om@3
|
2003 |
|
om@3
|
2004 |
|
om@3
|
2005 |
class WSGIGateway(Gateway):
|
om@3
|
2006 |
|
om@3
|
2007 |
def __init__(self, req):
|
om@3
|
2008 |
self.req = req
|
om@3
|
2009 |
self.started_response = False
|
om@3
|
2010 |
self.env = self.get_environ()
|
om@3
|
2011 |
self.remaining_bytes_out = None
|
om@3
|
2012 |
|
om@3
|
2013 |
def get_environ(self):
|
om@3
|
2014 |
"""Return a new environ dict targeting the given wsgi.version"""
|
om@3
|
2015 |
raise NotImplemented
|
om@3
|
2016 |
|
om@3
|
2017 |
def respond(self):
|
om@3
|
2018 |
response = self.req.server.wsgi_app(self.env, self.start_response)
|
om@3
|
2019 |
try:
|
om@3
|
2020 |
for chunk in response:
|
om@3
|
2021 |
# "The start_response callable must not actually transmit
|
om@3
|
2022 |
# the response headers. Instead, it must store them for the
|
om@3
|
2023 |
# server or gateway to transmit only after the first
|
om@3
|
2024 |
# iteration of the application return value that yields
|
om@3
|
2025 |
# a NON-EMPTY string, or upon the application's first
|
om@3
|
2026 |
# invocation of the write() callable." (PEP 333)
|
om@3
|
2027 |
if chunk:
|
om@3
|
2028 |
if isinstance(chunk, unicode):
|
om@3
|
2029 |
chunk = chunk.encode('ISO-8859-1')
|
om@3
|
2030 |
self.write(chunk)
|
om@3
|
2031 |
finally:
|
om@3
|
2032 |
if hasattr(response, "close"):
|
om@3
|
2033 |
response.close()
|
om@3
|
2034 |
|
om@3
|
2035 |
def start_response(self, status, headers, exc_info = None):
|
om@3
|
2036 |
"""WSGI callable to begin the HTTP response."""
|
om@3
|
2037 |
# "The application may call start_response more than once,
|
om@3
|
2038 |
# if and only if the exc_info argument is provided."
|
om@3
|
2039 |
if self.started_response and not exc_info:
|
om@3
|
2040 |
raise AssertionError("WSGI start_response called a second "
|
om@3
|
2041 |
"time with no exc_info.")
|
om@3
|
2042 |
self.started_response = True
|
om@3
|
2043 |
|
om@3
|
2044 |
# "if exc_info is provided, and the HTTP headers have already been
|
om@3
|
2045 |
# sent, start_response must raise an error, and should raise the
|
om@3
|
2046 |
# exc_info tuple."
|
om@3
|
2047 |
if self.req.sent_headers:
|
om@3
|
2048 |
try:
|
om@3
|
2049 |
raise exc_info[0], exc_info[1], exc_info[2]
|
om@3
|
2050 |
finally:
|
om@3
|
2051 |
exc_info = None
|
om@3
|
2052 |
|
om@3
|
2053 |
self.req.status = status
|
om@3
|
2054 |
for k, v in headers:
|
om@3
|
2055 |
if not isinstance(k, str):
|
om@3
|
2056 |
raise TypeError("WSGI response header key %r is not a byte string." % k)
|
om@3
|
2057 |
if not isinstance(v, str):
|
om@3
|
2058 |
raise TypeError("WSGI response header value %r is not a byte string." % v)
|
om@3
|
2059 |
if k.lower() == 'content-length':
|
om@3
|
2060 |
self.remaining_bytes_out = int(v)
|
om@3
|
2061 |
self.req.outheaders.extend(headers)
|
om@3
|
2062 |
|
om@3
|
2063 |
return self.write
|
om@3
|
2064 |
|
om@3
|
2065 |
def write(self, chunk):
|
om@3
|
2066 |
"""WSGI callable to write unbuffered data to the client.
|
om@3
|
2067 |
|
om@3
|
2068 |
This method is also used internally by start_response (to write
|
om@3
|
2069 |
data from the iterable returned by the WSGI application).
|
om@3
|
2070 |
"""
|
om@3
|
2071 |
if not self.started_response:
|
om@3
|
2072 |
raise AssertionError("WSGI write called before start_response.")
|
om@3
|
2073 |
|
om@3
|
2074 |
chunklen = len(chunk)
|
om@3
|
2075 |
rbo = self.remaining_bytes_out
|
om@3
|
2076 |
if rbo is not None and chunklen > rbo:
|
om@3
|
2077 |
if not self.req.sent_headers:
|
om@3
|
2078 |
# Whew. We can send a 500 to the client.
|
om@3
|
2079 |
self.req.simple_response("500 Internal Server Error",
|
om@3
|
2080 |
"The requested resource returned more bytes than the "
|
om@3
|
2081 |
"declared Content-Length.")
|
om@3
|
2082 |
else:
|
om@3
|
2083 |
# Dang. We have probably already sent data. Truncate the chunk
|
om@3
|
2084 |
# to fit (so the client doesn't hang) and raise an error later.
|
om@3
|
2085 |
chunk = chunk[:rbo]
|
om@3
|
2086 |
|
om@3
|
2087 |
if not self.req.sent_headers:
|
om@3
|
2088 |
self.req.sent_headers = True
|
om@3
|
2089 |
self.req.send_headers()
|
om@3
|
2090 |
|
om@3
|
2091 |
self.req.write(chunk)
|
om@3
|
2092 |
|
om@3
|
2093 |
if rbo is not None:
|
om@3
|
2094 |
rbo -= chunklen
|
om@3
|
2095 |
if rbo < 0:
|
om@3
|
2096 |
raise ValueError(
|
om@3
|
2097 |
"Response body exceeds the declared Content-Length.")
|
om@3
|
2098 |
|
om@3
|
2099 |
|
om@3
|
2100 |
class WSGIGateway_10(WSGIGateway):
    """Gateway building the byte-string environ for WSGI 1.0 applications."""

    def get_environ(self):
        """Return a new environ dict targeting the given wsgi.version"""
        req = self.req
        server = req.server
        conn = req.conn

        env = {
            'PATH_INFO': req.path,
            'QUERY_STRING': req.qs,
            'REMOTE_ADDR': conn.remote_addr or '',
            'REMOTE_PORT': str(conn.remote_port or ''),
            'REQUEST_METHOD': req.method,
            'REQUEST_URI': req.uri,
            'SCRIPT_NAME': '',
            'SERVER_NAME': server.server_name,
            # Bah. "SERVER_PROTOCOL" is actually the REQUEST protocol.
            'SERVER_PROTOCOL': req.request_protocol,
            'SERVER_SOFTWARE': server.software,
            # set a non-standard environ entry so the WSGI app can know what
            # the *real* server protocol is (and what features to support).
            # See http://www.faqs.org/rfcs/rfc2145.html.
            'ACTUAL_SERVER_PROTOCOL': server.protocol,
            'wsgi.errors': sys.stderr,
            'wsgi.input': req.rfile,
            'wsgi.multiprocess': False,
            'wsgi.multithread': True,
            'wsgi.run_once': False,
            'wsgi.url_scheme': req.scheme,
            'wsgi.version': (1, 0),
        }

        if isinstance(server.bind_addr, basestring):
            # AF_UNIX. This isn't really allowed by WSGI, which doesn't
            # address unix domain sockets. But it's better than nothing.
            env["SERVER_PORT"] = ""
        else:
            env["SERVER_PORT"] = str(server.bind_addr[1])

        # Request headers
        for name, value in req.inheaders.iteritems():
            env["HTTP_" + name.upper().replace("-", "_")] = value

        # CONTENT_TYPE/CONTENT_LENGTH
        ct = env.pop("HTTP_CONTENT_TYPE", None)
        if ct is not None:
            env["CONTENT_TYPE"] = ct
        cl = env.pop("HTTP_CONTENT_LENGTH", None)
        if cl is not None:
            env["CONTENT_LENGTH"] = cl

        if conn.ssl_env:
            env.update(conn.ssl_env)

        return env
|
om@3
|
2153 |
|
om@3
|
2154 |
|
om@3
|
2155 |
class WSGIGateway_u0(WSGIGateway_10):
    """Gateway for the experimental ('u', 0) WSGI variant (unicode environ)."""

    def get_environ(self):
        """Return a new environ dict targeting the given wsgi.version"""
        # (Removed an unused local ``req = self.req`` that the original
        # bound but never read.)
        # Start from the byte-string WSGI 1.0 environ and re-key it with
        # unicode keys.
        env_10 = WSGIGateway_10.get_environ(self)
        env = dict([(k.decode('ISO-8859-1'), v) for k, v in env_10.iteritems()])
        env[u'wsgi.version'] = ('u', 0)

        # Request-URI
        env.setdefault(u'wsgi.url_encoding', u'utf-8')
        try:
            for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
                env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])
        except UnicodeDecodeError:
            # Fall back to latin 1 so apps can transcode if needed.
            env[u'wsgi.url_encoding'] = u'ISO-8859-1'
            for key in [u"PATH_INFO", u"SCRIPT_NAME", u"QUERY_STRING"]:
                env[key] = env_10[str(key)].decode(env[u'wsgi.url_encoding'])

        # Decode any remaining byte-string values; REQUEST_URI stays raw and
        # wsgi.input is a file-like object, so both are left untouched.
        for k, v in sorted(env.items()):
            if isinstance(v, str) and k not in ('REQUEST_URI', 'wsgi.input'):
                env[k] = v.decode('ISO-8859-1')

        return env
|
om@3
|
2180 |
|
om@3
|
2181 |
# Registry mapping a wsgi.version tuple to the Gateway class implementing it.
wsgi_gateways = {
    (1, 0): WSGIGateway_10,
    ('u', 0): WSGIGateway_u0,
}
|
om@3
|
2185 |
|
om@3
|
2186 |
class WSGIPathInfoDispatcher(object):
    """A WSGI dispatcher for dispatch based on the PATH_INFO.

    apps: a dict or list of (path_prefix, app) pairs.
    """

    def __init__(self, apps):
        try:
            # list() also handles dict-view objects on newer Pythons.
            apps = list(apps.items())
        except AttributeError:
            # A sequence of pairs was passed; copy it so the sort below
            # doesn't reorder the caller's list in place.
            apps = list(apps)

        # Sort the apps by len(path), descending, so the longest (most
        # specific) prefix is matched first. key=/reverse= replaces the
        # removed-in-Python-3 cmp= + reverse() of the original.
        apps.sort(key=lambda app: len(app[0]), reverse=True)

        # The path_prefix strings must start, but not end, with a slash.
        # Use "" instead of "/".
        self.apps = [(p.rstrip("/"), a) for p, a in apps]

    def __call__(self, environ, start_response):
        path = environ["PATH_INFO"] or "/"
        for p, app in self.apps:
            # The apps list should be sorted by length, descending.
            if path.startswith(p + "/") or path == p:
                # Shift the matched prefix from PATH_INFO onto SCRIPT_NAME
                # in a copy, so the caller's environ is not mutated.
                environ = environ.copy()
                environ["SCRIPT_NAME"] = environ["SCRIPT_NAME"] + p
                environ["PATH_INFO"] = path[len(p):]
                return app(environ, start_response)

        start_response('404 Not Found', [('Content-Type', 'text/plain'),
                                         ('Content-Length', '0')])
        return ['']
|
om@3
|
2219 |
|