1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/OpenSecurity/install/web.py-0.37/build/lib/web/browser.py Mon Dec 02 14:02:05 2013 +0100
1.3 @@ -0,0 +1,236 @@
1.4 +"""Browser to test web applications.
1.5 +(from web.py)
1.6 +"""
1.7 +from utils import re_compile
1.8 +from net import htmlunquote
1.9 +
1.10 +import httplib, urllib, urllib2
1.11 +import copy
1.12 +from StringIO import StringIO
1.13 +
1.14 +DEBUG = False
1.15 +
1.16 +__all__ = [
1.17 + "BrowserError",
1.18 + "Browser", "AppBrowser",
1.19 + "AppHandler"
1.20 +]
1.21 +
1.22 +class BrowserError(Exception):
1.23 + pass
1.24 +
1.25 +class Browser:
1.26 + def __init__(self):
1.27 + import cookielib
1.28 + self.cookiejar = cookielib.CookieJar()
1.29 + self._cookie_processor = urllib2.HTTPCookieProcessor(self.cookiejar)
1.30 + self.form = None
1.31 +
1.32 + self.url = "http://0.0.0.0:8080/"
1.33 + self.path = "/"
1.34 +
1.35 + self.status = None
1.36 + self.data = None
1.37 + self._response = None
1.38 + self._forms = None
1.39 +
1.40 + def reset(self):
1.41 + """Clears all cookies and history."""
1.42 + self.cookiejar.clear()
1.43 +
1.44 + def build_opener(self):
1.45 + """Builds the opener using urllib2.build_opener.
1.46 +    Subclasses can override this function to provide custom openers.
1.47 + """
1.48 + return urllib2.build_opener()
1.49 +
1.50 + def do_request(self, req):
1.51 + if DEBUG:
1.52 + print 'requesting', req.get_method(), req.get_full_url()
1.53 + opener = self.build_opener()
1.54 + opener.add_handler(self._cookie_processor)
1.55 + try:
1.56 + self._response = opener.open(req)
1.57 + except urllib2.HTTPError, e:
1.58 + self._response = e
1.59 +
1.60 + self.url = self._response.geturl()
1.61 + self.path = urllib2.Request(self.url).get_selector()
1.62 + self.data = self._response.read()
1.63 + self.status = self._response.code
1.64 + self._forms = None
1.65 + self.form = None
1.66 + return self.get_response()
1.67 +
1.68 + def open(self, url, data=None, headers={}):
1.69 + """Opens the specified url."""
1.70 + url = urllib.basejoin(self.url, url)
1.71 + req = urllib2.Request(url, data, headers)
1.72 + return self.do_request(req)
1.73 +
1.74 + def show(self):
1.75 + """Opens the current page in real web browser."""
1.76 + f = open('page.html', 'w')
1.77 + f.write(self.data)
1.78 + f.close()
1.79 +
1.80 + import webbrowser, os
1.81 + url = 'file://' + os.path.abspath('page.html')
1.82 + webbrowser.open(url)
1.83 +
1.84 + def get_response(self):
1.85 + """Returns a copy of the current response."""
1.86 + return urllib.addinfourl(StringIO(self.data), self._response.info(), self._response.geturl())
1.87 +
1.88 + def get_soup(self):
1.89 + """Returns beautiful soup of the current document."""
1.90 + import BeautifulSoup
1.91 + return BeautifulSoup.BeautifulSoup(self.data)
1.92 +
1.93 + def get_text(self, e=None):
1.94 + """Returns content of e or the current document as plain text."""
1.95 + e = e or self.get_soup()
1.96 + return ''.join([htmlunquote(c) for c in e.recursiveChildGenerator() if isinstance(c, unicode)])
1.97 +
1.98 + def _get_links(self):
1.99 + soup = self.get_soup()
1.100 + return [a for a in soup.findAll(name='a')]
1.101 +
1.102 + def get_links(self, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
1.103 + """Returns all links in the document."""
1.104 + return self._filter_links(self._get_links(),
1.105 + text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
1.106 +
1.107 + def follow_link(self, link=None, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
1.108 + if link is None:
1.109 + links = self._filter_links(self.get_links(),
1.110 + text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
1.111 + link = links and links[0]
1.112 +
1.113 + if link:
1.114 + return self.open(link['href'])
1.115 + else:
1.116 + raise BrowserError("No link found")
1.117 +
1.118 + def find_link(self, text=None, text_regex=None, url=None, url_regex=None, predicate=None):
1.119 + links = self._filter_links(self.get_links(),
1.120 + text=text, text_regex=text_regex, url=url, url_regex=url_regex, predicate=predicate)
1.121 + return links and links[0] or None
1.122 +
1.123 + def _filter_links(self, links,
1.124 + text=None, text_regex=None,
1.125 + url=None, url_regex=None,
1.126 + predicate=None):
1.127 + predicates = []
1.128 + if text is not None:
1.129 + predicates.append(lambda link: link.string == text)
1.130 + if text_regex is not None:
1.131 + predicates.append(lambda link: re_compile(text_regex).search(link.string or ''))
1.132 + if url is not None:
1.133 + predicates.append(lambda link: link.get('href') == url)
1.134 + if url_regex is not None:
1.135 + predicates.append(lambda link: re_compile(url_regex).search(link.get('href', '')))
1.136 + if predicate:
1.137 +            predicates.append(predicate)
1.138 +
1.139 + def f(link):
1.140 + for p in predicates:
1.141 + if not p(link):
1.142 + return False
1.143 + return True
1.144 +
1.145 + return [link for link in links if f(link)]
1.146 +
1.147 + def get_forms(self):
1.148 + """Returns all forms in the current document.
1.149 + The returned form objects implement the ClientForm.HTMLForm interface.
1.150 + """
1.151 + if self._forms is None:
1.152 + import ClientForm
1.153 + self._forms = ClientForm.ParseResponse(self.get_response(), backwards_compat=False)
1.154 + return self._forms
1.155 +
1.156 + def select_form(self, name=None, predicate=None, index=0):
1.157 + """Selects the specified form."""
1.158 + forms = self.get_forms()
1.159 +
1.160 + if name is not None:
1.161 + forms = [f for f in forms if f.name == name]
1.162 + if predicate:
1.163 + forms = [f for f in forms if predicate(f)]
1.164 +
1.165 + if forms:
1.166 + self.form = forms[index]
1.167 + return self.form
1.168 + else:
1.169 + raise BrowserError("No form selected.")
1.170 +
1.171 + def submit(self, **kw):
1.172 + """submits the currently selected form."""
1.173 + if self.form is None:
1.174 + raise BrowserError("No form selected.")
1.175 + req = self.form.click(**kw)
1.176 + return self.do_request(req)
1.177 +
1.178 + def __getitem__(self, key):
1.179 + return self.form[key]
1.180 +
1.181 + def __setitem__(self, key, value):
1.182 + self.form[key] = value
1.183 +
1.184 +class AppBrowser(Browser):
1.185 + """Browser interface to test web.py apps.
1.186 +
1.187 + b = AppBrowser(app)
1.188 + b.open('/')
1.189 + b.follow_link(text='Login')
1.190 +
1.191 + b.select_form(name='login')
1.192 + b['username'] = 'joe'
1.193 + b['password'] = 'secret'
1.194 + b.submit()
1.195 +
1.196 + assert b.path == '/'
1.197 + assert 'Welcome joe' in b.get_text()
1.198 + """
1.199 + def __init__(self, app):
1.200 + Browser.__init__(self)
1.201 + self.app = app
1.202 +
1.203 + def build_opener(self):
1.204 + return urllib2.build_opener(AppHandler(self.app))
1.205 +
1.206 +class AppHandler(urllib2.HTTPHandler):
1.207 + """urllib2 handler to handle requests using web.py application."""
1.208 + handler_order = 100
1.209 +
1.210 + def __init__(self, app):
1.211 + self.app = app
1.212 +
1.213 + def http_open(self, req):
1.214 + result = self.app.request(
1.215 + localpart=req.get_selector(),
1.216 + method=req.get_method(),
1.217 + host=req.get_host(),
1.218 + data=req.get_data(),
1.219 + headers=dict(req.header_items()),
1.220 + https=req.get_type() == "https"
1.221 + )
1.222 + return self._make_response(result, req.get_full_url())
1.223 +
1.224 + def https_open(self, req):
1.225 + return self.http_open(req)
1.226 +
1.227 + try:
1.228 + https_request = urllib2.HTTPHandler.do_request_
1.229 + except AttributeError:
1.230 + # for python 2.3
1.231 + pass
1.232 +
1.233 + def _make_response(self, result, url):
1.234 + data = "\r\n".join(["%s: %s" % (k, v) for k, v in result.header_items])
1.235 + headers = httplib.HTTPMessage(StringIO(data))
1.236 + response = urllib.addinfourl(StringIO(result.data), headers, url)
1.237 + code, msg = result.status.split(None, 1)
1.238 + response.code, response.msg = int(code), msg
1.239 + return response