url(r'^signup/oauth/$', pgweb.account.views.signup_oauth),
]
-for provider in settings.OAUTH.keys():
+for provider in list(settings.OAUTH.keys()):
urlpatterns.append(url(r'^login/({0})/$'.format(provider), pgweb.account.oauthclient.login_oauth))
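(Aside, not part of the patch: the list() wrapper around settings.OAUTH.keys() is the conservative 2to3 default. Since the loop does not modify the dict, iterating it directly is equivalent and slightly more idiomatic in Python 3:)

for provider in settings.OAUTH:  # iterating a dict yields its keys; safe while the dict is not mutated
    urlpatterns.append(url(r'^login/({0})/$'.format(provider), pgweb.account.oauthclient.login_oauth))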
url = request.POST['url']
expr = request.POST['expr']
xkey = request.POST['xkey']
- l = len(filter(None, [url, expr, xkey]))
+ l = len([_f for _f in [url, expr, xkey] if _f])
if l == 0:
# Nothing specified
return HttpResponseRedirect('.')
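(Aside, illustrative only: the list comprehension is needed because filter() returns a lazy iterator in Python 3, so the old len(filter(None, ...)) would raise TypeError. Counting the truthy values directly works as well:)

l = sum(1 for value in (url, expr, xkey) if value)  # counts non-empty fields without building a list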
from django.conf import settings
import os
-import cPickle as pickle
+import pickle
import json
from pgweb.util.decorators import nocache
del allnodes
# Add all directories
- directories = [{'link': k, 'url': k, 'type': 'd'} for k, v in node.items() if v['t'] == 'd']
+ directories = [{'link': k, 'url': k, 'type': 'd'} for k, v in list(node.items()) if v['t'] == 'd']
# Add all symlinks (only directories supported)
- directories.extend([{'link': k, 'url': v['d'], 'type': 'l'} for k, v in node.items() if v['t'] == 'l'])
+ directories.extend([{'link': k, 'url': v['d'], 'type': 'l'} for k, v in list(node.items()) if v['t'] == 'l'])
# A little early sorting wouldn't go amiss, so .. ends up at the top
directories.sort(key=version_sort, reverse=True)
directories.insert(0, {'link': '[Parent Directory]', 'url': '..'})
# Fetch files
- files = [{'name': k, 'mtime': v['d'], 'size': v['s']} for k, v in node.items() if v['t'] == 'f']
+ files = [{'name': k, 'mtime': v['d'], 'size': v['s']} for k, v in list(node.items()) if v['t'] == 'f']
breadcrumbs = []
if subpath:
2: {'str': 'Obsolete', 'class': 'obs', 'bgcolor': '#ddddff'},
3: {'str': '?', 'class': 'unk', 'bgcolor': '#ffffaa'},
}
-choices = [(k, v['str']) for k, v in choices_map.items()]
+choices = [(k, v['str']) for k, v in list(choices_map.items())]
class FeatureGroup(models.Model):
def clean(self):
d = super(SecurityPatchForm, self).clean()
- vecs = [v for k, v in d.items() if k.startswith('vector_')]
+ vecs = [v for k, v in list(d.items()) if k.startswith('vector_')]
empty = [v for v in vecs if v == '']
if len(empty) != len(vecs) and len(empty) != 0:
- for k in d.keys():
+ for k in list(d.keys()):
if k.startswith('vector_'):
self.add_error(k, 'Either specify all vector values or none')
return d
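(Aside: the list(d.keys()) copy above is not just cosmetic. Django's add_error() removes the offending field from cleaned_data, so iterating the live keys view while adding errors would fail with "dictionary changed size during iteration" under Python 3. A minimal standalone illustration of that behaviour:)

d = {'vector_a': '', 'vector_b': 'N'}
for k in list(d.keys()):      # copy first, then mutating d inside the loop is safe
    if k.startswith('vector_'):
        d.pop(k, None)        # popping while iterating d.keys() directly would raise RuntimeError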
{0}
""".format("\n".join(newly_visible)))
- map(varnish_purge, SecurityPatch.purge_urls)
+ list(map(varnish_purge, SecurityPatch.purge_urls))
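(Aside: because map() is lazy in Python 3, the list() call is what actually forces the purge requests to run. A plain loop, sketched below, has the same effect without building a throwaway list:)

for purge_url in SecurityPatch.purge_urls:
    varnish_purge(purge_url)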
import cvss
-vector_choices = {k: list(v.items()) for k, v in cvss.constants3.METRICS_VALUE_NAMES.items()}
+vector_choices = {k: list(v.items()) for k, v in list(cvss.constants3.METRICS_VALUE_NAMES.items())}
component_choices = (
('core server', 'Core server product'),
if v not in cvss.constants3.METRICS_VALUES[k]:
raise ValidationError("Metric {0} has unknown value {1}. Valid ones are: {2}".format(
k, v,
- ", ".join(cvss.constants3.METRICS_VALUES[k].keys()),
+ ", ".join(list(cvss.constants3.METRICS_VALUES[k].keys())),
))
except ValidationError:
raise
# Set fixed fields. Note that this will not work if the fixed fields are ManyToMany,
# but we'll fix that sometime in the future
if fixedfields:
- for k, v in fixedfields.items():
+ for k, v in list(fixedfields.items()):
setattr(r, k, v)
r.save()
('font', ["'self'", "fonts.gstatic.com", "data:", ]),
])
if hasattr(response, 'x_allow_extra_sources'):
- for k, v in response.x_allow_extra_sources.items():
+ for k, v in list(response.x_allow_extra_sources.items()):
if k in sources:
sources[k].extend(v)
else:
sources[k] = v
- security_policies = ["{0}-src {1}".format(k, " ".join(v)) for k, v in sources.items()]
+ security_policies = ["{0}-src {1}".format(k, " ".join(v)) for k, v in list(sources.items())]
if not getattr(response, 'x_allow_frames', False):
response['X-Frame-Options'] = 'DENY'
return "This object does not know how to express itself."
s = "\n\n".join(["\n".join(
- filter(
- lambda x: not x.startswith('@@'),
- difflib.unified_diff(
- _get_attr_value(oldobj, n).splitlines(),
- _get_attr_value(obj, n).splitlines(),
- n=1,
- lineterm='',
- fromfile=n,
- tofile=n,
- )
- )
+ [x for x in difflib.unified_diff(
+ _get_attr_value(oldobj, n).splitlines(),
+ _get_attr_value(obj, n).splitlines(),
+ n=1,
+ lineterm='',
+ fromfile=n,
+ tofile=n,
+ ) if not x.startswith('@@')]
) for n in fieldlist if _get_attr_value(oldobj, n) != _get_attr_value(obj, n)])
if not s:
return None
purgelist = instance.purge_urls()
else:
purgelist = instance.purge_urls
- map(varnish_purge, purgelist)
+ list(map(varnish_purge, purgelist))
def register_basic_signal_handlers():
import re
import tidy
from optparse import OptionParser
-from ConfigParser import ConfigParser
+from configparser import ConfigParser
import psycopg2
import sys
import os
from datetime import datetime
-import cPickle as pickle
+import pickle
import codecs
import urllib2
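(Aside, a hedged note: urllib2 has no Python 3 counterpart. If this script is also meant to run under Python 3, the import would need to move to urllib.request, along the lines of:)

import urllib.request  # assumption: only the request side of urllib2 is used here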
def generate_platform(dirname, familyprefix, ver, installer, systemd):
- for f in platform_names.keys():
+ for f in list(platform_names.keys()):
yield ('%s-%s' % (f, ver), {
't': platform_names[f].format(ver),
'p': os.path.join(dirname, '{0}-{1}'.format(familyprefix, ver)),
reporpms[v] = {}
vroot = os.path.join(args.yumroot, v)
for dirpath, dirnames, filenames in os.walk(vroot):
- rmatches = filter(None, (re_reporpm.match(f) for f in sorted(filenames, reverse=True)))
+ rmatches = [_f for _f in (re_reporpm.match(f) for f in sorted(filenames, reverse=True)) if _f]
if rmatches:
familypath = os.path.join(*dirpath.split('/')[-2:])
shortdist, shortver, ver = r.groups(1)
found = False
- for p, pinfo in platforms.items():
+ for p, pinfo in list(platforms.items()):
if pinfo['p'] == familypath and pinfo['f'] == shortdist:
if p not in reporpms[v]:
reporpms[v][p] = {}
pass
# Filter all platforms that are not used
- platforms = {k: v for k, v in platforms.iteritems() if v['found']}
- for k, v in platforms.iteritems():
+ platforms = {k: v for k, v in platforms.items() if v['found']}
+ for k, v in platforms.items():
del v['found']
j = json.dumps({'platforms': platforms, 'reporpms': reporpms})
import sys
import os
import hashlib
-from ConfigParser import ConfigParser
+from configparser import ConfigParser
import psycopg2
# Templates that we don't want to ban automatically
curs = self.dbconn.cursor()
curs.execute("DELETE FROM webpages WHERE site=%(site)s AND NOT suburl=ANY(%(urls)s)", {
'site': self.siteid,
- 'urls': self.pages_crawled.keys(),
+ 'urls': list(self.pages_crawled.keys()),
})
if curs.rowcount:
log("Deleted %s pages no longer accessible" % curs.rowcount)
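(Aside: the list() around pages_crawled.keys() is required rather than cosmetic here; psycopg2 adapts Python lists to SQL arrays, but it does not know how to adapt a dict_keys view and typically fails with a "can't adapt type" error. Illustrative sketch only:)

# passing self.pages_crawled.keys() directly would not adapt to an array;
# the explicit list() conversion does:
params = {'site': self.siteid, 'urls': list(self.pages_crawled.keys())}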
# We need to seed the crawler with every URL we've already seen, since
# we don't recrawl the contents if they haven't changed.
- allpages = self.scantimes.keys()
+ allpages = list(self.scantimes.keys())
# Figure out if there are any excludes to deal with (beyond the
# robots.txt ones)
import re
import urllib
-from StringIO import StringIO
+from io import StringIO
import dateutil.parser
from datetime import timedelta
-from HTMLParser import HTMLParser
+from html.parser import HTMLParser
from lib.log import log
from lib.sitemapsite import SitemapSiteCrawler
from lib.threadwrapper import threadwrapper
-from ConfigParser import ConfigParser
+from configparser import ConfigParser
import psycopg2
import time
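(Aside, standalone illustration: most of the mechanical changes above come down to the same Python 2 vs 3 differences. dict.keys()/values()/items() now return live views instead of lists, and map()/filter() return lazy iterators:)

d = {'a': 1}
keys = d.keys()                             # a view in Python 3, not a list
d['b'] = 2
print(list(keys))                           # ['a', 'b']: the view reflects later changes
print(len(list(filter(None, [0, 1, 2]))))   # 2; filter() must be materialized before len()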