import itertools
import operator
import re
from urllib.request import urlopen  # Python 3; on Python 2 this was: from urllib import urlopen

uniqueBanList = []
totalBanList = []
url = 'http://74.63.239.234/banlist.txt'


def most_common(L):
    """Return the most frequent item in L; ties go to the item seen earliest."""
    # Pair each item with its index so ties can be broken by first appearance,
    # then group equal items together.
    SL = sorted((x, i) for i, x in enumerate(L))
    groups = itertools.groupby(SL, key=operator.itemgetter(0))

    # Rank each group by (count, -first_index): more occurrences win, and
    # among equal counts the item that appeared earliest wins.
    def _auxfun(g):
        item, iterable = g
        count = 0
        min_index = len(L)
        for _, where in iterable:
            count += 1
            min_index = min(min_index, where)
        if count >= 5:  # report only items banned at least five times
            print('item %r, count %r, minind %r' % (item, count, min_index))
        return count, -min_index

    # Pick the highest-count/earliest item.
    return max(groups, key=_auxfun)[0]


def find_matches(regex, url):
    """Download the page at url and return every match of regex in it."""
    webpage = urlopen(url).read().decode('utf-8', errors='replace')
    return re.findall(regex, webpage)


if __name__ == "__main__":
    # A SteamID looks like STEAM_0:1:12345678.
    for steam_id in find_matches(r'\bSTEAM_[0-1]:[0-1]:\d+\b', url):
        totalBanList.append(steam_id)
        if steam_id not in uniqueBanList:
            uniqueBanList.append(steam_id)
    print("There have been %d total bans and %d unique people banned "
          "since the beginning of time on Noobonic Plague."
          % (len(totalBanList), len(uniqueBanList)))
    z = input("Do you want to see the list of most-banned people? 1 for yes, 0 for no. > ")
    if z == '1':
        print(most_common(totalBanList))
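
# If the exact tie-breaking behaviour of most_common() above is not needed,
# the standard library's collections.Counter (Python 2.7+/3) does the same
# frequency counting in a few lines. A minimal sketch, assuming the
# totalBanList built above; top_banned is a hypothetical helper name:
from collections import Counter

def top_banned(ban_list, n=5):
    """Return the n most frequently banned SteamIDs as (id, count) pairs."""
    # Counter.most_common(n) returns the n highest-count (item, count) pairs.
    return Counter(ban_list).most_common(n)

# Example usage: top_banned(totalBanList) -> [('STEAM_0:1:12345678', 7), ...]
# Note that Counter breaks count ties by insertion order (Python 3.7+),
# whereas most_common() above explicitly prefers the earliest occurrence.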