Performance improvements - the console should now be much faster with many torrents, and anywhere from slightly faster to somewhat slower with few

Asmageddon 2012-06-04 17:44:06 +02:00
parent 7a55a2e6ce
commit 08f5841522
3 changed files with 45 additions and 28 deletions


@@ -256,8 +256,9 @@ SEARCH_END_REACHED = 4
 class AllTorrents(BaseMode, component.Component):
     def __init__(self, stdscr, encoding=None):
-        self.formatted_rows = None
-        self.torrent_names = None
+        self.numtorrents = -1
+        self._cached_rows = {}
         self.cursel = 1
         self.curoff = 1 # TODO: this should really be 0 indexed
         self.column_string = ""
@@ -411,15 +412,13 @@ class AllTorrents(BaseMode, component.Component):
     def set_state(self, state, refresh):
         self.curstate = state # cache in case we change sort order
-        newnames = []
-        newrows = []
+        self._cached_rows = {}
         self._sorted_ids = self._sort_torrents(self.curstate)
-        for torrent_id in self._sorted_ids:
-            ts = self.curstate[torrent_id]
-            newnames.append(ts["name"])
-            newrows.append((format_utils.format_row([column.get_column_value(name,ts) for name in self.__columns],self.column_widths),ts["state"]))
+        self.numtorrents = len(state)
-        self.formatted_rows = newrows
-        self.torrent_names = newnames
         if refresh:
             self.refresh()
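
The hunk above swaps eager formatting (every row rebuilt in set_state) for lazy, memoized formatting: set_state now just records numtorrents and clears the _cached_rows dict, and each row is formatted the first time it is drawn. A minimal sketch of that cache pattern, with illustrative names (the Rows class and the "%-30s %s" formatting are stand-ins, not Deluge's API):

    class Rows(object):
        def __init__(self):
            self.state = {}          # torrent_id -> state dict
            self.sorted_ids = []
            self.numtorrents = 0
            self._cached_rows = {}   # row index -> formatted row

        def set_state(self, state):
            # Invalidate the cache instead of re-formatting every row up front.
            self.state = state
            self.sorted_ids = sorted(state)
            self.numtorrents = len(state)
            self._cached_rows = {}

        def row(self, index):
            # Format on first draw, then reuse until the next set_state().
            if index not in self._cached_rows:
                ts = self.state[self.sorted_ids[index]]
                self._cached_rows[index] = "%-30s %s" % (ts["name"], ts["state"])
            return self._cached_rows[index]

With many torrents, set_state drops from formatting every row to a sort plus a dict reset; the commit message's "slightly faster to somewhat slower with few" likely reflects the extra cache lookups at draw time.
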
@@ -529,6 +528,7 @@ class AllTorrents(BaseMode, component.Component):
         cmp_func = self._queue_sort
+        sg = state.get
         def sort_by_field(state, result, field):
             if field in column_names_to_state_keys:
                 field = column_names_to_state_keys[field]
@@ -541,8 +541,8 @@ class AllTorrents(BaseMode, component.Component):
             if field in first_element:
                 is_string = isinstance( first_element[field], basestring)
-                sort_key = lambda s:state.get(s)[field]
-                sort_key2 = lambda s:state.get(s)[field].lower()
+                sort_key = lambda s:sg(s)[field]
+                sort_key2 = lambda s:sg(s)[field].lower()
                 #If it's a string, sort case-insensitively but preserve A>a order
                 if is_string:
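
Binding state.get to the local sg is a classic CPython micro-optimization: the bound method is looked up once, instead of re-resolving the .get attribute on every call of a sort key that runs once per torrent. A rough illustration using timeit (timings are machine-dependent and not from this commit):

    import timeit

    state = {i: {"name": "torrent-%04d" % i} for i in range(1000)}
    ids = list(state)

    # The key re-resolves the .get attribute on every call.
    t_attr = timeit.timeit(
        lambda: sorted(ids, key=lambda s: state.get(s)["name"]), number=200)

    sg = state.get  # bound method resolved once, outside the key
    t_local = timeit.timeit(
        lambda: sorted(ids, key=lambda s: sg(s)["name"]), number=200)

    print("state.get: %.3fs   pre-bound sg: %.3fs" % (t_attr, t_local))
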
@@ -806,23 +806,40 @@ class AllTorrents(BaseMode, component.Component):
             pass
         # add all the torrents
-        if self.formatted_rows == []:
+        if self.numtorrents == 0:
             msg = "No torrents match filter".center(self.cols)
             self.add_string(3, "{!info!}%s"%msg)
-        elif self.formatted_rows:
+        elif self.numtorrents > 0:
             tidx = self.curoff
             currow = 2
+            #Because dots are slow
+            sorted_ids = self._sorted_ids
+            curstate = self.curstate
+            gcv = column.get_column_value
+            fr = format_utils.format_row
+            cols = self.__columns
+            colw = self.column_widths
+            cr = self._cached_rows
+            def draw_row(index):
+                if index not in cr:
+                    ts = curstate[sorted_ids[index]]
+                    cr[index] = (fr([gcv(name,ts) for name in cols],colw),ts["state"])
+                return cr[index]
             if lines:
+                todraw = []
                 for l in lines:
-                    try:
-                        todraw.append(self.formatted_rows[l])
-                    except:
-                        pass #A quick and ugly fix for crash caused by doing shift-m on last torrent
+                    if l < tidx - 1: continue
+                    if l >= tidx - 1 + self.rows - 3: break
+                    if l >= self.numtorrents: break
+                    todraw.append(draw_row(l))
                 lines.reverse()
             else:
-                todraw = self.formatted_rows[tidx-1:]
+                todraw = []
+                for i in range(tidx-1, tidx-1 + self.rows - 3):
+                    if i >= self.numtorrents: break
+                    todraw += [draw_row(i)]
             for row in todraw:
                 # default style
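
The rewritten loops above bound the work per refresh: instead of slicing a fully pre-formatted list, at most one screenful (self.rows - 3 rows) is formatted through the memoizing draw_row, and out-of-range indices are skipped explicitly rather than swallowed by a bare except. A small sketch of the window arithmetic, assuming the 1-indexed curoff noted in the TODO earlier (visible_indices is a hypothetical helper, not in the diff):

    def visible_indices(curoff, drawable_rows, numtorrents):
        # curoff is 1-indexed (see the TODO in __init__); convert to 0-indexed.
        first = curoff - 1
        last = min(first + drawable_rows, numtorrents)
        return range(first, last)

    # With 10000 torrents only one screenful is ever formatted per refresh:
    assert len(visible_indices(1, 27, 10000)) == 27
    # Near the end of the list the window is clamped:
    assert list(visible_indices(9999, 27, 10000)) == [9998, 9999]
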
@@ -1083,7 +1100,7 @@ class AllTorrents(BaseMode, component.Component):
             reactor.stop()
             return
-        if self.formatted_rows==None or self.popup:
+        if self.numtorrents == 0 or self.popup:
             return
         elif self.entering_search:


@@ -75,18 +75,15 @@ def get_column_value(name,state):
         log.error("No such column: %s",name)
         return None
-    if col[1] != None:
-        args = []
+    if col[1]:
         try:
-            for key in col[0]:
-                args.append(state[key])
+            args = [ state[key] for key in col[0] ]
         except:
             log.error("Could not get column field: %s",col[0])
             return None
-        colval = col[1](*args)
+        return col[1](*args)
     else:
-        colval = state[col[0][0]]
-    return colval
+        return state[col[0][0]]
 
 def get_required_fields(cols):
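
The early returns and the list comprehension above are behavior-preserving here: the comprehension avoids a per-key args.append attribute lookup, and returning directly drops the colval temporary. A before/after sketch of the same shape (state and keys are stand-ins for Deluge's structures):

    def args_loop(state, keys):
        args = []
        for key in keys:
            args.append(state[key])   # attribute lookup + method call per key
        return args

    def args_comp(state, keys):
        return [state[key] for key in keys]   # appends via a single bytecode op

    state = {"a": 1, "b": 2}
    assert args_loop(state, ["a", "b"]) == args_comp(state, ["a", "b"])

Note that `if col[1]:` is not strictly identical to `if col[1] != None:` for falsy non-None values, but col[1] appears to be either None or a formatter function here, so the shorter truth test is safe.
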


@@ -104,15 +104,18 @@ def trim_string(string, w, have_dbls):
     else:
         return u"%s "%(string[0:w-1])
 
+#Dots are slow
+eaw = unicodedata.east_asian_width
+ud_normalize = unicodedata.normalize
 
 def format_column(col, lim):
     dbls = 0
-    if haveud and isinstance(col,unicode):
+    #Chosen over isinstance(col, unicode) and col.__class__ == unicode
+    # for speed - it's ~3 times faster for non-unicode strings and ~1.5
+    # for unicode strings.
+    if haveud and col.__class__ is unicode:
         # might have some double width chars
-        col = unicodedata.normalize("NFC",col)
-        for c in col:
-            if unicodedata.east_asian_width(c) in ['W','F']:
-                # found a wide/full char
-                dbls += 1
+        col = ud_normalize("NFC",col)
+        dbls = sum(eaw(c) in 'WF' for c in col)
     size = len(col)+dbls
     if (size >= lim - 1):
         return trim_string(col,lim,dbls>0)
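
Hoisting the unicodedata attribute lookups to module level and folding the loop into sum() preserves the count: east_asian_width returns codes such as 'W', 'F', 'Na', or 'A', and for those codes substring membership in 'WF' matches exactly the same cases as `in ['W','F']`. A standalone Python 3 sketch of the width logic (Python 3's str is always unicode, so the haveud and __class__ guards from this Python 2 code are dropped):

    import unicodedata

    eaw = unicodedata.east_asian_width     # hoisted, as in the diff
    ud_normalize = unicodedata.normalize

    def display_width(col):
        col = ud_normalize("NFC", col)
        # 'W' (wide) and 'F' (fullwidth) characters occupy two terminal cells.
        dbls = sum(eaw(c) in "WF" for c in col)
        return len(col) + dbls

    print(display_width(u"abc"))     # 3
    print(display_width(u"日本語"))   # 6: three wide characters
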