more cleaning up - removing unneeded white spaces
parent 70b6e40a55
commit 78d8d116cc

src/core.py (77 lines changed)
@@ -61,38 +61,37 @@ import pref
 TORRENTS_SUBDIR = "torrentfiles"

-STATE_FILENAME = "persistent.state"
-PREFS_FILENAME = "prefs.state"
-DHT_FILENAME = "dht.state"
+STATE_FILENAME = "persistent.state"
+PREFS_FILENAME = "prefs.state"
+DHT_FILENAME = "dht.state"

 CACHED_DATA_EXPIRATION = 1 # seconds, like the output of time.time()

 PREF_FUNCTIONS = {
-    "max_uploads" : deluge_core.set_max_uploads,
-    "listen_on" : deluge_core.set_listen_on,
-    "max_connections" : deluge_core.set_max_connections,
+    "max_uploads" : deluge_core.set_max_uploads,
+    "listen_on" : deluge_core.set_listen_on,
+    "max_connections" : deluge_core.set_max_connections,
     "max_active_torrents" : None, # no need for a function, applied constantly
-    "auto_seed_ratio" : None, # no need for a function, applied constantly
-    "max_download_speed_bps" : deluge_core.set_download_rate_limit,
-    "max_upload_speed_bps" : deluge_core.set_upload_rate_limit,
-    "enable_dht" : None, # not a normal pref in that is is applied only on start
-    "use_upnp" : deluge_core.use_upnp,
-    "use_natpmp" : deluge_core.use_natpmp,
-    "use_utpex" : deluge_core.use_utpex,
+    "auto_seed_ratio" : None, # no need for a function, applied constantly
+    "max_download_speed_bps" : deluge_core.set_download_rate_limit,
+    "max_upload_speed_bps" : deluge_core.set_upload_rate_limit,
+    "enable_dht" : None, # not a normal pref in that is is applied only on start
+    "use_upnp" : deluge_core.use_upnp,
+    "use_natpmp" : deluge_core.use_natpmp,
+    "use_utpex" : deluge_core.use_utpex,
     }

 def N_(self):
     return self

-STATE_MESSAGES = ( N_("Queued"),
-                   N_("Checking"),
-                   N_("Connecting"),
-                   N_("Downloading Metadata"),
-                   N_("Downloading"),
-                   N_("Finished"),
-                   N_("Seeding"),
-                   N_("Allocating")
-                   )
+STATE_MESSAGES = (N_("Queued"),
+                  N_("Checking"),
+                  N_("Connecting"),
+                  N_("Downloading Metadata"),
+                  N_("Downloading"),
+                  N_("Finished"),
+                  N_("Seeding"),
+                  N_("Allocating"))

 # Exceptions

 class DelugeError(Exception):
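PREF_FUNCTIONS is a dispatch table: each preference key maps to the deluge_core setter that pushes it into the session, while entries mapped to None are handled elsewhere (enforced continuously, or only at startup). A minimal sketch of how such a table is typically applied; apply_prefs and get_pref are illustrative names, not part of this diff:

def apply_prefs(get_pref, pref_functions):
    # Push every preference that has a setter into the core; None
    # entries are applied through other code paths.
    for key, setter in pref_functions.items():
        if setter is not None:
            setter(get_pref(key))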
@@ -133,7 +132,7 @@ class InsufficientFreeSpaceError(DelugeError):
 class cached_data:
     def __init__(self, get_method, key):
         self.get_method = get_method
-        self.key = key
+        self.key = key

         self.timestamp = -1

@@ -150,11 +149,11 @@ class cached_data:

 class torrent_info:
     def __init__(self, filename, save_dir, compact):
-        self.filename = filename
-        self.save_dir = save_dir
-        self.compact = compact
+        self.filename = filename
+        self.save_dir = save_dir
+        self.compact = compact

-        self.user_paused = False # start out unpaused
+        self.user_paused = False # start out unpaused
         self.uploaded_memory = 0

         self.delete_me = False # set this to true, to delete it on next sync
@@ -382,7 +381,7 @@ class Manager:

         # Get additional data from our level
         ret['is_listening'] = deluge_core.is_listening()
-        ret['port'] = deluge_core.listening_port()
+        ret['port'] = deluge_core.listening_port()
         if self.dht_running == True:
             ret['DHT_nodes'] = deluge_core.get_DHT_info()

@@ -422,7 +421,7 @@ class Manager:
         if curr_index > 0:
             temp = self.state.queue[curr_index - 1]
             self.state.queue[curr_index - 1] = unique_ID
-            self.state.queue[curr_index] = temp
+            self.state.queue[curr_index] = temp
             if enforce_queue:
                 self.apply_queue()

@@ -431,7 +430,7 @@ class Manager:
         if curr_index < (len(self.state.queue) - 1):
             temp = self.state.queue[curr_index + 1]
             self.state.queue[curr_index + 1] = unique_ID
-            self.state.queue[curr_index] = temp
+            self.state.queue[curr_index] = temp
             if enforce_queue:
                 self.apply_queue()

@@ -476,7 +475,7 @@ class Manager:
         for index in range(len(self.state.queue)):
             unique_ID = self.state.queue[index]
             if (index < self.get_pref('max_active_torrents') or self.get_pref('max_active_torrents') == -1) \
-                and self.get_core_torrent_state(unique_ID, efficient)['is_paused'] \
+                and self.get_core_torrent_state(unique_ID, efficient)['is_paused'] \
                 and not self.is_user_paused(unique_ID):

                 # This torrent is a seed so skip all the free space checking
@@ -499,8 +498,8 @@ class Manager:
                 deluge_core.resume(unique_ID)

             elif (not self.get_core_torrent_state(unique_ID, efficient)['is_paused']) and \
-                ( (index >= self.get_pref('max_active_torrents') and \
-                    self.get_pref('max_active_torrents') != -1 ) or \
+                ((index >= self.get_pref('max_active_torrents') and \
+                    self.get_pref('max_active_torrents') != -1) or \
                 self.is_user_paused(unique_ID)):
                 deluge_core.pause(unique_ID)

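The condition pair in apply_queue above encodes the queueing policy: a torrent should be running when its queue position is inside the max_active_torrents window (or the limit is -1, meaning unlimited) and the user has not paused it; otherwise it should be paused, and deluge_core.resume/pause are only called when the core's current paused state disagrees. Restated as a standalone predicate, a small sketch with plain arguments instead of the Manager accessors:

def desired_running(index, max_active_torrents, user_paused):
    # Inside the active window (or unlimited via -1) and not paused by
    # the user means the torrent should be running; otherwise paused.
    within_limit = index < max_active_torrents or max_active_torrents == -1
    return within_limit and not user_paused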
@@ -566,9 +565,9 @@ class Manager:
                     self.save_fastresume_data(event['unique_ID'])
             elif event['event_type'] is self.constants['EVENT_TRACKER']:
                 unique_ID = event['unique_ID']
-                status = event['tracker_status']
-                message = event['message']
-                tracker = message[message.find('"')+1:message.rfind('"')]
+                status = event['tracker_status']
+                message = event['message']
+                tracker = message[message.find('"')+1:message.rfind('"')]

                 self.set_supp_torrent_state_val(unique_ID, "tracker_status",
                                                 (tracker, status))
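The tracker line above extracts whatever sits between the first and last double quote of the event message. A quick illustration (the message text is made up for the example):

message = 'tracker: "http://tracker.example.org/announce" announce OK'
tracker = message[message.find('"') + 1:message.rfind('"')]
# tracker is now 'http://tracker.example.org/announce'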
@@ -618,7 +617,7 @@ class Manager:

     def calc_swarm_speed(self, unique_ID):
         pieces_per_sec = deluge_stats.calc_swarm_speed(self.get_core_torrent_peer_info(unique_ID))
-        piece_length = self.get_core_torrent_state(unique_ID, efficiently=True)
+        piece_length = self.get_core_torrent_state(unique_ID, efficiently=True)

         return pieces_per_sec * piece_length

@@ -652,8 +651,7 @@ class Manager:
     # Efficient: use a saved state, if it hasn't expired yet
     def get_core_torrent_state(self, unique_ID, efficiently=True):
         if unique_ID not in self.saved_core_torrent_states.keys():
-            self.saved_core_torrent_states[unique_ID] = cached_data(deluge_core.get_torrent_state,
-                                                                    unique_ID)
+            self.saved_core_torrent_states[unique_ID] = cached_data(deluge_core.get_torrent_state, unique_ID)

         return self.saved_core_torrent_states[unique_ID].get(efficiently)

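get_core_torrent_state wraps deluge_core.get_torrent_state in a cached_data object, so repeated queries made with efficiently=True reuse the previous result until it is older than CACHED_DATA_EXPIRATION (one second). The body of cached_data.get() is not part of this diff; a plausible sketch of the mechanism, based only on the attributes visible above:

import time

CACHED_DATA_EXPIRATION = 1  # seconds, like the output of time.time()

class cached_data:
    def __init__(self, get_method, key):
        self.get_method = get_method
        self.key = key
        self.timestamp = -1  # -1 means "never fetched yet"

    def get(self, efficiently=True):
        # Refetch when caching is bypassed or the saved copy is stale
        if not efficiently or time.time() - self.timestamp > CACHED_DATA_EXPIRATION:
            self.data = self.get_method(self.key)
            self.timestamp = time.time()
        return self.data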
@@ -862,4 +860,3 @@ class Manager:

     def replace_trackers(self, unique_ID, trackers):
         return deluge_core.replace_trackers(unique_ID, trackers)
-

@@ -31,7 +31,7 @@ import time

 # Global variables. Caching of saved data, mostly

-old_peer_info = None
+old_peer_info = None
 old_peer_info_timestamp = None

 # Availability - how many complete copies are among our peers
@@ -58,7 +58,7 @@ def calc_availability(peer_info):
 # be too unreliable. But the client can smooth things out, if desired
 def calc_swarm_speed(peer_info):
     if old_peer_info is not None:
-        new_pieces = 0
+        new_pieces = 0
         peers_known = 0

         # List new peers
@@ -70,10 +70,10 @@ def calc_swarm_speed(peer_info):
             if new_IP in old_peer_IPs.keys():
                 # We know this peer from before, see what changed
                 peers_known = peers_known + 1
-                delta = sum(new_peer_IPs[new_IP].pieces) - sum(old_peer_IPs[new_IP].pieces)
+                delta = sum(new_peer_IPs[new_IP].pieces) - sum(old_peer_IPs[new_IP].pieces)

                 if delta >= 0:
-                    new_pieces = new_pieces + delta
+                    new_pieces = new_pieces + delta
                 else:
                     print "Deluge.stat.calc_swarm_speed: Bad Delta: ", delta, old_peer_IPs[new_IP].pieces, new_peer_IPs[new_IP].pieces

@@ -82,8 +82,8 @@ def calc_swarm_speed(peer_info):
         ret = float(new_pieces)/( float(peers_known) * time_delta )

     # Save info
-    old_peer_info = peer_info
+    old_peer_info = peer_info
     old_peer_info_timestamp = time.time()
-    old_peer_IPs = new_peer_IPs
+    old_peer_IPs = new_peer_IPs

     return ret
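Taken together, the two calc_swarm_speed functions estimate the swarm's download rate: the stats helper counts how many pieces the peers it already knew about completed since the last snapshot, normalises by the number of known peers and the elapsed time, and Manager.calc_swarm_speed multiplies the resulting piece rate by the piece length to get a byte rate. A condensed sketch of that arithmetic (peer bitfields are passed in directly here, whereas the real code keeps its previous snapshot in module-level globals):

def swarm_pieces_per_sec(old_peers, new_peers, time_delta):
    # old_peers / new_peers map a peer IP to its piece bitfield (list of 0/1)
    new_pieces = 0
    peers_known = 0
    for ip, pieces in new_peers.items():
        if ip in old_peers:
            peers_known += 1
            delta = sum(pieces) - sum(old_peers[ip])
            if delta >= 0:  # negative deltas are treated as bad data
                new_pieces += delta
    if peers_known == 0 or time_delta <= 0:
        return 0.0
    return float(new_pieces) / (float(peers_known) * time_delta)

# bytes per second, as in Manager.calc_swarm_speed:
#     speed = swarm_pieces_per_sec(old, new, dt) * piece_length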
@@ -262,16 +262,16 @@ def show_about_dialog(parent=None):

 def show_popup_warning(window, message):
     warner = gtk.MessageDialog(parent = window,
-        flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
+        flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
         buttons= gtk.BUTTONS_OK,
         message_format=message,
-        type = gtk.MESSAGE_WARNING)
+        type = gtk.MESSAGE_WARNING)
     warner.run()
     warner.destroy()

 def show_popup_question(window, message):
     asker = gtk.MessageDialog(parent = window,
-        flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
+        flags = gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,
         buttons = gtk.BUTTONS_YES_NO,
         message_format=message,
         type=gtk.MESSAGE_QUESTION)
@@ -331,5 +331,3 @@ def show_directory_chooser_dialog(parent=None, title=None):
         result = None
     chooser.destroy()
     return result
-
-

@@ -72,7 +72,7 @@ class PluginManager:
         return self.available_plugins[name]

     def enable_plugin(self, name):
-        plugin = self.available_plugins[name]
+        plugin = self.available_plugins[name]
         self.enabled_plugins[name] = plugin.enable(self.core, self.interface)

     def get_enabled_plugins(self):

@@ -99,6 +99,7 @@ DEFAULT_PREFS = {
     "window_x_pos" : 0,
     "window_y_pos" : 0,
 }
+
 class Preferences:
     def __init__(self, filename=None, global_defaults=True, defaults=None):
         self.mapping = {}
@@ -177,4 +178,3 @@ class Preferences:
     def printout(self):
         for key in self.mapping.keys():
             print key, ':', self.mapping[key]
-
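Preferences keeps its values in a plain dict (self.mapping), with DEFAULT_PREFS supplying fall-back values such as window_x_pos and window_y_pos, and printout() simply dumps every stored key. The accessors are not shown in this diff; a minimal sketch of how a defaults-backed lookup like this is commonly written (the get/set names are assumptions):

DEFAULT_PREFS = {
    "window_x_pos" : 0,
    "window_y_pos" : 0,
}

class Preferences:
    def __init__(self, defaults=DEFAULT_PREFS):
        self.mapping = {}
        self.defaults = defaults

    def set(self, key, value):
        self.mapping[key] = value

    def get(self, key):
        # Explicitly set values win; otherwise fall back to the defaults
        if key in self.mapping:
            return self.mapping[key]
        return self.defaults[key]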