config fixes
parent 41dbab69b2
commit a311bcbcec
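This commit replaces method-style access to Deluge's ConfigManager (config.get('key'), config.set('key', value), config.get_config()) with dictionary-style access (config['key'], config['key'] = value, config.config) across the blocklist plugin, the web UI and related modules. A minimal sketch of the pattern, assuming only that the config object behaves like a dict with a save() method; the DictConfig class below is an illustrative stand-in, not Deluge's real ConfigManager:

    # Illustrative stand-in for deluge.configmanager.ConfigManager; the real
    # class persists settings to a config file, this one only mimics the
    # mapping interface that the hunks below rely on.
    class DictConfig(dict):
        def save(self):
            pass  # the real ConfigManager writes its file to disk here

    config = DictConfig({"base": "", "template": "white", "sessions": []})

    # Old style, removed by this commit:
    #   config.set("base", "/deluge")
    #   base = config.get("base")
    # New style, used throughout the hunks below:
    config["base"] = "/deluge"
    base = config["base"]
    config.save()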
@@ -541,7 +541,7 @@ class Core(
     def export_set_config(self, config):
         """Set the config with values from dictionary"""
-        config = deluge.common.pythonize(config)
+        #config = deluge.common.pythonize(config)
         # Load all the values into the configuration
         for key in config.keys():
             if isinstance(config[key], unicode) or isinstance(config[key], str):

@@ -2,19 +2,19 @@
 # core.py
 #
 # Copyright (C) 2008 Andrew Resch ('andar') <andrewresch@gmail.com>
-#
+#
 # Deluge is free software.
-#
+#
 # You may redistribute it and/or modify it under the terms of the
 # GNU General Public License, as published by the Free Software
 # Foundation; either version 3 of the License, or (at your option)
 # any later version.
-#
+#
 # deluge is distributed in the hope that it will be useful,
 # but WITHOUT ANY WARRANTY; without even the implied warranty of
 # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 # See the GNU General Public License for more details.
-#
+#
 # You should have received a copy of the GNU General Public License
 # along with deluge. If not, write to:
 # The Free Software Foundation, Inc.,

@@ -66,17 +66,17 @@ FORMATS = {
     'p2bgz': ["PeerGuardian P2B (GZip)", PGReader]
 }

-class Core(CorePluginBase):
+class Core(CorePluginBase):
     def enable(self):
         log.debug('Blocklist: Plugin enabled..')

         self.is_downloading = False
         self.is_importing = False
         self.num_blocked = 0
         self.file_progress = 0.0

         self.core = component.get("Core")

         self.config = deluge.configmanager.ConfigManager("blocklist.conf", DEFAULT_PREFS)
         if self.config["load_on_start"]:
             self.export_import(self.need_new_blocklist())

@@ -86,13 +86,13 @@ class Core(CorePluginBase):
             self.update_timer = gobject.timeout_add(
                 self.config["check_after_days"] * 24 * 60 * 60 * 1000,
                 self.download_blocklist, True)

     def disable(self):
         log.debug("Reset IP Filter..")
         component.get("Core").export_reset_ip_filter()
         self.config.save()
         log.debug('Blocklist: Plugin disabled')

     def update(self):
         pass

@@ -100,7 +100,7 @@ class Core(CorePluginBase):
     def export_download(self, _import=False):
         """Download the blocklist specified in the config as url"""
         self.download_blocklist(_import)

     def export_import(self, download=False, force=False):
         """Import the blocklist from the blocklist.cache, if load is True, then
         it will download the blocklist file if needed."""

@@ -108,13 +108,13 @@ class Core(CorePluginBase):
     def export_get_config(self):
         """Returns the config dictionary"""
-        return self.config.get_config()
+        return self.config.config

     def export_set_config(self, config):
         """Sets the config based on values in 'config'"""
         for key in config.keys():
             self.config[key] = config[key]

     def export_get_status(self):
         """Returns the status of the plugin."""
         status = {}

@@ -124,40 +124,40 @@ class Core(CorePluginBase):
             status["state"] = "Importing"
         else:
             status["state"] = "Idle"

         status["num_blocked"] = self.num_blocked
         status["file_progress"] = self.file_progress
         status["file_type"] = self.config["file_type"]
         status["file_url"] = self.config["file_url"]
         status["file_size"] = self.config["file_size"]
         status["file_date"] = self.config["file_date"]

         return status

     ####

     def on_download_blocklist(self, load):
         self.is_downloading = False
         if load:
             self.export_import()

     def import_blocklist(self, download=False, force=False):
         """Imports the downloaded blocklist into the session"""
         if self.is_downloading:
             return

         if download:
             if force or self.need_new_blocklist():
                 self.download_blocklist(True)
                 return

-        self.is_importing = True
+        self.is_importing = True
         log.debug("Reset IP Filter..")
         component.get("Core").export_reset_ip_filter()

         self.num_blocked = 0

         # Open the file for reading
         try:
             read_list = FORMATS[self.config["listtype"]][1](

@@ -177,21 +177,21 @@ class Core(CorePluginBase):
             log.debug("Exception during import: %s", e)
         else:
             log.debug("Blocklist import complete!")

         self.is_importing = False

     def download_blocklist(self, load=False):
         """Runs download_blocklist_thread() in a thread and calls on_download_blocklist
         when finished. If load is True, then we will import the blocklist
         upon download completion."""
         if self.is_importing:
             return

         self.is_downloading = True
         threading.Thread(
-            target=self.download_blocklist_thread,
+            target=self.download_blocklist_thread,
             args=(self.on_download_blocklist, load)).start()

     def download_blocklist_thread(self, callback, load):
         """Downloads the blocklist specified by 'url' in the config"""
         def _call_callback(callback, load):
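download_blocklist() above hands the blocking download to a worker thread and, as the following hunks show, reports completion back to the GTK main loop with gobject.idle_add. A small sketch of that thread-plus-callback pattern, assuming PyGTK's gobject module and a running main loop; the function names are illustrative:

    import threading
    import gobject  # PyGTK main-loop integration, as used by the plugin

    def fetch_blocklist(callback, load):
        """Worker thread: must not touch GTK state directly."""
        # ... blocking download work would happen here ...
        # Hand the result back to the main loop instead of calling directly.
        gobject.idle_add(callback, load)

    def on_fetch_done(load):
        print "download finished, import now?", load
        return False  # returning False removes the idle handler

    threading.Thread(target=fetch_blocklist, args=(on_fetch_done, True)).start()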
@@ -203,10 +203,10 @@ class Core(CorePluginBase):
                 if fp > 1.0:
                     fp = 1.0
                 self.file_progress = fp

         import socket
         socket.setdefaulttimeout(self.config["timeout"])

         for i in xrange(self.config["try_times"]):
             log.debug("Attempting to download blocklist %s", self.config["url"])
             try:

@@ -225,10 +225,10 @@ class Core(CorePluginBase):
                 list_stats = os.stat(deluge.configmanager.get_config_dir("blocklist.cache"))
                 self.config["file_date"] = datetime.datetime.fromtimestamp(list_stats.st_mtime).ctime()
                 self.config["file_size"] = list_size = list_stats.st_size

                 gobject.idle_add(_call_callback, callback, load)
                 return

     def need_new_blocklist(self):
         """Returns True if a new blocklist file should be downloaded"""
         try:

@@ -247,5 +247,5 @@ class Core(CorePluginBase):
             if current_time >= (list_time + datetime.timedelta(days=self.config["check_after_days"])):
                 return True

-        return False
+        return False
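need_new_blocklist() above decides whether to fetch a fresh list by comparing the cache file's age against the check_after_days setting. A standalone sketch of that check; the path and threshold arguments are illustrative:

    import datetime
    import os

    def blocklist_is_stale(cache_path, check_after_days):
        """True if the cached blocklist is missing or older than the threshold."""
        try:
            list_stats = os.stat(cache_path)
        except OSError:
            return True  # no cache yet, so a download is needed
        list_time = datetime.datetime.fromtimestamp(list_stats.st_mtime)
        current_time = datetime.datetime.today()
        return current_time >= list_time + datetime.timedelta(days=check_after_days)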
@@ -8,10 +8,6 @@ popup_icon = "/static/images/tango/emblem-symbolic-link.png" /*the best i could

 Plugins = {}

-function _(str) {
-    return str /*#todo : translations; see Deluge.Strings.get*/
-}
-
 Plugins.Label = {
     /*onload:*/
     initialize: function() {

@@ -42,7 +38,7 @@ Plugins.Label = {
         func = ($defined(this[action])) ? this[action] : $empty;
         func(label_id);
     },

     /*menu callbacks:*/
     add: function(label_id) {
         alert("Add Label:" + label_id);

@@ -153,4 +153,4 @@ class Core(CorePluginBase):

     def export_get_config(self):
         "returns the config dictionary"
-        return self.config.get_config()
+        return self.config.config

@@ -119,7 +119,7 @@ class Core(CorePluginBase):

     def export_get_config(self):
         "returns the config dictionary"
-        return self.config.get_config()
+        return self.config.config
 """

 INIT = """

@@ -24,11 +24,11 @@ def get_wsgi_application(base_url, config_dir):

     utils.set_config_defaults()

-    config.set('base','/deluge')
-    config.set('disallow',{
+    config['base'] = '/deluge'
+    config['disallow'] = {
         '/daemon/control':'running as an apache user',
         '/config/server':'running as an apache-user'
-    })
+    }

     utils.apply_config()

@@ -50,7 +50,7 @@ config_page_manager = component.get("ConfigPageManager")
 class WebCfgForm(forms.Form):
     "config base for webui"
     def initial_data(self):
-        return config.get_config()
+        return config.config

     def save(self, data):
         utils.validate_config(data)

@@ -37,7 +37,9 @@ import pkg_resources
 from deluge.ui.client import sclient
 import components
 from deluge.log import LOG as log
+from webserver_common import CONFIG_DEFAULTS
+config = ConfigManager("webui06.conf", CONFIG_DEFAULTS)

 # Initialize gettext
 if deluge.common.windows_check() or deluge.common.osx_check():

@@ -65,7 +67,6 @@ import utils

 ## Init ##
-config = ConfigManager("webui06.conf")
 random.seed()
 web.webapi.internalerror = deluge_debugerror

@@ -104,10 +105,10 @@ def create_webserver(debug = False, base_url =None):

     utils.set_config_defaults()
     if base_url:
-        config.set('base', base_url)
+        config['base'] = base_url
     else:
-        config.set('base','')
-        config.set('disallow',{})
+        config['base'] = ''
+        config['disallow'] = {}
     utils.apply_config()

@@ -119,11 +120,11 @@ def create_webserver(debug = False, base_url =None):

     wsgi_app = WsgiApplication(middleware)

-    server_address=("0.0.0.0", int(config.get('port')))
+    server_address=("0.0.0.0", int(config['port']))
     server = CherryPyWSGIServer(server_address, wsgi_app, server_name="localhost")

     https = False
-    if config.get("https"):
+    if config["https"]:
         import os
         from deluge.common import get_default_config_dir
         cert_path = os.path.join(get_default_config_dir("ssl") ,"deluge.cert.pem" )

@@ -98,7 +98,7 @@ class json_rpc:
         web.header("Content-Type", "application/x-json")
         ck = cookies()
         id = 0
-        if not(ck.has_key("session_id") and ck["session_id"] in utils.config.get("sessions")):
+        if not(ck.has_key("session_id") and ck["session_id"] in utils.config["sessions"]):
             return json_error("not authenticated", id)

         try:

@@ -172,7 +172,7 @@ class json_rpc:
         }

     def get_webui_config(self):
-        return dict([x for x in utils.config.get_config().iteritems() if not x[0].startswith("pwd")])
+        return dict([x for x in utils.config.config().iteritems() if not x[0].startswith("pwd")])

     def set_webui_config(self, data):
         utils.validate_config(data)

@@ -188,10 +188,10 @@ class json_rpc:
         return render.get_templates()

     def download_torrent_from_url(self, url):
-        """
+        """
         input:
             url: the url of the torrent to download

         returns:
             filename: the temporary file name of the torrent file
         """

@@ -202,15 +202,15 @@ class json_rpc:
         filename, headers = urllib.urlretrieve(url, tmp_file)
         log.debug("filename: %s", filename)
         return filename

     def get_torrent_info(self, filename):
         """
         Goal:
             allow the webui to retrieve data about the torrent

         input:
             filename: the filename of the torrent to gather info about

         returns:
         {
             "filename": the torrent file

@@ -231,7 +231,7 @@ class json_rpc:
         }]
         """
         import os

         for torrent in torrents:
             filename = os.path.basename(torrent['path'])
             fdump = open(torrent['path'], 'r').read()

@@ -49,7 +49,7 @@ def check_session(func):
         #check session:
         vars = web.input(redir_after_login = None)
         ck = cookies()
-        if ck.has_key("session_id") and ck["session_id"] in utils.config.get("sessions"):
+        if ck.has_key("session_id") and ck["session_id"] in utils.config["sessions"]:
             return func(self, name) #check_session:ok
         elif vars.redir_after_login:
             utils.seeother(url("/login",redir=self_url()))

@@ -135,16 +135,3 @@ def remote(func):
             print traceback.format_exc()
     deco.__name__ = func.__name__
     return deco
-
-"""
-obsolete: -> using check-session.
-def check_allowed(capability):
-    def check_allowed_inner(func):
-        def deco(self, name = None): #check allowed (capablity)
-            if capability in config.get("disallow"):
-                return error_page("Not allowed to: '%s' , because:'%s'"
-                    % (capability , config.get("disallow")[capability]))
-            return func(self, name)
-        return deco
-    return check_allowed_inner
-"""

@@ -310,8 +310,8 @@ class connect:
         connected = None

         connect_list = ["http://localhost:58846"]
-        if config.get('daemon') <> "http://localhost:58846":
-            connect_list = [config.get('daemon')] + connect_list
+        if config['daemon'] <> "http://localhost:58846":
+            connect_list = [config['daemon']] + connect_list

         return render.connect(connect_list, connected ,restart)

@@ -390,7 +390,7 @@ route("/static/(.*)", static)
 class template_static(static_handler):
     def get_base_dir(self):
         return os.path.join(os.path.dirname(__file__),
-            'templates/%s/static' % config.get('template'))
+            'templates/%s/static' % config['template'])
 route("/template/static/(.*)", template_static)

 class template_render:

@@ -398,9 +398,9 @@ class template_render:

     def GET(self, name):
         web.header("Content-type",utils.guess_mime_type(name))
-        #security : assumes config.get('template') returns a safe subdir.
+        #security : assumes config['template'] returns a safe subdir.
         basepath = os.path.normpath(os.path.join(os.path.dirname(__file__),
-            'templates/%s/render' % config.get('template')))
+            'templates/%s/render' % config['template']))
         filename = os.path.normpath(os.path.join(basepath,name))
         if not filename.startswith(basepath):
             #hack detected?
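The template_render handler above defends against path traversal by normalising the joined path and requiring it to stay inside the template base directory. A small sketch of that guard; names are illustrative:

    import os

    def safe_template_path(basedir, name):
        """Return the resolved file path, or None if 'name' escapes basedir."""
        basepath = os.path.normpath(basedir)
        filename = os.path.normpath(os.path.join(basepath, name))
        if not filename.startswith(basepath + os.sep):
            return None  # e.g. 'name' contained "../" components
        return filename

The hunk itself only checks startswith(basepath); appending os.sep in the sketch also rejects sibling directories whose names merely share the prefix.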
@@ -54,13 +54,13 @@ class subclassed_render(object):
         self.plugin_renderers = []

     def apply_cfg(self):
-        self.cache = config.get('cache_templates')
+        self.cache = config['cache_templates']
         self.renderers = []
         self.template_cache = {}
         self.webui_path = os.path.dirname(__file__)

         #load template-meta-data
-        self.cfg_template = config.get('template')
+        self.cfg_template = config['template']
         template_path = os.path.join(self.webui_path, 'templates/%s/' % self.cfg_template)
         if not os.path.exists(template_path):
             template_path = os.path.join(self.webui_path, 'templates/white/')

@@ -155,7 +155,7 @@ def template_part_stats():
         return '[not connected]'

 def get_config(var):
-    return config.get(var)
+    return config[var]

 irow = 0
 def altrow(reset = False):

@@ -54,7 +54,7 @@ from webserver_common import TORRENT_KEYS, CONFIG_DEFAULTS
 from deluge.ui.client import sclient, aclient

 webui_plugin_manager = component.get("WebPluginManager")
-config = ConfigManager("webui06.conf")
+config = ConfigManager("webui06.conf", CONFIG_DEFAULTS)

 #async-proxy: map callback to a a dict-setter
 def dict_cb(key,d):

@@ -70,18 +70,18 @@ def setcookie(key, val):
 #really simple sessions, to bad i had to implement them myself.
 def start_session():
     session_id = str(random.random())
-    config.set("sessions", config.get("sessions") + [session_id])
-    if len(config.get("sessions")) > 30: #store a max of 20 sessions.
-        config.set("sessions",config["sessions"][:-20])
+    config["sessions"] = config["sessions"] + [session_id]
+    if len(config["sessions"]) > 30: #store a max of 20 sessions.
+        config["sessions"] = config["sessions"][:-20]
     setcookie("session_id", session_id)
     config.save()

 def end_session():
     session_id = getcookie("session_id")
     setcookie("session_id","")
-    if session_id in config.get("sessions"):
+    if session_id in config["sessions"]:
         config["sessions"].remove(session_id)
-        config.set("sessions", config["sessions"])
+        config["sessions"] = config["sessions"]
 #/sessions

 def seeother(url, *args, **kwargs):
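start_session()/end_session() above keep a capped list of session ids in the config and mirror the current id into a cookie. A self-contained sketch of the same idea with dictionary-style access; a plain dict stands in for the ConfigManager and the cap mirrors the 30/20 values above:

    import random

    config = {"sessions": []}  # stand-in for the ConfigManager used above

    def save_config():
        pass  # the real code calls config.save()

    def start_session(set_cookie):
        """Create a session id, remember it in the config, hand it to a cookie setter."""
        session_id = str(random.random())
        config["sessions"] = config["sessions"] + [session_id]
        if len(config["sessions"]) > 30:
            # keep the 20 most recent ids (the hunk above trims from the other end)
            config["sessions"] = config["sessions"][-20:]
        set_cookie("session_id", session_id)
        save_config()
        return session_id

    def end_session(session_id, set_cookie):
        set_cookie("session_id", "")
        if session_id in config["sessions"]:
            config["sessions"].remove(session_id)
            save_config()

A production session store would use a cryptographically strong token (for example from os.urandom) rather than random.random(); the sketch keeps the original scheme so it maps onto the hunk.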
@@ -206,8 +206,8 @@ def daemon_start_localhost(port):
     subprocess.Popen(["deluged", "-p %s" % port])

 def daemon_connect(uri):
-    if config.get('daemon') <> uri:
-        config.set('daemon', uri)
+    if config['daemon'] <> uri:
+        config['daemon'] = uri
         config.save()

     sclient.set_core_uri(uri)

@@ -231,15 +231,15 @@ def update_pwd(pwd):
     m.update(salt)
     m.update(pwd)
     #
-    config.set("pwd_salt", salt)
-    config.set("pwd_md5", m.digest())
+    config["pwd_salt"] = salt
+    config["pwd_md5"] = m.digest()
     config.save()

 def check_pwd(pwd):
     m = md5()
-    m.update(config.get('pwd_salt'))
+    m.update(config['pwd_salt'])
     m.update(pwd)
-    return (m.digest() == config.get('pwd_md5'))
+    return (m.digest() == config['pwd_md5'])

 def validate_config(cfg_dict):
     """
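update_pwd()/check_pwd() above store a salt and a salted digest in the config and compare digests on login. A self-contained sketch of that scheme with dict-style storage; it keeps the md5 digest the legacy code uses, although a modern implementation would prefer a dedicated password hash such as bcrypt or scrypt:

    import hashlib
    import os

    config = {}  # stand-in for the ConfigManager used above

    def update_pwd(pwd):
        salt = os.urandom(16)  # the original code derives its salt differently
        m = hashlib.md5()
        m.update(salt)
        m.update(pwd)
        config["pwd_salt"] = salt
        config["pwd_md5"] = m.digest()

    def check_pwd(pwd):
        m = hashlib.md5()
        m.update(config["pwd_salt"])
        m.update(pwd)
        return m.digest() == config["pwd_md5"]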
@@ -254,6 +254,9 @@ def validate_config(cfg_dict):


 def set_config_defaults():
+    """
+    OBSOLETE, TODO REMOVE THIS !!
+    """
     changed = False
     for key, value in CONFIG_DEFAULTS.iteritems():
         if not key in config.config:

@@ -261,8 +264,8 @@ def set_config_defaults():
             changed = True

     from render import render
-    if not config.get("template") in render.get_templates():
-        config.set("template", CONFIG_DEFAULTS["template"])
+    if not config["template"] in render.get_templates():
+        config["template"] = CONFIG_DEFAULTS["template"]
         changed = True

     if changed:

@@ -272,11 +275,10 @@ def apply_config():
     #etc, mostly for apache:
     from render import render
     try:
-        #sclient.set_core_uri(config.get('daemon'))
-        daemon_connect(config.get('daemon'))
+        daemon_connect(config['daemon'])
     except Exception,e:
-        log.debug("error setting core uri:%s:%s:%s" % (config.get('daemon'),e,e.message))
-    render.set_global('base', config.get('base'))
+        log.debug("error setting core uri:%s:%s:%s" % (config['daemon'], e, e.message))
+    render.set_global('base', config['base'])
     render.apply_cfg()

     #exceptions:

@@ -73,9 +73,9 @@ namespace libtorrent
         // uses username and password
         http_pw
     };

     proxy_type type;

 };

 struct TORRENT_EXPORT session_settings

@@ -153,7 +153,7 @@ namespace libtorrent
         // the number of seconds to wait until giving up on a
         // tracker request if it hasn't finished
         int tracker_completion_timeout;

         // the number of seconds where no data is received
         // from the tracker until it should be considered
         // as timed out

@@ -183,7 +183,7 @@ namespace libtorrent
         // all the pieces. i.e. the actual number of requests
         // depends on the download rate and this number.
         float request_queue_time;

         // the number of outstanding block requests a peer is
         // allowed to queue up in the client. If a peer sends
         // more requests than this (before the first one has

@@ -191,7 +191,7 @@ namespace libtorrent
         // the higher this is, the faster upload speeds the
         // client can get to a single peer.
         int max_allowed_in_request_queue;

         // the maximum number of outstanding requests to
         // send to a peer. This limit takes precedence over
         // request_queue_time.

@@ -204,23 +204,23 @@ namespace libtorrent
         // doing localized accesses and also to make it easier
         // to identify bad peers if a piece fails the hash check.
         int whole_pieces_threshold;

         // the number of seconds to wait for any activity on
         // the peer wire before closing the connectiong due
         // to time out.
         int peer_timeout;

         // same as peer_timeout, but only applies to url-seeds.
         // this is usually set lower, because web servers are
         // expected to be more reliable.
         int urlseed_timeout;

         // controls the pipelining size of url-seeds
         int urlseed_pipeline_size;

         // time to wait until a new retry takes place
         int urlseed_wait_retry;

         // sets the upper limit on the total number of files this
         // session will keep open. The reason why files are
         // left open at all is that some anti virus software

@@ -234,7 +234,7 @@ namespace libtorrent
         // number of connections and the number of files
         // limits so their sum is slightly below it.
         int file_pool_size;

         // false to not allow multiple connections from the same
         // IP address. true will allow it.
         bool allow_multiple_connections_per_ip;

@@ -242,7 +242,7 @@ namespace libtorrent
         // the number of times we can fail to connect to a peer
         // before we stop retrying it.
         int max_failcount;

         // the number of seconds to wait to reconnect to a peer.
         // this time is multiplied with the failcount.
         int min_reconnect_time;

@@ -391,7 +391,7 @@ namespace libtorrent
         // the number of seconds in between recalculating which
         // torrents to activate and which ones to queue
         int auto_manage_interval;

         // when a seeding torrent reaches eaither the share ratio
         // (bytes up / bytes down) or the seed time ratio
         // (seconds as seed / seconds as downloader) or the seed

@@ -461,7 +461,7 @@ namespace libtorrent
             , service_port(0)
             , max_fail_count(20)
         {}

         // the maximum number of peers to send in a
         // reply to get_peers
         int max_peers_reply;

@@ -469,11 +469,11 @@ namespace libtorrent
         // the number of simultanous "connections" when
         // searching the DHT.
         int search_branching;

         // the listen port for the dht. This is a UDP port.
         // zero means use the same as the tcp interface
         int service_port;

         // the maximum number of times a node can fail
         // in a row before it is removed from the table.
         int max_fail_count;

@@ -501,7 +501,7 @@ namespace libtorrent
         enum enc_level
         {
             plaintext, // use only plaintext encryption
-            rc4, // use only rc4 encryption
+            rc4, // use only rc4 encryption
             both // allow both
         };

@@ -114,7 +114,7 @@ void upnp::discover_device()

 void upnp::discover_device_impl()
 {
-    const char msearch[] =
+    const char msearch[] =
         "M-SEARCH * HTTP/1.1\r\n"
         "HOST: 239.255.255.250:1900\r\n"
         "ST:upnp:rootdevice\r\n"

@@ -220,7 +220,7 @@ void upnp::delete_mapping(int mapping)
 #endif

     if (m.protocol == none) return;

     for (std::set<rootdevice>::iterator i = m_devices.begin()
         , end(m_devices.end()); i != end; ++i)
     {

@@ -257,7 +257,7 @@ void upnp::resend_request(error_code const& e)
         disable("no UPnP router found");
         return;
     }

     for (std::set<rootdevice>::iterator i = m_devices.begin()
         , end(m_devices.end()); i != end; ++i)
     {

@@ -349,7 +349,7 @@ void upnp::on_reply(udp::endpoint const& from, char* buffer
         }
 #endif
         return;
     }
     }
     std::vector<ip_route> routes = enum_routes(m_io_service, ec);
     if (m_ignore_non_routers && std::find_if(routes.begin(), routes.end()

@@ -559,7 +559,7 @@ void upnp::post(upnp::rootdevice const& d, std::string const& soap
     TORRENT_ASSERT(d.upnp_connection);

     std::stringstream header;

     header << "POST " << d.path << " HTTP/1.0\r\n"
         "Host: " << d.hostname << ":" << d.port << "\r\n"
         "Content-Type: text/xml; charset=\"utf-8\"\r\n"

@@ -572,7 +572,7 @@ void upnp::post(upnp::rootdevice const& d, std::string const& soap
     m_log << time_now_string()
         << " ==> sending: " << header.str() << std::endl;
 #endif

 }

 void upnp::create_port_mapping(http_connection& c, rootdevice& d, int i)

@@ -590,11 +590,11 @@ void upnp::create_port_mapping(http_connection& c, rootdevice& d, int i)
 #endif
         return;
     }

     std::string soap_action = "AddPortMapping";

     std::stringstream soap;

     soap << "<?xml version=\"1.0\"?>\n"
         "<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" "
         "s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">"

@@ -709,7 +709,7 @@ void upnp::delete_port_mapping(rootdevice& d, int i)
     }

     std::stringstream soap;

     std::string soap_action = "DeletePortMapping";

     soap << "<?xml version=\"1.0\"?>\n"

@@ -721,7 +721,7 @@ void upnp::delete_port_mapping(rootdevice& d, int i)
         "<NewExternalPort>" << d.mapping[i].external_port << "</NewExternalPort>"
         "<NewProtocol>" << (d.mapping[i].protocol == udp ? "UDP" : "TCP") << "</NewProtocol>";
     soap << "</u:" << soap_action << "></s:Body></s:Envelope>";

     post(d, soap.str(), soap_action);
 }

@@ -738,7 +738,7 @@ namespace
         dst.clear();
         while (*src) dst.push_back(tolower(*src++));
     }

     bool string_equal_nocase(char const* lhs, char const* rhs)
     {
         while (tolower(*lhs) == tolower(*rhs))

@@ -900,7 +900,10 @@ void upnp::on_upnp_xml(error_code const& e
             return;
         }
     }

     if (s.url_base.empty()) d.control_url = s.control_url;
     else d.control_url = s.url_base + s.control_url;
+
+    if (s.url_base.empty()) d.control_url = s.control_url;
+    else d.control_url = s.url_base + s.control_url;

@@ -949,7 +952,7 @@ void upnp::disable(char const* msg)
         i->protocol = none;
         m_callback(i - m_mappings.begin(), 0, msg);
     }

     m_devices.clear();
     error_code ec;
     m_broadcast_timer.cancel(ec);

@@ -966,7 +969,7 @@ namespace
         bool exit;
         int error_code;
     };

     void find_error_code(int type, char const* string, error_code_parse_state& state)
     {
         if (state.exit) return;

@@ -989,7 +992,7 @@ namespace
         int code;
         char const* msg;
     };

     error_code_t error_codes[] =
     {
         {402, "Invalid Arguments"}

@@ -1031,9 +1034,9 @@ void upnp::on_upnp_map_response(error_code const& e
         d.disabled = true;
         return;
     }

     if (m_closing) return;

     // error code response may look like this:
     // <s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/"
     //     s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">

@@ -1075,7 +1078,7 @@ void upnp::on_upnp_map_response(error_code const& e
             << " <== got error message: " << s.error_code << std::endl;
     }
 #endif

     mapping_t& m = d.mapping[mapping];

     if (s.error_code == 725)
setup.py
@@ -88,6 +88,7 @@ _extra_compile_args = [
     "-DNDEBUG",
     "-DTORRENT_USE_OPENSSL=1",
     "-O2",
+    "-DTORRENT_UPNP_LOGGING",
     ]

 if windows_check():

@@ -169,15 +170,15 @@ else:
         'ssl',
         'z'
     ]

     if not windows_check():
         dynamic_lib_extension = ".so"
         if osx_check():
             dynamic_lib_extension = ".dylib"

         _lib_extensions = ['-mt-1_36', '-mt-1_35', '-mt']

-        # Modify the libs if necessary for systems with only -mt boost libs
+        # Modify the libs if necessary for systems with only -mt boost libs
         for lib in _libraries:
             if lib[:6] == "boost_":
                 for lib_prefix in _library_dirs:

@@ -187,7 +188,7 @@ else:
                 _libraries[_libraries.index(lib)] = lib + lib_suffix
                 lib = lib + lib_suffix
                 break

 _sources = glob.glob("./libtorrent/src/*.cpp") + \
     glob.glob("./libtorrent/src/*.c") + \
     glob.glob("./libtorrent/src/kademlia/*.cpp") + \

@@ -213,7 +214,7 @@ if windows_check() or not os.path.exists(os.path.join(sysconfig.get_config_var("
         library_dirs = _library_dirs,
         sources = _sources
     )

 _ext_modules = [libtorrent]

 class build_trans(cmd.Command):