mirror of
https://github.com/codex-storage/deluge.git
synced 2025-02-02 14:44:21 +00:00
much work...
This commit is contained in:
parent
dc86045220
commit
c88442abe4
313
library/pytorrent.py
Normal file
313
library/pytorrent.py
Normal file
@ -0,0 +1,313 @@
|
|||||||
|
#
|
||||||
|
# Copyright (C) 2006 Zach Tibbitts <zach@collegegeek.org>
|
||||||
|
# Copyright (C) 2006 Alon Zakai ('Kripken') <kripkensteiner@gmail.com>
|
||||||
|
#
|
||||||
|
# This program is free software; you can redistribute it and/or modify
|
||||||
|
# it under the terms of the GNU General Public License as published by
|
||||||
|
# the Free Software Foundation; either version 2, or (at your option)
|
||||||
|
# any later version.
|
||||||
|
#
|
||||||
|
# This program is distributed in the hope that it will be useful,
|
||||||
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||||
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||||
|
# GNU General Public License for more details.
|
||||||
|
#
|
||||||
|
# You should have received a copy of the GNU General Public License
|
||||||
|
# along with this program; if not, write to the Free Software
|
||||||
|
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
||||||
|
#
|
||||||
|
|
||||||
|
# pytorrent-manager: backend/non-gui routines, that are not part of the core
|
||||||
|
# pytorrent module. pytorrent itself is mainly an interface to libtorrent,
|
||||||
|
# with some arrangements of exception classes for Python, etc.; also, some
|
||||||
|
# additional code that fits in well at the C++ level of libtorrent. All other
|
||||||
|
# backend routines should be in pytorrent-manager.
|
||||||
|
|
||||||
|
|
||||||
|
import pytorrent_core
|
||||||
|
import os, shutil
|
||||||
|
import pickle
|
||||||
|
import time
|
||||||
|
|
||||||
|
|
||||||
|
# Information for a single torrent
|
||||||
|
|
||||||
|
# Information for a single torrent known to the system.
class torrent:
    def __init__(self, filename, save_dir, compact):
        # Where the cached .torrent file lives, where the data is saved,
        # and whether compact (no-sparse-file) allocation is used.
        self.filename = filename
        self.save_dir = save_dir
        self.compact = compact

        self.user_paused = False  # start out unpaused
        self.uploaded_memory = 0  # upload total remembered across sessions

        # NOTE(review): should pause state live here, or in libtorrent? -- confirm

        self.filter_out = []  # per-file download filter flags

        self.delete_me = False  # set to True to delete it on the next sync
|
||||||
|
|
||||||
|
|
||||||
|
# The persistent state of the torrent system. Everything in this will be pickled
|
||||||
|
|
||||||
|
# The persistent state of the torrent system.  Everything in this class is
# pickled to disk by manager.quit() and reloaded at the next startup.
class persistent_state:
    def __init__(self):
        # Basic preferences (a dictionary, so adding new ones is easy)
        self.preferences = {}

        # -1 means "no limit" / "use the libtorrent default" throughout
        self.max_half_open = -1
        self.download_rate_limit = -1
        self.upload_rate_limit = -1
        self.listen_on = [6881, 9999]  # port range to try listening on
        self.max_uploads = -1  # per torrent, read the libtorrent docs
        self.max_connections = 80

        self.use_DHT = True
        self.base_dir = "~/Temp"
        # BUG FIX: the original read "self.torrents_subdir + 'torrentfiles'",
        # a no-op expression that raised AttributeError and never assigned
        # the attribute.  It must be an assignment.
        self.torrents_subdir = "torrentfiles"
        self.max_active_torrents = 1
        self.auto_seed_ratio = -1  # -1 disables auto-seed-ratio handling

        # Prepare queue (queue is pickled, just like everything else).
        # queue[x] is the unique_ID of the x-th queue position.  Simple.
        self.queue = []

        # torrent objects known to the system
        self.torrents = []
|
||||||
|
|
||||||
|
|
||||||
|
# The manager for the torrent system
|
||||||
|
|
||||||
|
class manager:
|
||||||
|
def __init__(self, client_ID, version, user_agent, state_filename):
|
||||||
|
self.state_filename = state_filename
|
||||||
|
|
||||||
|
# Ensure directories exist
|
||||||
|
if not self.torrents_subdir in os.listdir(self.base_dir):
|
||||||
|
os.mkdir(self.base_dir + "/" + self.torrents_subdir)
|
||||||
|
|
||||||
|
# Start up the core
|
||||||
|
assert(len(version) == 4)
|
||||||
|
pytorrent_core.init(client_ID,
|
||||||
|
int(version[0]),
|
||||||
|
int(version[1]),
|
||||||
|
int(version[2]),
|
||||||
|
int(version[3]),
|
||||||
|
user_agent)
|
||||||
|
|
||||||
|
# Unique IDs are NOT in the state, since they are temporary for each session
|
||||||
|
self.unique_IDs = {} # unique_ID -> a torrent object
|
||||||
|
|
||||||
|
# Unpickle the state, or create a new one
|
||||||
|
try:
|
||||||
|
pkl_file = open(state_filename, 'rb')
|
||||||
|
self.state = pickle.load(pkl_file)
|
||||||
|
pkl_file.close()
|
||||||
|
|
||||||
|
# Sync with the core: tell core about torrents, and get unique_IDs
|
||||||
|
self.sync()
|
||||||
|
except IOError:
|
||||||
|
self.state = persistent_state()
|
||||||
|
|
||||||
|
def quit(self):
|
||||||
|
# Pickle the state
|
||||||
|
output = open(self.state_filename, 'wb')
|
||||||
|
pickle.dump(self.state, output)
|
||||||
|
output.close()
|
||||||
|
|
||||||
|
# Save fastresume data
|
||||||
|
self.save_fastresume_data()
|
||||||
|
|
||||||
|
# Shutdown torrent core
|
||||||
|
pytorrent.quit()
|
||||||
|
|
||||||
|
def add_torrent(self, filename, save_dir, compact):
|
||||||
|
print "add_torrent"
|
||||||
|
self.add_torrent_ns(filename, save_dir, compact)
|
||||||
|
return self.sync() # Syncing will create a new torrent in the core, and return it's ID
|
||||||
|
|
||||||
|
def remove_torrent(self, unique_ID, data_also):
|
||||||
|
# Save some data before we remove the torrent, needed later in this func
|
||||||
|
temp = self.unique_IDs[unique_ID]
|
||||||
|
temp_fileinfo = pytorrent_core.get_fileinfo(unique_ID)
|
||||||
|
|
||||||
|
self.remove_torrent_ns(unique_ID)
|
||||||
|
self.sync()
|
||||||
|
|
||||||
|
# Remove .torrent and .fastresume
|
||||||
|
os.remove(temp.filename)
|
||||||
|
try:
|
||||||
|
# Must be after removal of the torrent, because that saves a new .fastresume
|
||||||
|
os.remove(temp.filename + ".fastresume")
|
||||||
|
except OSError:
|
||||||
|
pass # Perhaps there never was one to begin with
|
||||||
|
|
||||||
|
# Remove data, if asked to do so
|
||||||
|
if data_also:
|
||||||
|
# Must be done AFTER the torrent is removed
|
||||||
|
# Note: can this be to the trash?
|
||||||
|
for filedata in temp_fileinfo:
|
||||||
|
filename = filedata['path']
|
||||||
|
try:
|
||||||
|
os.remove(temp.save_dir + "/" + filename)
|
||||||
|
except OSError:
|
||||||
|
pass # No file just means it wasn't downloaded, we can continue
|
||||||
|
|
||||||
|
# A separate function, because people may want to call it from time to time
|
||||||
|
def save_fastresume_data(self):
|
||||||
|
for unique_ID in self.unique_IDs:
|
||||||
|
pytorrent_core.save_fastresume(unique_ID, self.unique_IDs[unique_ID].filename)
|
||||||
|
|
||||||
|
def get_state(self, unique_ID):
|
||||||
|
return pytorrent_core.get_state(unique_ID)
|
||||||
|
|
||||||
|
def queue_up(self, unique_ID):
|
||||||
|
curr_index = self.get_queue_index(unique_ID)
|
||||||
|
if curr_index > 0:
|
||||||
|
temp = self.state.queue[curr_index - 1]
|
||||||
|
self.state.queue[curr_index - 1] = unique_ID
|
||||||
|
self.state.queue[curr_index] = temp
|
||||||
|
|
||||||
|
def queue_down(self, unique_ID):
|
||||||
|
curr_index = self.get_queue_index(unique_ID)
|
||||||
|
if curr_index < (len(self.state.queue) - 1):
|
||||||
|
temp = self.state.queue[curr_index + 1]
|
||||||
|
self.state.queue[curr_index + 1] = unique_ID
|
||||||
|
self.state.queue[curr_index] = temp
|
||||||
|
|
||||||
|
def queue_bottom(self, unique_ID):
|
||||||
|
curr_index = self.get_queue_index(unique_ID)
|
||||||
|
if curr_index < (len(self.state.queue) - 1):
|
||||||
|
self.state.queue.remove(curr_index)
|
||||||
|
self.state.queue.append(unique_ID)
|
||||||
|
|
||||||
|
def clear_completed(self):
|
||||||
|
for unique_ID in self.unique_IDs:
|
||||||
|
torrent_state = pytorrent_core.get_state(unique_ID)
|
||||||
|
if torrent_state['progress'] == 100.0:
|
||||||
|
self.remove_torrent_ns(unique_ID)
|
||||||
|
|
||||||
|
self.sync()
|
||||||
|
|
||||||
|
def set_user_pause(self, unique_ID, new_value):
|
||||||
|
self.unique_IDs[unique_ID].user_paused = new_value
|
||||||
|
self.apply_queue()
|
||||||
|
|
||||||
|
def is_user_paused(self, unique_ID):
|
||||||
|
return self.unique_IDs[unique_ID].user_paused
|
||||||
|
|
||||||
|
def is_paused(self, unique_ID):
|
||||||
|
return pytorrent_core.is_paused(unique_ID])
|
||||||
|
|
||||||
|
# Enforce the queue: pause/unpause as needed, based on queue and user_pausing
|
||||||
|
# This should be called after changes to relevant parameters (user_pausing, or
|
||||||
|
# altering max_active_torrents), or just from time to time
|
||||||
|
# ___ALL queuing code should be in this function, and ONLY here___
|
||||||
|
def apply_queue(self):
|
||||||
|
# Handle autoseeding - downqueue as needed
|
||||||
|
|
||||||
|
if self.auto_seed_ratio != -1:
|
||||||
|
for unique_ID in self.unique_IDs:
|
||||||
|
if pytorrent_core.is_seeding(unique_ID):
|
||||||
|
torrent_state = pytorrent_core.get_state(unique_ID)
|
||||||
|
ratio = self.calc_ratio(unique_ID, torrent_state)
|
||||||
|
if ratio >= self.auto_seed_ratio:
|
||||||
|
self.queue_bottom(unique_ID)
|
||||||
|
|
||||||
|
# Pause and resume torrents
|
||||||
|
for index in range(len(self.state.queue)):
|
||||||
|
unique_ID = self.state.queue[index]
|
||||||
|
if (index < self.state.max_active_torrents or self.state_max_active_torrents == -1)
|
||||||
|
and self.is_paused(unique_ID)
|
||||||
|
and not self.is_user_paused(unique_ID):
|
||||||
|
pytorrent_core.resume(unique_ID)
|
||||||
|
elif not self.is_paused(unique_ID) and
|
||||||
|
(index >= self.state.max_active_torrents or self.is_user_paused(unique_ID)):
|
||||||
|
pytorrent_core.pause(unique_ID)
|
||||||
|
|
||||||
|
def calc_ratio(self, unique_ID, torrent_state):
|
||||||
|
up = float(torrent_state['total_upload'] + self.unique_IDs[unique_ID].uploaded_memory
|
||||||
|
down = float(torrent_state["total_done"])
|
||||||
|
|
||||||
|
try:
|
||||||
|
ret = float(up/down)
|
||||||
|
except:
|
||||||
|
ret = -1
|
||||||
|
|
||||||
|
return ret
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# Internal functions
|
||||||
|
####################
|
||||||
|
|
||||||
|
# Non-syncing functions. Used when we loop over such events, and sync manually at the end
|
||||||
|
|
||||||
|
def add_torrent_ns(self, filename, save_dir, compact):
|
||||||
|
# Cache torrent file
|
||||||
|
(temp, torrent_file_short) = os.path.split(filename)
|
||||||
|
|
||||||
|
time.sleep(0.01) # Ensure we use a unique time for the new filename
|
||||||
|
new_name = str(time.time()) + ".torrent"
|
||||||
|
full_new_name = self.state.base_dir + "/" + self.torrents_subdir + newName
|
||||||
|
|
||||||
|
if new_name in os.listdir(self.state.base_dir + "/" + self.torrents_subdir):
|
||||||
|
raise PyTorrentCoreError("Could not cache torrent file locally, failed: " + new_name)
|
||||||
|
|
||||||
|
shutil.copy(filename, full_new_name)
|
||||||
|
|
||||||
|
# Create torrent object
|
||||||
|
new_torrent = torrent(full_new_name, save_dir, compact)
|
||||||
|
self.state.torrents.append(new_torrent)
|
||||||
|
|
||||||
|
def remove_torrent_ns(self, unique_ID):
|
||||||
|
self.unique_IDs[unique_ID].delete_me = True
|
||||||
|
|
||||||
|
# Sync the state.torrents and unique_IDs lists with the core
|
||||||
|
# ___ALL syncing code with the core is here, and ONLY here___
|
||||||
|
# Also all self-syncing is done here (various lists)
|
||||||
|
|
||||||
|
def sync(self):
|
||||||
|
print "sync"
|
||||||
|
|
||||||
|
ret = None # We return new added unique ID(s), or None
|
||||||
|
|
||||||
|
# Add torrents to core and unique_IDs
|
||||||
|
torrents_with_unique_ID = self.unique_IDs.values()
|
||||||
|
|
||||||
|
for torrent in self.state.torrents:
|
||||||
|
if torrent not in torrents_with_unique_ID:
|
||||||
|
print "Adding torrent to core:", torrent.filename, torrent.save_dir, torrent.compact
|
||||||
|
unique_ID = pytorrent_core.add_torrent(torrent.filename,
|
||||||
|
torrent.save_dir,
|
||||||
|
torrent.compact)
|
||||||
|
print "Got unique ID:", unique_ID
|
||||||
|
ret = unique_ID
|
||||||
|
self.unique_IDs[unique_ID] = torrent
|
||||||
|
|
||||||
|
# Remove torrents from core, unique_IDs and queue
|
||||||
|
to_delete = []
|
||||||
|
for torrent in self.state.torrents:
|
||||||
|
if torrent.delete_me:
|
||||||
|
pytorrent_core.remove_torrent(torrent.unique_ID, torrent.filename)
|
||||||
|
to_delete.append(torrent.unique_ID)
|
||||||
|
|
||||||
|
for unique_ID in to_delete:
|
||||||
|
self.state.torrents.remove(self.unique_IDs[unique_ID])
|
||||||
|
self.state.queue.remove(self.unique_IDs[unique_ID])
|
||||||
|
del self.unique_IDs[unique_ID]
|
||||||
|
|
||||||
|
# Add torrents to queue - at the end, of course
|
||||||
|
for unique_ID in self.unique_IDs:
|
||||||
|
if unique_ID not in self.state.queue:
|
||||||
|
self.state.queue.append(unique_ID)
|
||||||
|
|
||||||
|
assert(len(self.unique_IDs) == len(self.state.torrents))
|
||||||
|
assert(len(self.unique_IDs) == len(self.state.queue))
|
||||||
|
assert(len(self.unique_IDs) == pytorrent_core.get_num_torrents())
|
||||||
|
|
||||||
|
return ret
|
||||||
|
|
||||||
|
def get_queue_index(self, unique_ID):
|
||||||
|
return self.state.queue.index(unique_ID)
|
@ -213,29 +213,12 @@ long internal_add_torrent(std::string const& torrent_name,
|
|||||||
return (new_torrent.unique_ID);
|
return (new_torrent.unique_ID);
|
||||||
}
|
}
|
||||||
|
|
||||||
void internal_remove_torrent(long index, std::string const& torrent_name)
|
void internal_remove_torrent(long index)
|
||||||
{
|
{
|
||||||
assert(index < M_torrents->size());
|
assert(index < M_torrents->size());
|
||||||
|
|
||||||
torrent_handle& h = M_torrents->at(index).handle;
|
torrent_handle& h = M_torrents->at(index).handle;
|
||||||
|
|
||||||
// For valid torrents, save fastresume data
|
|
||||||
if (h.is_valid() && h.has_metadata())
|
|
||||||
{
|
|
||||||
h.pause();
|
|
||||||
|
|
||||||
entry data = h.write_resume_data();
|
|
||||||
|
|
||||||
std::stringstream s;
|
|
||||||
s << torrent_name << ".fastresume";
|
|
||||||
|
|
||||||
boost::filesystem::ofstream out(s.str(), std::ios_base::binary);
|
|
||||||
|
|
||||||
out.unsetf(std::ios_base::skipws);
|
|
||||||
|
|
||||||
bencode(std::ostream_iterator<char>(out), data);
|
|
||||||
}
|
|
||||||
|
|
||||||
M_ses->remove_torrent(h);
|
M_ses->remove_torrent(h);
|
||||||
|
|
||||||
torrents_t_iterator it = M_torrents->begin() + index;
|
torrents_t_iterator it = M_torrents->begin() + index;
|
||||||
@ -358,6 +341,37 @@ static PyObject *torrent_quit(PyObject *self, PyObject *args)
|
|||||||
Py_INCREF(Py_None); return Py_None;
|
Py_INCREF(Py_None); return Py_None;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// save_fastresume(unique_ID, torrent_name): pause the torrent and write its
// libtorrent resume data to "<torrent_name>.fastresume".
static PyObject *torrent_save_fastresume(PyObject *self, PyObject *args)
{
    python_long unique_ID;
    const char *torrent_name;
    if (!PyArg_ParseTuple(args, "is", &unique_ID, &torrent_name))
        return NULL;

    long index = get_index_from_unique_ID(unique_ID);
    if (PyErr_Occurred())
        return NULL;

    torrent_handle& h = M_torrents->at(index).handle;

    // Only valid torrents with metadata have resume data to save
    if (h.is_valid() && h.has_metadata())
    {
        // Pause so the resume data is consistent at the moment we snapshot it
        h.pause();

        entry data = h.write_resume_data();

        std::stringstream s;
        s << torrent_name << ".fastresume";

        boost::filesystem::ofstream out(s.str(), std::ios_base::binary);
        out.unsetf(std::ios_base::skipws);
        bencode(std::ostream_iterator<char>(out), data);
    } else
    {
        PYTORRENTCORE_RAISE_PTR(PyTorrentCoreError, "Invalid handle or no metadata for fastresume.");
        // NOTE(review): assuming the macro only sets the Python error;
        // signal failure explicitly in case it does not return itself
        return NULL;
    }

    // BUG FIX: the original fell off the end of the function on the success
    // path, returning garbage to the CPython API; return None explicitly
    Py_INCREF(Py_None); return Py_None;
}
|
||||||
|
|
||||||
static PyObject *torrent_set_max_half_open(PyObject *self, PyObject *args)
|
static PyObject *torrent_set_max_half_open(PyObject *self, PyObject *args)
|
||||||
{
|
{
|
||||||
python_long arg;
|
python_long arg;
|
||||||
@ -469,15 +483,14 @@ static PyObject *torrent_add_torrent(PyObject *self, PyObject *args)
|
|||||||
static PyObject *torrent_remove_torrent(PyObject *self, PyObject *args)
|
static PyObject *torrent_remove_torrent(PyObject *self, PyObject *args)
|
||||||
{
|
{
|
||||||
python_long unique_ID;
|
python_long unique_ID;
|
||||||
const char *name;
|
if (!PyArg_ParseTuple(args, "i", &unique_ID))
|
||||||
if (!PyArg_ParseTuple(args, "is", &unique_ID, &name))
|
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
long index = get_index_from_unique_ID(unique_ID);
|
long index = get_index_from_unique_ID(unique_ID);
|
||||||
if (PyErr_Occurred())
|
if (PyErr_Occurred())
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
internal_remove_torrent(index, name);
|
internal_remove_torrent(index);
|
||||||
|
|
||||||
Py_INCREF(Py_None); return Py_None;
|
Py_INCREF(Py_None); return Py_None;
|
||||||
}
|
}
|
||||||
@ -502,6 +515,32 @@ static PyObject *torrent_reannounce(PyObject *self, PyObject *args)
|
|||||||
Py_INCREF(Py_None); return Py_None;
|
Py_INCREF(Py_None); return Py_None;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// is_seeding(unique_ID) -> int: whether the handle reports seeding state
static PyObject *torrent_is_seeding(PyObject *self, PyObject *args)
{
    python_long unique_ID;
    if (!PyArg_ParseTuple(args, "i", &unique_ID))
        return NULL;

    const long idx = get_index_from_unique_ID(unique_ID);
    if (PyErr_Occurred())
        return NULL;

    torrent_handle &handle = M_torrents->at(idx).handle;
    return Py_BuildValue("i", handle.is_seed());
}
|
||||||
|
|
||||||
|
// is_paused(unique_ID) -> int: whether the handle is currently paused
static PyObject *torrent_is_paused(PyObject *self, PyObject *args)
{
    python_long unique_ID;
    if (!PyArg_ParseTuple(args, "i", &unique_ID))
        return NULL;

    const long idx = get_index_from_unique_ID(unique_ID);
    if (PyErr_Occurred())
        return NULL;

    torrent_handle &handle = M_torrents->at(idx).handle;
    return Py_BuildValue("i", handle.is_paused());
}
|
||||||
|
|
||||||
static PyObject *torrent_pause(PyObject *self, PyObject *args)
|
static PyObject *torrent_pause(PyObject *self, PyObject *args)
|
||||||
{
|
{
|
||||||
python_long unique_ID;
|
python_long unique_ID;
|
||||||
@ -532,7 +571,7 @@ static PyObject *torrent_resume(PyObject *self, PyObject *args)
|
|||||||
Py_INCREF(Py_None); return Py_None;
|
Py_INCREF(Py_None); return Py_None;
|
||||||
}
|
}
|
||||||
|
|
||||||
static PyObject *torrent_get_name(PyObject *self, PyObject *args)
|
static PyObject *torrent_get_torrent_info(PyObject *self, PyObject *args)
|
||||||
{
|
{
|
||||||
python_long unique_ID;
|
python_long unique_ID;
|
||||||
if (!PyArg_ParseTuple(args, "i", &unique_ID))
|
if (!PyArg_ParseTuple(args, "i", &unique_ID))
|
||||||
@ -542,7 +581,12 @@ static PyObject *torrent_get_name(PyObject *self, PyObject *args)
|
|||||||
if (PyErr_Occurred())
|
if (PyErr_Occurred())
|
||||||
return NULL;
|
return NULL;
|
||||||
|
|
||||||
return Py_BuildValue("s", M_torrents->at(index).handle.get_torrent_info().name().c_str());
|
torrent_t &t = M_torrents->at(index);
|
||||||
|
|
||||||
|
return Py_BuildValue("{s:s,s:l}",
|
||||||
|
"name", t.handle.get_torrent_info().name().c_str(),
|
||||||
|
"num_files", t.handle.get_torrent_info().num_files()
|
||||||
|
);
|
||||||
}
|
}
|
||||||
|
|
||||||
static PyObject *torrent_get_state(PyObject *self, PyObject *args)
|
static PyObject *torrent_get_state(PyObject *self, PyObject *args)
|
||||||
@ -843,6 +887,99 @@ static PyObject *torrent_get_peer_info(PyObject *self, PyObject *args)
|
|||||||
return ret;
|
return ret;
|
||||||
};
|
};
|
||||||
|
|
||||||
|
// get_file_info(unique_ID) -> tuple of one dict per file in the torrent,
// with keys "path", "offset", "size" and "progress" (percent complete).
static PyObject *torrent_get_file_info(PyObject *self, PyObject *args)
{
    python_long unique_ID;
    if (!PyArg_ParseTuple(args, "i", &unique_ID))
        return NULL;

    long index = get_index_from_unique_ID(unique_ID);
    if (PyErr_Occurred())
        return NULL;

    std::vector<PyObject *> temp_files;

    PyObject *file_info;

    // Per-file completion fractions, filled in by libtorrent
    std::vector<float> progresses;

    torrent_t &t = M_torrents->at(index);
    t.handle.file_progress(progresses);

    torrent_info::file_iterator start =
        t.handle.get_torrent_info().begin_files();
    torrent_info::file_iterator end =
        t.handle.get_torrent_info().end_files();

    // CLEANUP: removed the unused fileIndex counter the original kept; it was
    // left over from a revision that also reported per-file filter status
    for(torrent_info::file_iterator i = start; i != end; ++i)
    {
        file_entry const &currFile = (*i);

        file_info = Py_BuildValue(
            "{s:s,s:d,s:d,s:f}",
            "path",     currFile.path.string().c_str(),
            "offset",   double(currFile.offset),
            "size",     double(currFile.size),
            "progress", progresses[i - start]*100.0
        );

        temp_files.push_back(file_info);
    };

    PyObject *ret = PyTuple_New(temp_files.size());

    for (unsigned long i = 0; i < temp_files.size(); i++)
        PyTuple_SetItem(ret, i, temp_files[i]);

    return ret;
};
|
||||||
|
|
||||||
|
// set_filter_out(unique_ID, flags): apply a per-file download filter.
// flags must be a Python list with one entry per file in the torrent.
static PyObject *torrent_set_filter_out(PyObject *self, PyObject *args)
{
    python_long unique_ID;
    PyObject *filter_out_object;
    if (!PyArg_ParseTuple(args, "iO", &unique_ID, &filter_out_object))
        return NULL;

    long index = get_index_from_unique_ID(unique_ID);
    if (PyErr_Occurred())
        return NULL;

    torrent_t &t = M_torrents->at(index);
    long num_files = t.handle.get_torrent_info().num_files();
    // NOTE(review): assert is compiled out in release builds; a mismatched
    // list length should probably raise a Python error instead -- confirm
    assert(PyList_Size(filter_out_object) == num_files);

    filter_out_t filter_out(num_files);

    for (long file_num = 0; file_num < num_files; file_num++)
        filter_out.at(file_num) =
            PyInt_AsLong(PyList_GetItem(filter_out_object, file_num));

    t.handle.filter_files(filter_out);

    Py_INCREF(Py_None); return Py_None;
}
|
||||||
|
|
||||||
|
/*static PyObject *torrent_get_unique_IDs(PyObject *self, PyObject *args)
|
||||||
|
{
|
||||||
|
PyObject *ret = PyTuple_New(M_torrents.size());
|
||||||
|
PyObject *temp;
|
||||||
|
|
||||||
|
for (unsigned long i = 0; i < M_torrents.size(); i++)
|
||||||
|
{
|
||||||
|
temp = Py_BuildValue("i", M_torrents->at(i).unique_ID)
|
||||||
|
|
||||||
|
PyTuple_SetItem(ret, i, temp);
|
||||||
|
};
|
||||||
|
|
||||||
|
return ret;
|
||||||
|
};*/
|
||||||
|
|
||||||
|
|
||||||
static PyObject *torrent_constants(PyObject *self, PyObject *args)
|
static PyObject *torrent_constants(PyObject *self, PyObject *args)
|
||||||
@ -1054,6 +1191,7 @@ static PyObject *torrent_apply_IP_filter(PyObject *self, PyObject *args)
|
|||||||
static PyMethodDef pytorrent_core_methods[] = {
|
static PyMethodDef pytorrent_core_methods[] = {
|
||||||
{"init", torrent_init, METH_VARARGS, "."},
|
{"init", torrent_init, METH_VARARGS, "."},
|
||||||
{"quit", torrent_quit, METH_VARARGS, "."},
|
{"quit", torrent_quit, METH_VARARGS, "."},
|
||||||
|
{"save_fastresume", torrent_save_fastresume, METH_VARARGS, "."},
|
||||||
{"set_max_half_open", torrent_set_max_half_open, METH_VARARGS, "."},
|
{"set_max_half_open", torrent_set_max_half_open, METH_VARARGS, "."},
|
||||||
{"set_download_rate_limit", torrent_set_download_rate_limit, METH_VARARGS, "."},
|
{"set_download_rate_limit", torrent_set_download_rate_limit, METH_VARARGS, "."},
|
||||||
{"set_upload_rate_limit", torrent_set_upload_rate_limit, METH_VARARGS, "."},
|
{"set_upload_rate_limit", torrent_set_upload_rate_limit, METH_VARARGS, "."},
|
||||||
@ -1066,15 +1204,17 @@ static PyMethodDef pytorrent_core_methods[] = {
|
|||||||
{"remove_torrent", torrent_remove_torrent, METH_VARARGS, "."},
|
{"remove_torrent", torrent_remove_torrent, METH_VARARGS, "."},
|
||||||
{"get_num_torrents", torrent_get_num_torrents, METH_VARARGS, "."},
|
{"get_num_torrents", torrent_get_num_torrents, METH_VARARGS, "."},
|
||||||
{"reannounce", torrent_reannounce, METH_VARARGS, "."},
|
{"reannounce", torrent_reannounce, METH_VARARGS, "."},
|
||||||
|
{"is_paused", torrent_is_paused, METH_VARARGS, "."},
|
||||||
{"pause", torrent_pause, METH_VARARGS, "."},
|
{"pause", torrent_pause, METH_VARARGS, "."},
|
||||||
{"resume", torrent_resume, METH_VARARGS, "."},
|
{"resume", torrent_resume, METH_VARARGS, "."},
|
||||||
{"get_name", torrent_get_name, METH_VARARGS, "."},
|
{"get_torrent_info", torrent_get_torrent_info, METH_VARARGS, "."},
|
||||||
{"get_state", torrent_get_state, METH_VARARGS, "."},
|
{"get_state", torrent_get_state, METH_VARARGS, "."},
|
||||||
{"pop_event", torrent_pop_event, METH_VARARGS, "."},
|
{"pop_event", torrent_pop_event, METH_VARARGS, "."},
|
||||||
{"get_session_info", torrent_get_session_info, METH_VARARGS, "."},
|
{"get_session_info", torrent_get_session_info, METH_VARARGS, "."},
|
||||||
{"get_peer_info", torrent_get_peer_info, METH_VARARGS, "."},
|
{"get_peer_info", torrent_get_peer_info, METH_VARARGS, "."},
|
||||||
// {"get_file_info", torrent_get_file_info, METH_VARARGS, "."},
|
/* {"get_unique_IDs", torrent_get_unique_IDs, METH_VARARGS, "."},*/
|
||||||
// {"set_filter_out", torrent_set_filter_out, METH_VARARGS, "."},
|
{"get_file_info", torrent_get_file_info, METH_VARARGS, "."},
|
||||||
|
{"set_filter_out", torrent_set_filter_out, METH_VARARGS, "."},
|
||||||
{"constants", torrent_constants, METH_VARARGS, "."},
|
{"constants", torrent_constants, METH_VARARGS, "."},
|
||||||
{"start_DHT", torrent_start_DHT, METH_VARARGS, "."},
|
{"start_DHT", torrent_start_DHT, METH_VARARGS, "."},
|
||||||
{"stop_DHT", torrent_stop_DHT, METH_VARARGS, "."},
|
{"stop_DHT", torrent_stop_DHT, METH_VARARGS, "."},
|
||||||
|
@ -1,187 +0,0 @@
|
|||||||
#
|
|
||||||
# Copyright (C) 2006 Zach Tibbitts <zach@collegegeek.org>
|
|
||||||
# Copyright (C) 2006 Alon Zakai ('Kripken') <kripkensteiner@gmail.com>
|
|
||||||
#
|
|
||||||
# This program is free software; you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU General Public License as published by
|
|
||||||
# the Free Software Foundation; either version 2, or (at your option)
|
|
||||||
# any later version.
|
|
||||||
#
|
|
||||||
# This program is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU General Public License
|
|
||||||
# along with this program; if not, write to the Free Software
|
|
||||||
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
|
|
||||||
#
|
|
||||||
|
|
||||||
# pytorrent-manager: backend/non-gui routines, that are not part of the core
|
|
||||||
# pytorrent module. pytorrent itself is mainly an interface to libtorrent,
|
|
||||||
# with some arrangements of exception classes for Python, etc.; also, some
|
|
||||||
# additional code that fits in well at the C++ level of libtorrent. All other
|
|
||||||
# backend routines should be in pytorrent-manager.
|
|
||||||
#
|
|
||||||
# Things which pytorrent-manager should do:
|
|
||||||
#
|
|
||||||
# 1. Save/Load torrent states (list of torrents in system, + their states) to file
|
|
||||||
# (AutoSaveTorrents in deluge.py)
|
|
||||||
# 2. Manage basic queuing: how many active downloads, and autopause the rest (this
|
|
||||||
# is currently spread along deluge.py and torrenthandler.py)
|
|
||||||
# 2a.Queue up and queue down, etc., functions (in deluge.py)
|
|
||||||
# 3. Save/Load a preferences file, with all settings (max ports, listen port, use
|
|
||||||
# DHT, etc. etc.)
|
|
||||||
# 4. Manage autoseeding to a certain share % (currently in torrenthandler.py)
|
|
||||||
# 5. Handle caching of .torrent files and so forth (currently in deluge.py)
|
|
||||||
# 6. A 'clear completed' function, that works on the BACKEND data, unlike the
|
|
||||||
# current implementation which works on the frontend (in torrenthander.py)
|
|
||||||
# 7. Various statistics-reporting functions - # of active torrents, etc. etc.
|
|
||||||
# (getNumActiveTorrents in torrenthandler.py)
|
|
||||||
# 8. Remove torrent's data (in deluge.py)
|
|
||||||
#
|
|
||||||
|
|
||||||
import pytorrent_core
|
|
||||||
import os
|
|
||||||
import pickle
|
|
||||||
|
|
||||||
class torrent:
|
|
||||||
def __init__(self, filename, save_dir, compact):
|
|
||||||
self.filename = filename
|
|
||||||
self.save_dir = save_dir
|
|
||||||
self.compact = compact
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
static PyObject *torrent_get_file_info(PyObject *self, PyObject *args)
|
|
||||||
{
|
|
||||||
python_long unique_ID;
|
|
||||||
if (!PyArg_ParseTuple(args, "i", &unique_ID))
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
long index = get_index_from_unique_ID(unique_ID);
|
|
||||||
if (PyErr_Occurred())
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
std::vector<PyObject *> temp_files;
|
|
||||||
|
|
||||||
PyObject *file_info;
|
|
||||||
|
|
||||||
std::vector<float> progresses;
|
|
||||||
|
|
||||||
torrent_t &t = M_torrents->at(index);
|
|
||||||
t.handle.file_progress(progresses);
|
|
||||||
|
|
||||||
torrent_info::file_iterator start =
|
|
||||||
t.handle.get_torrent_info().begin_files();
|
|
||||||
torrent_info::file_iterator end =
|
|
||||||
t.handle.get_torrent_info().end_files();
|
|
||||||
|
|
||||||
long fileIndex = 0;
|
|
||||||
|
|
||||||
filter_out_t &filter_out = t.filter_out;
|
|
||||||
|
|
||||||
for(torrent_info::file_iterator i = start; i != end; ++i)
|
|
||||||
{
|
|
||||||
file_entry const &currFile = (*i);
|
|
||||||
|
|
||||||
file_info = Py_BuildValue(
|
|
||||||
"{s:s,s:d,s:d,s:f,s:i}",
|
|
||||||
"path", currFile.path.string().c_str(),
|
|
||||||
"offset", double(currFile.offset),
|
|
||||||
"size", double(currFile.size),
|
|
||||||
"progress", progresses[i - start]*100.0,
|
|
||||||
"filtered_out", long(filter_out.at(fileIndex))
|
|
||||||
);
|
|
||||||
|
|
||||||
fileIndex++;
|
|
||||||
|
|
||||||
temp_files.push_back(file_info);
|
|
||||||
};
|
|
||||||
|
|
||||||
PyObject *ret = PyTuple_New(temp_files.size());
|
|
||||||
|
|
||||||
for (unsigned long i = 0; i < temp_files.size(); i++)
|
|
||||||
PyTuple_SetItem(ret, i, temp_files[i]);
|
|
||||||
|
|
||||||
return ret;
|
|
||||||
};
|
|
||||||
|
|
||||||
static PyObject *torrent_set_filter_out(PyObject *self, PyObject *args)
|
|
||||||
{
|
|
||||||
python_long unique_ID;
|
|
||||||
PyObject *filter_out_object;
|
|
||||||
if (!PyArg_ParseTuple(args, "iO", &unique_ID, &filter_out_object))
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
long index = get_index_from_unique_ID(unique_ID);
|
|
||||||
if (PyErr_Occurred())
|
|
||||||
return NULL;
|
|
||||||
|
|
||||||
torrent_t &t = M_torrents->at(index);
|
|
||||||
long num_files = t.handle.get_torrent_info().num_files();
|
|
||||||
assert(PyList_Size(filter_out_object) == num_files);
|
|
||||||
|
|
||||||
for (long i = 0; i < num_files; i++)
|
|
||||||
{
|
|
||||||
t.filter_out.at(i) =
|
|
||||||
PyInt_AsLong(PyList_GetItem(filter_out_object, i));
|
|
||||||
};
|
|
||||||
|
|
||||||
t.handle.filter_files(t.filter_out);
|
|
||||||
|
|
||||||
Py_INCREF(Py_None); return Py_None;
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
// Shut down torrents gracefully
|
|
||||||
for (long i = 0; i < Num; i++)
|
|
||||||
internal_remove_torrent(0);
|
|
||||||
|
|
||||||
|
|
||||||
struct torrent_t {
|
|
||||||
torrent_handle handle;
|
|
||||||
unique_ID_t unique_ID;
|
|
||||||
filter_out_t filter_out;
|
|
||||||
torrent_name_t name;
|
|
||||||
};
|
|
||||||
|
|
||||||
typedef std::vector<torrent_t> torrents_t;
|
|
||||||
typedef torrents_t::iterator torrents_t_iterator;
|
|
||||||
|
|
||||||
class state:
|
|
||||||
def __init__:
|
|
||||||
self.max_connections = 60 # Etc. etc. etc.
|
|
||||||
|
|
||||||
# Prepare queue (queue is pickled, just like everything else)
|
|
||||||
self.queue = [] # queue[x] is the unique_ID of the x-th queue position. Simple.
|
|
||||||
|
|
||||||
# Torrents
|
|
||||||
self.torrents = []
|
|
||||||
self.unique_IDs = {}
|
|
||||||
|
|
||||||
class manager:
|
|
||||||
def __init__(self, state_filename):
|
|
||||||
print "Init"
|
|
||||||
|
|
||||||
self.state_filename = state_filename
|
|
||||||
|
|
||||||
# Unpickle the state
|
|
||||||
try:
|
|
||||||
pkl_file = open(state_filename, 'rb')
|
|
||||||
self.state = pickle.load(pkl_file)
|
|
||||||
pkl_file.close()
|
|
||||||
except IOError:
|
|
||||||
self.state = new state()
|
|
||||||
|
|
||||||
# How does the queue get updated? Use biology
|
|
||||||
|
|
||||||
|
|
||||||
def add_torrent(self, filename, save_dir, compact)
|
|
||||||
unique_ID = pytorrent_core.add_torrent(filename, save_dir, compact)
|
|
||||||
|
|
||||||
def quit(self):
|
|
||||||
# Pickle the state
|
|
||||||
output = open(self.state_filename, 'wb')
|
|
||||||
pickle.dump(self.state, output)
|
|
||||||
output.close()
|
|
@ -9,16 +9,18 @@
|
|||||||
#*/
|
#*/
|
||||||
|
|
||||||
|
|
||||||
import pytorrent_core
|
import pytorrent
|
||||||
from time import sleep
|
from time import sleep
|
||||||
|
|
||||||
pytorrent_core.init("PT", 0, 5, 0, 0, "pytorrent - testing only")
|
manager = pytorrent.manager("PT", "0500", "pytorrent - testing only", "test_state.dat")
|
||||||
|
|
||||||
myTorrent = pytorrent_core.add_torrent("ubuntu.torrent", ".", True)
|
my_torrent = manager.add_torrent("ubuntu.torrent", ".", True)
|
||||||
|
|
||||||
|
print "Unique ID:", my_torrent
|
||||||
|
|
||||||
while True:
|
while True:
|
||||||
print "STATE:"
|
print "STATE:"
|
||||||
print pytorrent_core.get_state(myTorrent)
|
print manager.get_state(my_torrent)
|
||||||
print ""
|
print ""
|
||||||
|
|
||||||
sleep(1)
|
sleep(2)
|
||||||
|
Loading…
x
Reference in New Issue
Block a user