author     Matt Lyon <matthewlyon18@gmail.com>   2019-12-21 11:06:53 +0000
committer  Matt Lyon <matthewlyon18@gmail.com>   2019-12-21 11:06:53 +0000
commit     c45b6ca3e82a5d10e14c31c4b7d0fdaf66fff933 (patch)
tree       b1907ac15c689c9f441d5e3468935a2a330553c5
parent     26e7bacfcc1a86b703e92373b37ea7debdde8b34 (diff)
added better error handling for connection issues (v0.2.4)
-rw-r--r--  README.md                    2
-rw-r--r--  setup.py                     2
-rw-r--r--  tpblite/models/torrents.py  27
-rw-r--r--  tpblite/models/utils.py     10

4 files changed, 16 insertions, 25 deletions
diff --git a/README.md b/README.md
index d47db7d..4fba4ab 100644
--- a/README.md
+++ b/README.md
@@ -70,7 +70,7 @@ You can see how many `Torrent` objects your query has returned, by using the `le
Example Workflow
==========
-With a commandline torrent client such as [aria2](), you can automate search and downloading of torrents like so:
+With a commandline torrent client such as [aria2](https://aria2.github.io/), you can automate search and downloading of torrents like so:
```python
import subprocess
from tpblite import TPB
diff --git a/setup.py b/setup.py
index 9dce75f..8b3f4d2 100644
--- a/setup.py
+++ b/setup.py
@@ -7,7 +7,7 @@ this_dir = path.abspath(path.dirname(__file__))
with open(path.join(this_dir, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
-version = '0.2.3'
+version = '0.2.4'
setup(name = 'tpblite',
version = version,
diff --git a/tpblite/models/torrents.py b/tpblite/models/torrents.py
index eb007cb..2d8bc6a 100644
--- a/tpblite/models/torrents.py
+++ b/tpblite/models/torrents.py
@@ -32,9 +32,9 @@ class Torrent(object):
def __str__(self):
return '{0}, S: {1}, L: {2}, {3}'.format(self.title,
- self.seeds,
- self.leeches,
- self.filesize)
+ self.seeds,
+ self.leeches,
+ self.filesize)
def __repr__(self):
return '<Torrent object: {}>'.format(self.title)
@@ -91,12 +91,14 @@ class Torrents(object):
@property
def _search_set(self):
- if self.__search_set == None:
+ if self.__search_set is None:
self.__search_set = set(filter(None, re.split(r'[\s.|\(|\)]',self.search_str.lower())))
return self.__search_set
def _createTorrentList(self):
soup = BeautifulSoup(self.html_source, features='html.parser')
+ if soup.body is None:
+ raise ConnectionError('Could not determine torrents (empty html body)')
rows = soup.body.find_all('tr')
torrents = []
for row in rows:
@@ -106,19 +108,6 @@ class Torrents(object):
if self._search_set.issubset(text_set):
torrents.append(Torrent(row))
return torrents
-
- def __getRows(self, soup):
- rows = soup.body.find_all('tr')
- # remove first entry (header)
- if len(rows) > 1:
- del rows[0]
- if len(rows) == 31:
- # last row is bottom of page
- del rows[-1]
- return rows
- else:
- return []
-
def getBestTorrent(self, min_seeds=30, min_filesize='1 GiB', max_filesize='4 GiB'):
'''Filters torrent list based on some constraints, then returns highest seeded torrent
@@ -126,9 +115,9 @@ class Torrents(object):
:param min_filesize (str): minimum filesize in XiB form, eg. GiB
:param max_filesize (str): maximum filesize in XiB form, eg. GiB
:return Torrent Object: Torrent with highest seed number, will return None if all are filtered out'''
- if not type(min_filesize) == 'int':
+ if not isinstance(min_filesize, int):
min_filesize = fileSizeStrToInt(min_filesize)
- if not type(max_filesize) == 'int':
+ if not isinstance(max_filesize, int):
max_filesize = fileSizeStrToInt(max_filesize)
filtered_list = filter(lambda x: self._filterTorrent(x, min_seeds, min_filesize, max_filesize), self.list)
sorted_list = sorted(filtered_list, key=lambda x: x.seeds, reverse=True)
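A minimal usage sketch of the `getBestTorrent` filter touched above, assuming the public `TPB` search API shown in the README example; the query string and size bounds here are illustrative, not part of this commit:

```python
from tpblite import TPB

# Illustrative query; any search term works.
torrents = TPB().search('ubuntu 19.10')

# Filter by seed count and filesize, then take the highest-seeded match;
# getBestTorrent returns None if every torrent is filtered out.
best = torrents.getBestTorrent(min_seeds=30,
                               min_filesize='1 GiB',
                               max_filesize='4 GiB')
if best is not None:
    print(best)  # uses the __str__ format shown in the diff above
```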
diff --git a/tpblite/models/utils.py b/tpblite/models/utils.py
index 14c002f..1d6b351 100644
--- a/tpblite/models/utils.py
+++ b/tpblite/models/utils.py
@@ -1,5 +1,6 @@
import random
from urllib.request import Request, urlopen
+import urllib.error
from purl import URL as pURL
@@ -12,7 +13,10 @@ class QueryParser(object):
self.base_url = base_url
segments = ('search', query, str(page), str(order), str(category))
self.url = URL(base_url, segments)
- self.html_source = self._sendRequest()
+ try:
+ self.html_source = self._sendRequest()
+ except urllib.error.URLError:
+            raise ConnectionError('Could not establish connection with {}'.format(self.base_url))
def _sendRequest(self):
req = Request(self.url, headers=headers())
@@ -50,6 +54,4 @@ USER_AGENTS = (
'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) '
'AppleWebKit/537.36 (KHTML, like Gecko) '
'Chrome/60.0.3112.113 Safari/537.36',
-)
-
-### ====================
\ No newline at end of file
+)
\ No newline at end of file
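Taken together, the utils.py and torrents.py changes mean that both a failed request (URLError) and an empty response body now surface to callers as a single ConnectionError. A minimal handling sketch, assuming the same `TPB` API as above; the query string is illustrative:

```python
from tpblite import TPB

try:
    torrents = TPB().search('debian netinst')
except ConnectionError as err:
    # Raised on network failure or an empty HTML body.
    print('Search failed: {}'.format(err))
else:
    print('{} torrents found'.format(len(torrents)))
```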