# -*- coding: utf8 -*-
#
# This file is part of stov, written by Helmut Pozimski 2012-2014.
#
# stov is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# stov is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with stov. If not, see <http://www.gnu.org/licenses/>.

import subprocess
import sys

import lxml.html

if sys.version_info >= (3,):
    import urllib.request as urllib2
else:
    import urllib2

from lib_stov import youtubeAPI
from lib_stov import stov_exceptions


class Connector(object):
    """This class retrieves all the necessary data from YouTube using
    youtube-dl, thus bypassing the API.
    """

    def __init__(self, type, name, conf, search=""):
        """Populates the object with all necessary data."""
        self._type = type
        self._name = name
        self._search = search
        self._conf = conf
        self._title = ""
        self._url = ""
        self._construct_url()

    def _construct_url(self):
        """Builds the youtube.com URL that matches the requested
        subscription type."""
        if self._type == "channel":
            self._url = "https://www.youtube.com/user/%s" \
                        % urllib2.quote(self._name)
        elif self._type == "search":
            self._url = "https://www.youtube.com/results?search_query=%s" \
                        % urllib2.quote(self._search)
        elif self._type == "playlist":
            self._url = "https://www.youtube.com/playlist?list=%s" \
                        % urllib2.quote(self._search)

    def _fetch_title(self):
        """Retrieves the title of the HTML page to use as a title for the
        subscription."""
        data = urllib2.urlopen(self._url)
        parsed_html = lxml.html.parse(data)
        # Only the first <title> element of the page is relevant.
        for item in parsed_html.iter("title"):
            self._title = item.text_content().strip().replace("\n", "")
            break

    def _fetch_videos(self):
        """Retrieves all the relevant videos in a subscription."""
        videos_list = []
        # youtube-dl prints one video id per line; it may exit non-zero once
        # the --max-downloads limit is hit, so the partial output attached to
        # the CalledProcessError is used as well.
        if self._type == "channel" and self._search != "":
            try:
                video_ids = subprocess.check_output(
                    [self._conf.values["youtube-dl"],
                     "--max-downloads", self._conf.values["maxvideos"],
                     "--match-title", self._search,
                     "--get-id", self._url],
                    universal_newlines=True)
            except subprocess.CalledProcessError as e:
                video_ids = e.output.strip()
        else:
            try:
                video_ids = subprocess.check_output(
                    [self._conf.values["youtube-dl"],
                     "--max-downloads", self._conf.values["maxvideos"],
                     "--get-id", self._url],
                    universal_newlines=True)
            except subprocess.CalledProcessError as e:
                video_ids = e.output.strip()
        for video_id in video_ids.split("\n"):
            # Skip empty lines so youtube-dl is not called with an empty id.
            if not video_id:
                continue
            try:
                video_title = subprocess.check_output(
                    [self._conf.values["youtube-dl"], "--get-title",
                     "https://www.youtube.com/watch?v=%s" % video_id],
                    universal_newlines=True).strip()
                video_description = subprocess.check_output(
                    [self._conf.values["youtube-dl"], "--get-description",
                     "https://www.youtube.com/watch?v=%s" % video_id],
                    universal_newlines=True).strip()
            except subprocess.CalledProcessError:
                raise stov_exceptions.YoutubeDlCallFailed()
            else:
                videos_list.append(youtubeAPI.YtVideo(video_title,
                                                      video_description,
                                                      video_id))
        return videos_list

    def ParseAPIData(self):
        """Calls all the necessary methods to retrieve the data and assembles
        the result into a YtChannel object. The method is named after its
        counterpart in youtubeAPI to stay compatible with it.
        """
        self._fetch_title()
        videos = self._fetch_videos()
        channel = youtubeAPI.YtChannel()
        channel.title = self._title
        channel.videos = videos
        return channel
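

# The block below is a usage sketch, not part of the original module: it shows
# how Connector is meant to be driven, assuming a configuration object that
# exposes a ``values`` dict with the "youtube-dl" binary path and the
# "maxvideos" limit read by the methods above. _DemoConf and the channel name
# are hypothetical stand-ins for the real configuration class and data.
if __name__ == "__main__":
    class _DemoConf(object):
        values = {"youtube-dl": "youtube-dl", "maxvideos": "5"}

    connector = Connector("channel", "SomeChannelName", _DemoConf())
    channel = connector.ParseAPIData()
    print(channel.title)
    print(len(channel.videos))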