diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..5d3615c --- /dev/null +++ b/.gitattributes @@ -0,0 +1,8 @@ +.gitignore export-ignore +.gitattributes export-ignore +.github export-ignore +__pycache__ export-ignore +*.psd export-ignore +*.pyo export-ignore +*.pyc export-ignore +*.mo export-ignore \ No newline at end of file diff --git a/.github/workflows/addon-submitter.yml b/.github/workflows/addon-submitter.yml new file mode 100644 index 0000000..1103ce0 --- /dev/null +++ b/.github/workflows/addon-submitter.yml @@ -0,0 +1,70 @@ +name: Kodi Addon-Submitter + +on: + push: + tags: + - v* + +jobs: + kodi-addon-submitter: + runs-on: ubuntu-latest + name: Kodi Addon Submitter + steps: + + - name: Checkout + uses: actions/checkout@v1 + + - name: Extract kodi official repository target + id: extract_branch_pr + shell: bash + env: + ADDON_ID: ${{ github.event.repository.name }} + run: | + echo "##[set-output name=branch;]matrix" + if [[ $ADDON_ID == plugin* ]]; then + echo "##[set-output name=repo;]repo-plugins" + else + echo "##[set-output name=repo;]repo-scripts" + fi + + - name: Kodi addon checker validation + id: kodi-addon-checker + uses: xbmc/action-kodi-addon-checker@v1.2 + with: + kodi-version: ${{ steps.extract_branch_pr.outputs.branch }} + addon-id: ${{ github.event.repository.name }} + is-pr: false + + - name: Generate distribution zip and submit to official kodi repository + id: kodi-addon-submitter + uses: xbmc/action-kodi-addon-submitter@v1.2 + with: # Replace all the below values + kodi-repository: ${{ steps.extract_branch_pr.outputs.repo }} + kodi-version: ${{ steps.extract_branch_pr.outputs.branch }} + addon-id: ${{ github.event.repository.name }} + env: # Make sure you create the below secrets (GH_TOKEN and EMAIL) + GH_USERNAME: ${{ github.repository_owner }} + GH_TOKEN: ${{secrets.GH_TOKEN}} + EMAIL: ${{secrets.EMAIL}} + + - name: Create Github Release + id: create_release + uses: actions/create-release@v1.0.0 + env: + 
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: ${{ github.ref }} + release_name: Release ${{ github.ref }} + draft: false + prerelease: false + + - name: Upload Addon zip to github release + id: upload-release-asset + uses: actions/upload-release-asset@v1.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: ${{ steps.kodi-addon-submitter.outputs.addon-zip }} + asset_name: ${{ steps.kodi-addon-submitter.outputs.addon-zip }} + asset_content_type: application/zip \ No newline at end of file diff --git a/addon.xml b/addon.xml index c16dca4..19f07c2 100644 --- a/addon.xml +++ b/addon.xml @@ -1,16 +1,18 @@ - + - - + - Module providing some AutoCompletion functions - Module providing some AutoCompletion functions + Module providing some AutoCompletion functions + Module providing some AutoCompletion functions all - GNU GENERAL PUBLIC LICENSE Version 2.1, February 1999 - https://github.com/phil65/script.module.autocompletion + GPL-2.0-or-later + https://github.com/finkleandeinhorn/script.module.autocompletion + + icon.png + diff --git a/lib/AutoCompletion.py b/lib/AutoCompletion.py index df6152f..fdf1bcc 100644 --- a/lib/AutoCompletion.py +++ b/lib/AutoCompletion.py @@ -3,30 +3,26 @@ # Copyright (C) 2015 - Philipp Temminghoff # This program is Free Software see LICENSE file for details -import sys -import urllib -import codecs +from abc import ABC, abstractmethod +from urllib.parse import quote_plus import os import time import hashlib import requests -import simplejson +import json +import xbmc import xbmcaddon import xbmcvfs -import xbmc - -PY2 = sys.version_info[0] == 2 -HEADERS = {'User-agent': 'Mozilla/5.0'} - -ADDON = xbmcaddon.Addon() -SETTING = ADDON.getSetting -ADDON_PATH = os.path.join(os.path.dirname(__file__), "..") -ADDON_ID = ADDON.getAddonInfo('id') -ADDON_DATA_PATH = xbmc.translatePath("special://profile/addon_data/%s" % ADDON_ID) -if PY2: - ADDON_DATA_PATH = 
ADDON_DATA_PATH.decode("utf-8") +SCRIPT_ID = "script.module.autocompletion" +SCRIPT_ADDON = xbmcaddon.Addon(SCRIPT_ID) +PLUGIN_ID = "plugin.program.autocompletion" +PLUGIN_ADDON = xbmcaddon.Addon(PLUGIN_ID) +SETTING = PLUGIN_ADDON.getSetting +ADDON_PATH = xbmcvfs.translatePath(SCRIPT_ADDON.getAddonInfo("path")) +ADDON_ID = SCRIPT_ADDON.getAddonInfo("id") +ADDON_DATA_PATH = xbmcvfs.translatePath(SCRIPT_ADDON.getAddonInfo("profile")) def get_autocomplete_items(search_str, limit=10, provider=None): @@ -35,14 +31,17 @@ def get_autocomplete_items(search_str, limit=10, provider=None): """ if xbmc.getCondVisibility("System.HasHiddenInput"): return [] - if SETTING("autocomplete_provider") == "youtube": + + setting = SETTING("autocomplete_provider").lower() + + if setting == "youtube": provider = GoogleProvider(youtube=True, limit=limit) - elif SETTING("autocomplete_provider") == "google": + elif setting == "google": provider = GoogleProvider(limit=limit) - elif SETTING("autocomplete_provider") == "bing": + elif setting == "bing": provider = BingProvider(limit=limit) - elif SETTING("autocomplete_provider") == "netflix": - provider = NetflixProvider(limit=limit) + elif setting == "tmdb": + provider = TmdbProvider(limit=limit) else: provider = LocalDictProvider(limit=limit) provider.limit = limit @@ -50,18 +49,23 @@ def get_autocomplete_items(search_str, limit=10, provider=None): def prep_search_str(text): - if not isinstance(text, unicode): - text = text.decode('utf-8') for char in text: if 1488 <= ord(char) <= 1514: return text[::-1] return text -class BaseProvider(object): +class BaseProvider(ABC): + + HEADERS = {'User-agent': 'Mozilla/5.0'} def __init__(self, *args, **kwargs): self.limit = kwargs.get("limit", 10) + self.language = SETTING("autocomplete_lang") + + @abstractmethod + def build_url(self, query): + pass def get_predictions(self, search_str): if not search_str: @@ -69,101 +73,117 @@ def get_predictions(self, search_str): items = [] result = 
self.fetch_data(search_str) for i, item in enumerate(result): - li = {"label": item, - "search_string": prep_search_str(item)} + li = {"label": item, "search_string": prep_search_str(item)} items.append(li) - if i > self.limit: + if i > int(self.limit): break return items def get_prediction_listitems(self, search_str): for item in self.get_predictions(search_str): - li = {"label": item, - "search_string": search_str} + li = {"label": item, "search_string": search_str} yield li + def fetch_data(self, search_str): + url = self.build_url(quote_plus(search_str)) + result = get_JSON_response(url=self.BASE_URL.format(endpoint=url), headers=self.HEADERS, folder=self.FOLDER) + return self.process_result(result) + + def process_result(self, result): + if not result or len(result) <= 1: + return [] + else: + return result[1] if isinstance(result[1], list) else result + class GoogleProvider(BaseProvider): - BASE_URL = "http://clients1.google.com/complete/" + BASE_URL = "http://clients1.google.com/complete/{endpoint}" + FOLDER = "Google" def __init__(self, *args, **kwargs): - super(GoogleProvider, self).__init__(*args, **kwargs) + super().__init__(*args, **kwargs) self.youtube = kwargs.get("youtube", False) - def fetch_data(self, search_str): - url = "search?hl=%s&q=%s&json=t&client=serp" % (SETTING("autocomplete_lang"), urllib.quote_plus(search_str)) + def build_url(self, query): + url = f"search?hl={self.language}&q={query}&json=t&client=serp" if self.youtube: url += "&ds=yt" - result = get_JSON_response(url=self.BASE_URL + url, - headers=HEADERS, - folder="Google") - if not result or len(result) <= 1: - return [] - else: - return result[1] + return url class BingProvider(BaseProvider): - BASE_URL = "http://api.bing.com/osjson.aspx?" 
+ BASE_URL = "http://api.bing.com/osjson.aspx?{endpoint}" + FOLDER = "Bing" def __init__(self, *args, **kwargs): super(BingProvider, self).__init__(*args, **kwargs) - def fetch_data(self, search_str): - url = "query=%s" % (urllib.quote_plus(search_str)) - result = get_JSON_response(url=self.BASE_URL + url, - headers=HEADERS, - folder="Bing") - if not result: - return [] - else: - return result[1] + def build_url(self, query): + url = f"query={query}" + return url -class NetflixProvider(BaseProvider): +class TmdbProvider(BaseProvider): - BASE_URL = "http://api-global.netflix.com/desktop/search/autocomplete?" + BASE_URL = "https://www.themoviedb.org/search/multi?{endpoint}" + FOLDER = "TMDB" def __init__(self, *args, **kwargs): - super(NetflixProvider, self).__init__(*args, **kwargs) + super(TmdbProvider, self).__init__(*args, **kwargs) - def fetch_data(self, search_str): - url = "term=%s" % (urllib.quote_plus(search_str)) - result = get_JSON_response(url=self.BASE_URL + url, - headers=HEADERS, - folder="Bing") - if not result or not result["groups"]: + def build_url(self, query): + url = f"language={self.language}&query={query}" + return url + + def process_result(self, result): + if not result or not result.get("results"): return [] - return [i["title"] for i in result["groups"][0]["items"]] + out = [] + results = result.get("results") + for i in results: + title = None + media_type = i.get("media_type") + if media_type == "movie": + title = i["title"] + elif media_type in ["tv", "person"]: + title = i["name"] + else: + title = i + out.append(title) + return out class LocalDictProvider(BaseProvider): - def __init__(self, *args, **kwargs): super(LocalDictProvider, self).__init__(*args, **kwargs) + local = SETTING("autocomplete_lang_local") + if local: + self.language = local + else: + self.language = "en" - def get_predictions(self, search_str): - """ - get dict list with autocomplete labels from locally saved lists - """ - listitems = [] + def build_url(self, 
query): + return super().build_url(query) + + def fetch_data(self, search_str): k = search_str.rfind(" ") if k >= 0: - search_str = search_str[k + 1:] - local = SETTING("autocomplete_lang_local") - path = os.path.join(ADDON_PATH, "resources", "data", "common_%s.txt" % (local if local else "en")) - with codecs.open(path, encoding="utf8") as f: - for line in f.readlines(): + search_str = search_str[k + 1 :] + + path = os.path.join(ADDON_PATH, "resources", "data", f"common_{self.language}.txt") + suggestions = [] + + with xbmcvfs.File(path) as f: + for line in f.read().split('\n'): if not line.startswith(search_str) or len(line) <= 2: continue - li = {"label": line, - "search_string": line} - listitems.append(li) - if len(listitems) > self.limit: + suggestions.append(line) + if len(suggestions) > int(self.limit): break - return listitems + + return suggestions def get_JSON_response(url="", cache_days=7.0, folder=False, headers=False): @@ -171,51 +191,44 @@ def get_JSON_response(url="", cache_days=7.0, folder=False, headers=False): get JSON response for *url, makes use of file cache. """ now = time.time() - hashed_url = hashlib.md5(url).hexdigest() - if folder: - cache_path = xbmc.translatePath(os.path.join(ADDON_DATA_PATH, folder)) - else: - cache_path = xbmc.translatePath(os.path.join(ADDON_DATA_PATH)) - path = os.path.join(cache_path, hashed_url + ".txt") - cache_seconds = int(cache_days * 86400.0) + hashed_url = hashlib.md5(url.encode("utf-8")).hexdigest() + cache_path = xbmcvfs.translatePath(os.path.join(ADDON_DATA_PATH, folder) if folder else ADDON_DATA_PATH) + path = os.path.join(cache_path, f"{hashed_url}.txt") + cache_seconds = int(cache_days * 86400) + results = [] + if xbmcvfs.exists(path) and ((now - os.path.getmtime(path)) < cache_seconds): results = read_from_file(path) - log("loaded file for %s. time: %f" % (url, time.time() - now)) + log(f"loaded file for {url}. 
time: {float(time.time() - now)}") else: response = get_http(url, headers) try: - results = simplejson.loads(response) - log("download %s. time: %f" % (url, time.time() - now)) + results = json.loads(response) + log(f"download {url}. time: {float(time.time() - now)}") save_to_file(results, hashed_url, cache_path) except Exception: - log("Exception: Could not get new JSON data from %s. Tryin to fallback to cache" % url) + log(f"Exception: Could not get new JSON data from {url}. Trying to fallback to cache") log(response) - if xbmcvfs.exists(path): - results = read_from_file(path) - else: - results = [] - if results: - return results - else: - return [] + results = read_from_file(path) + + return results -def get_http(url=None, headers=False): +def get_http(url, headers): """ fetches data from *url, returns it as a string """ succeed = 0 - if not headers: - headers = {'User-agent': 'XBMC/16.0 ( phil65@kodi.tv )'} - while (succeed < 2) and (not xbmc.abortRequested): + monitor = xbmc.Monitor() + while (succeed < 2) and (not monitor.abortRequested()): try: - r = requests.get(url, headers=headers) - if r.status_code != 200: + response = requests.get(url, headers=headers) + if not response.ok: raise Exception - return r.text + return response.text except Exception: - log("get_http: could not get data from %s" % url) - xbmc.sleep(1000) + log(f"get_http: could not get data from {url}") + monitor.waitForAbort(1) succeed += 1 return None @@ -225,25 +238,23 @@ def read_from_file(path="", raw=False): return data from file with *path """ if not xbmcvfs.exists(path): - return False + return [] + try: - with open(path) as f: - log("opened textfile %s." 
% (path)) + with xbmcvfs.File(path) as f: + log(f"opened textfile {path}.") if raw: return f.read() else: - return simplejson.load(f) + return json.load(f) except Exception: - log("failed to load textfile: " + path) - return False + log(f"failed to load textfile: {path}") + return [] def log(txt): - if isinstance(txt, str): - txt = txt.decode("utf-8", 'ignore') - message = u'%s: %s' % (ADDON_ID, txt) - xbmc.log(msg=message.encode("utf-8", 'ignore'), - level=xbmc.LOGDEBUG) + message = f"{ADDON_ID}: {txt}" + xbmc.log(msg=message, level=xbmc.LOGDEBUG) def save_to_file(content, filename, path=""): @@ -252,10 +263,12 @@ """ if not xbmcvfs.exists(path): xbmcvfs.mkdirs(path) - text_file_path = os.path.join(path, filename + ".txt") + + text_file_path = os.path.join(path, f"{filename}.txt") now = time.time() - text_file = xbmcvfs.File(text_file_path, "w") - simplejson.dump(content, text_file) - text_file.close() - log("saved textfile %s. Time: %f" % (text_file_path, time.time() - now)) + + with xbmcvfs.File(text_file_path, "w") as text_file: + json.dump(content, text_file) + + log(f"saved textfile {text_file_path}. Time: {float(time.time() - now)}") return True